| text (stringlengths 12–1.05M) | repo_name (stringlengths 5–86) | path (stringlengths 4–191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12–1.05M) | keyword (listlengths 1–23) | text_hash (stringlengths 64–64) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# In this example we show the use of the
# vtkBandedPolyDataContourFilter. This filter creates separate,
# constant-colored bands for a range of scalar values. Each band is
# bounded by two scalar values, and all cells lying between those two
# values are assigned the same cell scalar value.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# The lookup table is similar to that used by maps. Two hues are used:
# a brown for land, and a blue for water. The value of the hue is
# changed to give the effect of elevation.
Scale = 5
lutWater = vtk.vtkLookupTable()
lutWater.SetNumberOfColors(10)
lutWater.SetHueRange(0.58, 0.58)
lutWater.SetSaturationRange(0.5, 0.1)
lutWater.SetValueRange(0.5, 1.0)
lutWater.Build()
lutLand = vtk.vtkLookupTable()
lutLand.SetNumberOfColors(10)
lutLand.SetHueRange(0.1, 0.1)
lutLand.SetSaturationRange(0.4, 0.1)
lutLand.SetValueRange(0.55, 0.9)
lutLand.Build()
# The DEM reader reads data and creates an output image.
demModel = vtk.vtkDEMReader()
demModel.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demModel.Update()
# We shrink the terrain data down a bit to yield better performance for
# this example.
shrinkFactor = 4
shrink = vtk.vtkImageShrink3D()
shrink.SetShrinkFactors(shrinkFactor, shrinkFactor, 1)
shrink.SetInputConnection(demModel.GetOutputPort())
shrink.AveragingOn()
# Convert the image into polygons.
geom = vtk.vtkImageDataGeometryFilter()
geom.SetInputConnection(shrink.GetOutputPort())
# Warp the polygons based on elevation.
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(geom.GetOutputPort())
warp.SetNormal(0, 0, 1)
warp.UseNormalOn()
warp.SetScaleFactor(Scale)
# Create the contour bands.
bcf = vtk.vtkBandedPolyDataContourFilter()
bcf.SetInput(warp.GetPolyDataOutput())
bcf.GenerateValues(15, demModel.GetOutput().GetScalarRange())
bcf.SetScalarModeToIndex()
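# In index mode the output cell scalars are the band indices rather than the
# data values; demMapper below maps those indices through lutLand.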
bcf.GenerateContourEdgesOn()
# Compute normals to give a better look.
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(bcf.GetOutputPort())
normals.SetFeatureAngle(60)
normals.ConsistencyOff()
normals.SplittingOff()
demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(0, 10)
demMapper.SetLookupTable(lutLand)
demMapper.SetScalarModeToUseCellData()
demActor = vtk.vtkLODActor()
demActor.SetMapper(demMapper)
## Create contour edges
edgeMapper = vtk.vtkPolyDataMapper()
edgeMapper.SetInput(bcf.GetContourEdgesOutput())
edgeMapper.SetResolveCoincidentTopologyToPolygonOffset()
edgeActor = vtk.vtkActor()
edgeActor.SetMapper(edgeMapper)
edgeActor.GetProperty().SetColor(0, 0, 0)
## Test clipping
# Create the contour bands.
bcf2 = vtk.vtkBandedPolyDataContourFilter()
bcf2.SetInput(warp.GetPolyDataOutput())
bcf2.ClippingOn()
bcf2.GenerateValues(10, 1000, 2000)
bcf2.SetScalarModeToValue()
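# With clipping enabled, polygons whose scalars fall outside the 1000-2000
# range are removed; value mode keeps the original scalar values, so
# demMapper2 can use the DEM's full scalar range below.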
# Compute normals to give a better look.
normals2 = vtk.vtkPolyDataNormals()
normals2.SetInputConnection(bcf2.GetOutputPort())
normals2.SetFeatureAngle(60)
normals2.ConsistencyOff()
normals2.SplittingOff()
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(10)
demMapper2 = vtk.vtkPolyDataMapper()
demMapper2.SetInputConnection(normals2.GetOutputPort())
demMapper2.SetScalarRange(demModel.GetOutput().GetScalarRange())
demMapper2.SetLookupTable(lut)
demMapper2.SetScalarModeToUseCellData()
demActor2 = vtk.vtkLODActor()
demActor2.SetMapper(demMapper2)
demActor2.AddPosition(0, 15000, 0)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(demActor)
ren.AddActor(demActor2)
ren.AddActor(edgeActor)
ren.SetBackground(.4, .4, .4)
renWin.SetSize(375, 200)
cam = vtk.vtkCamera()
cam.SetPosition(-17438.8, 2410.62, 25470.8)
cam.SetFocalPoint(3985.35, 11930.6, 5922.14)
cam.SetViewUp(0, 0, 1)
ren.SetActiveCamera(cam)
ren.ResetCamera()
cam.Zoom(2)
iren.Initialize()
iren.SetDesiredUpdateRate(1)
def CheckAbort(obj, event):
foo = renWin.GetEventPending()
if foo != 0:
renWin.SetAbortRender(1)
renWin.AddObserver("AbortCheckEvent", CheckAbort)
renWin.Render()
renWin.Render()
iren.Start()
| CMUSV-VisTrails/WorkflowRecommendation | examples/vtk_examples/VisualizationAlgorithms/BandContourTerrain.py | Python | bsd-3-clause | 4,282 | ["VTK"] | f9e4f8fb48af2e5544d475696f5e8806b8d5773ef25ecbd32fb99fa4c55dd9bb |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Sat Jul 18 17:11:29 2015 by generateDS.py version 2.16a.
#
# Command line options:
# ('-o', 'meetCI.py')
# ('-s', 'meetCI_sub.py')
#
# Command line arguments:
# meetCI.xsd
#
# Command line:
# generateDS.py -o "meetCI.py" -s "meetCI_sub.py" meetCI.xsd
#
# Current working directory (os.getcwd()):
# generateDS-2.16a0
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
from lxml import etree as etree_
Validate_simpletypes_ = True
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
parser = etree_.ETCompatXMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
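# Example use (hypothetical file name):
#   doc = parsexml_('meetCI_instance.xml')
#   root_node = doc.getroot()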
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
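# For example (a hypothetical override), a generatedssuper.py placed next to
# this module could define:
#
#     class GeneratedsSuper(object):
#         def gds_format_string(self, input_data, input_name=''):
#             return input_data.strip()
#
# and the import below would then pick it up instead of the fallback class
# defined in the except branch.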
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
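            # Accepts ISO-8601 strings such as '2015-07-18T17:11:29Z' or
            # '2015-07-18T17:11:29.500+02:00' and returns a timezone-aware
            # datetime.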
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
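            # Example: patterns = [['^[0-9]+$', '^[a-z]+$']] accepts a target
            # that is all digits or all lowercase letters; adding a second
            # inner list would also require a match against one of its
            # patterns.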
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class MeetCI(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, MachineLearning=None, ExpertSystem=None):
self.original_tagname_ = None
self.MachineLearning = MachineLearning
self.ExpertSystem = ExpertSystem
def factory(*args_, **kwargs_):
if MeetCI.subclass:
return MeetCI.subclass(*args_, **kwargs_)
else:
return MeetCI(*args_, **kwargs_)
factory = staticmethod(factory)
def get_MachineLearning(self): return self.MachineLearning
def set_MachineLearning(self, MachineLearning): self.MachineLearning = MachineLearning
def get_ExpertSystem(self): return self.ExpertSystem
def set_ExpertSystem(self, ExpertSystem): self.ExpertSystem = ExpertSystem
def hasContent_(self):
if (
self.MachineLearning is not None or
self.ExpertSystem is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='MeetCI', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='MeetCI')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='MeetCI', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MeetCI'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='MeetCI', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.MachineLearning is not None:
self.MachineLearning.export(outfile, level, namespace_, name_='MachineLearning', pretty_print=pretty_print)
if self.ExpertSystem is not None:
self.ExpertSystem.export(outfile, level, namespace_, name_='ExpertSystem', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='MeetCI'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.MachineLearning is not None:
showIndent(outfile, level)
outfile.write('MachineLearning=model_.MachineLearning(\n')
self.MachineLearning.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ExpertSystem is not None:
showIndent(outfile, level)
outfile.write('ExpertSystem=model_.ExpertSystem(\n')
self.ExpertSystem.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'MachineLearning':
obj_ = MachineLearning.factory()
obj_.build(child_)
self.MachineLearning = obj_
obj_.original_tagname_ = 'MachineLearning'
elif nodeName_ == 'ExpertSystem':
obj_ = ExpertSystem.factory()
obj_.build(child_)
self.ExpertSystem = obj_
obj_.original_tagname_ = 'ExpertSystem'
# end class MeetCI
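# A typical use of the generated bindings (hypothetical instance file):
#   doc = parsexml_('meetCI_instance.xml')
#   root = MeetCI.factory().build(doc.getroot())
#   root.export(sys.stdout, 0, name_='MeetCI')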
class MachineLearning(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, classification=None, prediction=None):
self.original_tagname_ = None
self.classification = classification
self.prediction = prediction
def factory(*args_, **kwargs_):
if MachineLearning.subclass:
return MachineLearning.subclass(*args_, **kwargs_)
else:
return MachineLearning(*args_, **kwargs_)
factory = staticmethod(factory)
def get_classification(self): return self.classification
def set_classification(self, classification): self.classification = classification
def get_prediction(self): return self.prediction
def set_prediction(self, prediction): self.prediction = prediction
def hasContent_(self):
if (
self.classification is not None or
self.prediction is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='MachineLearning', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='MachineLearning')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='MachineLearning', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MachineLearning'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='MachineLearning', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.classification is not None:
self.classification.export(outfile, level, namespace_, name_='classification', pretty_print=pretty_print)
if self.prediction is not None:
self.prediction.export(outfile, level, namespace_, name_='prediction', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='MachineLearning'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.classification is not None:
showIndent(outfile, level)
outfile.write('classification=model_.classification(\n')
self.classification.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.prediction is not None:
showIndent(outfile, level)
outfile.write('prediction=model_.prediction(\n')
self.prediction.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'classification':
obj_ = classification.factory()
obj_.build(child_)
self.classification = obj_
obj_.original_tagname_ = 'classification'
elif nodeName_ == 'prediction':
obj_ = prediction.factory()
obj_.build(child_)
self.prediction = obj_
obj_.original_tagname_ = 'prediction'
# end class MachineLearning
class MultiLayerPerceptron(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, inputLayerActivation='Linear', hiddenLayerActivation=None, hiddenLayers=None, outputLayerActivation=None, momentum=None, epochs=None, learningRate=None):
self.original_tagname_ = None
self.inputLayerActivation = inputLayerActivation
self.validate_inputLayerActivationType(self.inputLayerActivation)
self.hiddenLayerActivation = hiddenLayerActivation
self.validate_hiddenLayerActivationType(self.hiddenLayerActivation)
self.hiddenLayers = hiddenLayers
self.validate_hiddenLayersType(self.hiddenLayers)
self.outputLayerActivation = outputLayerActivation
self.validate_outputLayerActivationType(self.outputLayerActivation)
self.momentum = momentum
self.epochs = epochs
self.learningRate = learningRate
def factory(*args_, **kwargs_):
if MultiLayerPerceptron.subclass:
return MultiLayerPerceptron.subclass(*args_, **kwargs_)
else:
return MultiLayerPerceptron(*args_, **kwargs_)
factory = staticmethod(factory)
def get_inputLayerActivation(self): return self.inputLayerActivation
def set_inputLayerActivation(self, inputLayerActivation): self.inputLayerActivation = inputLayerActivation
def get_hiddenLayerActivation(self): return self.hiddenLayerActivation
def set_hiddenLayerActivation(self, hiddenLayerActivation): self.hiddenLayerActivation = hiddenLayerActivation
def get_hiddenLayers(self): return self.hiddenLayers
def set_hiddenLayers(self, hiddenLayers): self.hiddenLayers = hiddenLayers
def get_outputLayerActivation(self): return self.outputLayerActivation
def set_outputLayerActivation(self, outputLayerActivation): self.outputLayerActivation = outputLayerActivation
def get_momentum(self): return self.momentum
def set_momentum(self, momentum): self.momentum = momentum
def get_epochs(self): return self.epochs
def set_epochs(self, epochs): self.epochs = epochs
def get_learningRate(self): return self.learningRate
def set_learningRate(self, learningRate): self.learningRate = learningRate
def validate_inputLayerActivationType(self, value):
# Validate type inputLayerActivationType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Linear', 'Ramp', 'Step', 'Sigmoid', 'Tanh', 'Gaussian', 'Trapezoid', 'Sgn', 'Sin', 'Log']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on inputLayerActivationType' % {"value" : value.encode("utf-8")} )
def validate_hiddenLayerActivationType(self, value):
# Validate type hiddenLayerActivationType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Linear', 'Ramp', 'Step', 'Sigmoid', 'Tanh', 'Gaussian', 'Trapezoid', 'Sgn', 'Sin', 'Log', 'Softmax']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on hiddenLayerActivationType' % {"value" : value.encode("utf-8")} )
def validate_hiddenLayersType(self, value):
# Validate type hiddenLayersType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
if not self.gds_validate_simple_patterns(
self.validate_hiddenLayersType_patterns_, value):
warnings_.warn('Value "%s" does not match xsd pattern restrictions: %s' % (value.encode('utf-8'), self.validate_hiddenLayersType_patterns_, ))
validate_hiddenLayersType_patterns_ = [['^([0-9])+(,([0-9])+)$']]
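    # As written, the generated pattern accepts exactly two comma-separated
    # integer fields, e.g. "10,5"; a single value such as "10" would not match.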
def validate_outputLayerActivationType(self, value):
# Validate type outputLayerActivationType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Linear', 'Ramp', 'Step', 'Sigmoid', 'Tanh', 'Gaussian', 'Trapezoid', 'Sgn', 'Sin', 'Log', 'Softmax']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on outputLayerActivationType' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.inputLayerActivation != "Linear" or
self.hiddenLayerActivation is not None or
self.hiddenLayers is not None or
self.outputLayerActivation is not None or
self.momentum is not None or
self.epochs is not None or
self.learningRate is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='MultiLayerPerceptron', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='MultiLayerPerceptron')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='MultiLayerPerceptron', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MultiLayerPerceptron'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='MultiLayerPerceptron', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.inputLayerActivation != "Linear":
showIndent(outfile, level, pretty_print)
outfile.write('<%sinputLayerActivation>%s</%sinputLayerActivation>%s' % (namespace_, self.gds_format_string(quote_xml(self.inputLayerActivation).encode(ExternalEncoding), input_name='inputLayerActivation'), namespace_, eol_))
if self.hiddenLayerActivation is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shiddenLayerActivation>%s</%shiddenLayerActivation>%s' % (namespace_, self.gds_format_string(quote_xml(self.hiddenLayerActivation).encode(ExternalEncoding), input_name='hiddenLayerActivation'), namespace_, eol_))
if self.hiddenLayers is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shiddenLayers>%s</%shiddenLayers>%s' % (namespace_, self.gds_format_string(quote_xml(self.hiddenLayers).encode(ExternalEncoding), input_name='hiddenLayers'), namespace_, eol_))
if self.outputLayerActivation is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%soutputLayerActivation>%s</%soutputLayerActivation>%s' % (namespace_, self.gds_format_string(quote_xml(self.outputLayerActivation).encode(ExternalEncoding), input_name='outputLayerActivation'), namespace_, eol_))
if self.momentum is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smomentum>%s</%smomentum>%s' % (namespace_, self.gds_format_float(self.momentum, input_name='momentum'), namespace_, eol_))
if self.epochs is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sepochs>%s</%sepochs>%s' % (namespace_, self.gds_format_integer(self.epochs, input_name='epochs'), namespace_, eol_))
if self.learningRate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slearningRate>%s</%slearningRate>%s' % (namespace_, self.gds_format_float(self.learningRate, input_name='learningRate'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='MultiLayerPerceptron'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.inputLayerActivation is not None:
showIndent(outfile, level)
outfile.write('inputLayerActivation=%s,\n' % quote_python(self.inputLayerActivation).encode(ExternalEncoding))
if self.hiddenLayerActivation is not None:
showIndent(outfile, level)
outfile.write('hiddenLayerActivation=%s,\n' % quote_python(self.hiddenLayerActivation).encode(ExternalEncoding))
if self.hiddenLayers is not None:
showIndent(outfile, level)
outfile.write('hiddenLayers=%s,\n' % quote_python(self.hiddenLayers).encode(ExternalEncoding))
if self.outputLayerActivation is not None:
showIndent(outfile, level)
outfile.write('outputLayerActivation=%s,\n' % quote_python(self.outputLayerActivation).encode(ExternalEncoding))
if self.momentum is not None:
showIndent(outfile, level)
outfile.write('momentum=%f,\n' % self.momentum)
if self.epochs is not None:
showIndent(outfile, level)
outfile.write('epochs=%d,\n' % self.epochs)
if self.learningRate is not None:
showIndent(outfile, level)
outfile.write('learningRate=%f,\n' % self.learningRate)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'inputLayerActivation':
inputLayerActivation_ = child_.text
inputLayerActivation_ = self.gds_validate_string(inputLayerActivation_, node, 'inputLayerActivation')
self.inputLayerActivation = inputLayerActivation_
# validate type inputLayerActivationType
self.validate_inputLayerActivationType(self.inputLayerActivation)
elif nodeName_ == 'hiddenLayerActivation':
hiddenLayerActivation_ = child_.text
hiddenLayerActivation_ = self.gds_validate_string(hiddenLayerActivation_, node, 'hiddenLayerActivation')
self.hiddenLayerActivation = hiddenLayerActivation_
# validate type hiddenLayerActivationType
self.validate_hiddenLayerActivationType(self.hiddenLayerActivation)
elif nodeName_ == 'hiddenLayers':
hiddenLayers_ = child_.text
hiddenLayers_ = self.gds_validate_string(hiddenLayers_, node, 'hiddenLayers')
self.hiddenLayers = hiddenLayers_
# validate type hiddenLayersType
self.validate_hiddenLayersType(self.hiddenLayers)
elif nodeName_ == 'outputLayerActivation':
outputLayerActivation_ = child_.text
outputLayerActivation_ = self.gds_validate_string(outputLayerActivation_, node, 'outputLayerActivation')
self.outputLayerActivation = outputLayerActivation_
# validate type outputLayerActivationType
self.validate_outputLayerActivationType(self.outputLayerActivation)
elif nodeName_ == 'momentum':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'momentum')
self.momentum = fval_
elif nodeName_ == 'epochs':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'epochs')
self.epochs = ival_
elif nodeName_ == 'learningRate':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'learningRate')
self.learningRate = fval_
# end class MultiLayerPerceptron
class RadialBasisFunctionNetwork(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, hiddenNeurons=None, outputLayerActivation=None, momentum=None, epochs=None, learningRate=None):
self.original_tagname_ = None
self.hiddenNeurons = hiddenNeurons
self.outputLayerActivation = outputLayerActivation
self.validate_outputLayerActivationType1(self.outputLayerActivation)
self.momentum = momentum
self.epochs = epochs
self.learningRate = learningRate
def factory(*args_, **kwargs_):
if RadialBasisFunctionNetwork.subclass:
return RadialBasisFunctionNetwork.subclass(*args_, **kwargs_)
else:
return RadialBasisFunctionNetwork(*args_, **kwargs_)
factory = staticmethod(factory)
def get_hiddenNeurons(self): return self.hiddenNeurons
def set_hiddenNeurons(self, hiddenNeurons): self.hiddenNeurons = hiddenNeurons
def get_outputLayerActivation(self): return self.outputLayerActivation
def set_outputLayerActivation(self, outputLayerActivation): self.outputLayerActivation = outputLayerActivation
def get_momentum(self): return self.momentum
def set_momentum(self, momentum): self.momentum = momentum
def get_epochs(self): return self.epochs
def set_epochs(self, epochs): self.epochs = epochs
def get_learningRate(self): return self.learningRate
def set_learningRate(self, learningRate): self.learningRate = learningRate
def validate_outputLayerActivationType1(self, value):
# Validate type outputLayerActivationType1, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Linear', 'Ramp', 'Step', 'Sigmoid', 'Tanh', 'Gaussian', 'Trapezoid', 'Sgn', 'Sin', 'Log', 'Softmax']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on outputLayerActivationType1' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.hiddenNeurons is not None or
self.outputLayerActivation is not None or
self.momentum is not None or
self.epochs is not None or
self.learningRate is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='RadialBasisFunctionNetwork', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RadialBasisFunctionNetwork')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='RadialBasisFunctionNetwork', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RadialBasisFunctionNetwork'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='RadialBasisFunctionNetwork', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.hiddenNeurons is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shiddenNeurons>%s</%shiddenNeurons>%s' % (namespace_, self.gds_format_integer(self.hiddenNeurons, input_name='hiddenNeurons'), namespace_, eol_))
if self.outputLayerActivation is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%soutputLayerActivation>%s</%soutputLayerActivation>%s' % (namespace_, self.gds_format_string(quote_xml(self.outputLayerActivation).encode(ExternalEncoding), input_name='outputLayerActivation'), namespace_, eol_))
if self.momentum is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smomentum>%s</%smomentum>%s' % (namespace_, self.gds_format_float(self.momentum, input_name='momentum'), namespace_, eol_))
if self.epochs is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sepochs>%s</%sepochs>%s' % (namespace_, self.gds_format_integer(self.epochs, input_name='epochs'), namespace_, eol_))
if self.learningRate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slearningRate>%s</%slearningRate>%s' % (namespace_, self.gds_format_float(self.learningRate, input_name='learningRate'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='RadialBasisFunctionNetwork'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.hiddenNeurons is not None:
showIndent(outfile, level)
outfile.write('hiddenNeurons=%d,\n' % self.hiddenNeurons)
if self.outputLayerActivation is not None:
showIndent(outfile, level)
outfile.write('outputLayerActivation=%s,\n' % quote_python(self.outputLayerActivation).encode(ExternalEncoding))
if self.momentum is not None:
showIndent(outfile, level)
outfile.write('momentum=%f,\n' % self.momentum)
if self.epochs is not None:
showIndent(outfile, level)
outfile.write('epochs=%d,\n' % self.epochs)
if self.learningRate is not None:
showIndent(outfile, level)
outfile.write('learningRate=%f,\n' % self.learningRate)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'hiddenNeurons':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'hiddenNeurons')
self.hiddenNeurons = ival_
elif nodeName_ == 'outputLayerActivation':
outputLayerActivation_ = child_.text
outputLayerActivation_ = self.gds_validate_string(outputLayerActivation_, node, 'outputLayerActivation')
self.outputLayerActivation = outputLayerActivation_
# validate type outputLayerActivationType1
self.validate_outputLayerActivationType1(self.outputLayerActivation)
elif nodeName_ == 'momentum':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'momentum')
self.momentum = fval_
elif nodeName_ == 'epochs':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'epochs')
self.epochs = ival_
elif nodeName_ == 'learningRate':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'learningRate')
self.learningRate = fval_
# end class RadialBasisFunctionNetwork
class RecurrentNeuralNetwork(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, RNN_Type=None, hiddenLayerActivation=None, hiddenNeurons=None, outputLayerActivation=None, epochs=None, momentum=None, learningRate=None):
self.original_tagname_ = None
self.RNN_Type = RNN_Type
self.validate_RNN_TypeType(self.RNN_Type)
self.hiddenLayerActivation = hiddenLayerActivation
self.validate_hiddenLayerActivationType2(self.hiddenLayerActivation)
self.hiddenNeurons = hiddenNeurons
self.outputLayerActivation = outputLayerActivation
self.validate_outputLayerActivationType3(self.outputLayerActivation)
self.epochs = epochs
self.momentum = momentum
self.learningRate = learningRate
def factory(*args_, **kwargs_):
if RecurrentNeuralNetwork.subclass:
return RecurrentNeuralNetwork.subclass(*args_, **kwargs_)
else:
return RecurrentNeuralNetwork(*args_, **kwargs_)
factory = staticmethod(factory)
def get_RNN_Type(self): return self.RNN_Type
def set_RNN_Type(self, RNN_Type): self.RNN_Type = RNN_Type
def get_hiddenLayerActivation(self): return self.hiddenLayerActivation
def set_hiddenLayerActivation(self, hiddenLayerActivation): self.hiddenLayerActivation = hiddenLayerActivation
def get_hiddenNeurons(self): return self.hiddenNeurons
def set_hiddenNeurons(self, hiddenNeurons): self.hiddenNeurons = hiddenNeurons
def get_outputLayerActivation(self): return self.outputLayerActivation
def set_outputLayerActivation(self, outputLayerActivation): self.outputLayerActivation = outputLayerActivation
def get_epochs(self): return self.epochs
def set_epochs(self, epochs): self.epochs = epochs
def get_momentum(self): return self.momentum
def set_momentum(self, momentum): self.momentum = momentum
def get_learningRate(self): return self.learningRate
def set_learningRate(self, learningRate): self.learningRate = learningRate
def validate_RNN_TypeType(self, value):
# Validate type RNN_TypeType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Elman', 'Jordan']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on RNN_TypeType' % {"value" : value.encode("utf-8")} )
def validate_hiddenLayerActivationType2(self, value):
# Validate type hiddenLayerActivationType2, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Linear', 'Ramp', 'Step', 'Sigmoid', 'Tanh', 'Gaussian', 'Trapezoid', 'Sgn', 'Sin', 'Log', 'Softmax']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on hiddenLayerActivationType2' % {"value" : value.encode("utf-8")} )
def validate_outputLayerActivationType3(self, value):
# Validate type outputLayerActivationType3, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Linear', 'Ramp', 'Step', 'Sigmoid', 'Tanh', 'Gaussian', 'Trapezoid', 'Sgn', 'Sin', 'Log', 'Softmax']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on outputLayerActivationType3' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.RNN_Type is not None or
self.hiddenLayerActivation is not None or
self.hiddenNeurons is not None or
self.outputLayerActivation is not None or
self.epochs is not None or
self.momentum is not None or
self.learningRate is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='RecurrentNeuralNetwork', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RecurrentNeuralNetwork')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='RecurrentNeuralNetwork', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RecurrentNeuralNetwork'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='RecurrentNeuralNetwork', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.RNN_Type is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sRNN_Type>%s</%sRNN_Type>%s' % (namespace_, self.gds_format_string(quote_xml(self.RNN_Type).encode(ExternalEncoding), input_name='RNN_Type'), namespace_, eol_))
if self.hiddenLayerActivation is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shiddenLayerActivation>%s</%shiddenLayerActivation>%s' % (namespace_, self.gds_format_string(quote_xml(self.hiddenLayerActivation).encode(ExternalEncoding), input_name='hiddenLayerActivation'), namespace_, eol_))
if self.hiddenNeurons is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shiddenNeurons>%s</%shiddenNeurons>%s' % (namespace_, self.gds_format_integer(self.hiddenNeurons, input_name='hiddenNeurons'), namespace_, eol_))
if self.outputLayerActivation is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%soutputLayerActivation>%s</%soutputLayerActivation>%s' % (namespace_, self.gds_format_string(quote_xml(self.outputLayerActivation).encode(ExternalEncoding), input_name='outputLayerActivation'), namespace_, eol_))
if self.epochs is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sepochs>%s</%sepochs>%s' % (namespace_, self.gds_format_integer(self.epochs, input_name='epochs'), namespace_, eol_))
if self.momentum is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smomentum>%s</%smomentum>%s' % (namespace_, self.gds_format_float(self.momentum, input_name='momentum'), namespace_, eol_))
if self.learningRate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slearningRate>%s</%slearningRate>%s' % (namespace_, self.gds_format_float(self.learningRate, input_name='learningRate'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='RecurrentNeuralNetwork'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.RNN_Type is not None:
showIndent(outfile, level)
outfile.write('RNN_Type=%s,\n' % quote_python(self.RNN_Type).encode(ExternalEncoding))
if self.hiddenLayerActivation is not None:
showIndent(outfile, level)
outfile.write('hiddenLayerActivation=%s,\n' % quote_python(self.hiddenLayerActivation).encode(ExternalEncoding))
if self.hiddenNeurons is not None:
showIndent(outfile, level)
outfile.write('hiddenNeurons=%d,\n' % self.hiddenNeurons)
if self.outputLayerActivation is not None:
showIndent(outfile, level)
outfile.write('outputLayerActivation=%s,\n' % quote_python(self.outputLayerActivation).encode(ExternalEncoding))
if self.epochs is not None:
showIndent(outfile, level)
outfile.write('epochs=%d,\n' % self.epochs)
if self.momentum is not None:
showIndent(outfile, level)
outfile.write('momentum=%f,\n' % self.momentum)
if self.learningRate is not None:
showIndent(outfile, level)
outfile.write('learningRate=%f,\n' % self.learningRate)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'RNN_Type':
RNN_Type_ = child_.text
RNN_Type_ = self.gds_validate_string(RNN_Type_, node, 'RNN_Type')
self.RNN_Type = RNN_Type_
# validate type RNN_TypeType
self.validate_RNN_TypeType(self.RNN_Type)
elif nodeName_ == 'hiddenLayerActivation':
hiddenLayerActivation_ = child_.text
hiddenLayerActivation_ = self.gds_validate_string(hiddenLayerActivation_, node, 'hiddenLayerActivation')
self.hiddenLayerActivation = hiddenLayerActivation_
# validate type hiddenLayerActivationType2
self.validate_hiddenLayerActivationType2(self.hiddenLayerActivation)
elif nodeName_ == 'hiddenNeurons':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'hiddenNeurons')
self.hiddenNeurons = ival_
elif nodeName_ == 'outputLayerActivation':
outputLayerActivation_ = child_.text
outputLayerActivation_ = self.gds_validate_string(outputLayerActivation_, node, 'outputLayerActivation')
self.outputLayerActivation = outputLayerActivation_
# validate type outputLayerActivationType3
self.validate_outputLayerActivationType3(self.outputLayerActivation)
elif nodeName_ == 'epochs':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'epochs')
self.epochs = ival_
elif nodeName_ == 'momentum':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'momentum')
self.momentum = fval_
elif nodeName_ == 'learningRate':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'learningRate')
self.learningRate = fval_
# end class RecurrentNeuralNetwork
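# Illustrative usage sketch, not generated code: a minimal example of how the
# build() machinery above can populate a RecurrentNeuralNetwork from an XML
# fragment. It assumes the module-level helpers emitted earlier in this file
# (Tag_pattern_, raise_parse_error, the GeneratedsSuper base class) and that
# factory() follows the same pattern as the other generated classes below.
# Enumeration-typed children (RNN_Type, the activation functions) are omitted
# because their legal values are defined elsewhere in the schema.
def _example_build_recurrent_neural_network():
    from xml.etree import ElementTree as etree_
    fragment = (
        '<RecurrentNeuralNetwork>'
        '<hiddenNeurons>64</hiddenNeurons>'
        '<epochs>100</epochs>'
        '<momentum>0.9</momentum>'
        '<learningRate>0.01</learningRate>'
        '</RecurrentNeuralNetwork>'
    )
    rnn = RecurrentNeuralNetwork.factory()
    rnn.build(etree_.fromstring(fragment))
    return rnn.epochs, rnn.momentum  # -> (100, 0.9)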
class RandomForest(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, nTrees=None, maxDepth=None, maxLeafNodes=None, minSamplesSplit=None, minSamplesLeaf=None, minFractionLeaf=None):
self.original_tagname_ = None
self.nTrees = nTrees
self.maxDepth = maxDepth
self.maxLeafNodes = maxLeafNodes
self.minSamplesSplit = minSamplesSplit
self.minSamplesLeaf = minSamplesLeaf
self.minFractionLeaf = minFractionLeaf
def factory(*args_, **kwargs_):
if RandomForest.subclass:
return RandomForest.subclass(*args_, **kwargs_)
else:
return RandomForest(*args_, **kwargs_)
factory = staticmethod(factory)
def get_nTrees(self): return self.nTrees
def set_nTrees(self, nTrees): self.nTrees = nTrees
def get_maxDepth(self): return self.maxDepth
def set_maxDepth(self, maxDepth): self.maxDepth = maxDepth
def get_maxLeafNodes(self): return self.maxLeafNodes
def set_maxLeafNodes(self, maxLeafNodes): self.maxLeafNodes = maxLeafNodes
def get_minSamplesSplit(self): return self.minSamplesSplit
def set_minSamplesSplit(self, minSamplesSplit): self.minSamplesSplit = minSamplesSplit
def get_minSamplesLeaf(self): return self.minSamplesLeaf
def set_minSamplesLeaf(self, minSamplesLeaf): self.minSamplesLeaf = minSamplesLeaf
def get_minFractionLeaf(self): return self.minFractionLeaf
def set_minFractionLeaf(self, minFractionLeaf): self.minFractionLeaf = minFractionLeaf
def hasContent_(self):
if (
self.nTrees is not None or
self.maxDepth is not None or
self.maxLeafNodes is not None or
self.minSamplesSplit is not None or
self.minSamplesLeaf is not None or
self.minFractionLeaf is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='RandomForest', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RandomForest')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='RandomForest', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RandomForest'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='RandomForest', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.nTrees is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%snTrees>%s</%snTrees>%s' % (namespace_, self.gds_format_integer(self.nTrees, input_name='nTrees'), namespace_, eol_))
if self.maxDepth is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smaxDepth>%s</%smaxDepth>%s' % (namespace_, self.gds_format_integer(self.maxDepth, input_name='maxDepth'), namespace_, eol_))
if self.maxLeafNodes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smaxLeafNodes>%s</%smaxLeafNodes>%s' % (namespace_, self.gds_format_integer(self.maxLeafNodes, input_name='maxLeafNodes'), namespace_, eol_))
if self.minSamplesSplit is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sminSamplesSplit>%s</%sminSamplesSplit>%s' % (namespace_, self.gds_format_integer(self.minSamplesSplit, input_name='minSamplesSplit'), namespace_, eol_))
if self.minSamplesLeaf is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sminSamplesLeaf>%s</%sminSamplesLeaf>%s' % (namespace_, self.gds_format_integer(self.minSamplesLeaf, input_name='minSamplesLeaf'), namespace_, eol_))
if self.minFractionLeaf is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sminFractionLeaf>%s</%sminFractionLeaf>%s' % (namespace_, self.gds_format_float(self.minFractionLeaf, input_name='minFractionLeaf'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='RandomForest'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.nTrees is not None:
showIndent(outfile, level)
outfile.write('nTrees=%d,\n' % self.nTrees)
if self.maxDepth is not None:
showIndent(outfile, level)
outfile.write('maxDepth=%d,\n' % self.maxDepth)
if self.maxLeafNodes is not None:
showIndent(outfile, level)
outfile.write('maxLeafNodes=%d,\n' % self.maxLeafNodes)
if self.minSamplesSplit is not None:
showIndent(outfile, level)
outfile.write('minSamplesSplit=%d,\n' % self.minSamplesSplit)
if self.minSamplesLeaf is not None:
showIndent(outfile, level)
outfile.write('minSamplesLeaf=%d,\n' % self.minSamplesLeaf)
if self.minFractionLeaf is not None:
showIndent(outfile, level)
outfile.write('minFractionLeaf=%f,\n' % self.minFractionLeaf)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'nTrees':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'nTrees')
self.nTrees = ival_
elif nodeName_ == 'maxDepth':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'maxDepth')
self.maxDepth = ival_
elif nodeName_ == 'maxLeafNodes':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'maxLeafNodes')
self.maxLeafNodes = ival_
elif nodeName_ == 'minSamplesSplit':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'minSamplesSplit')
self.minSamplesSplit = ival_
elif nodeName_ == 'minSamplesLeaf':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'minSamplesLeaf')
self.minSamplesLeaf = ival_
elif nodeName_ == 'minFractionLeaf':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'minFractionLeaf')
self.minFractionLeaf = fval_
# end class RandomForest
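# Illustrative usage sketch, not generated code: RandomForest holds only
# optional scalar children, so a configured instance can be created directly
# with keyword arguments and serialised with the export() method defined
# above. Any object with a write() method can serve as the output stream;
# sys.stdout is used here purely for illustration.
def _example_export_random_forest():
    import sys
    forest = RandomForest(nTrees=200, maxDepth=8, minSamplesSplit=4)
    forest.export(sys.stdout, level=0, name_='RandomForest')
    # Unset children (maxLeafNodes, minSamplesLeaf, minFractionLeaf) are
    # simply skipped by exportChildren().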
class SupportVectorMachine(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, kernel=None, degree=None, gamma=None, coef=None, tol=None, maxIter=None):
self.original_tagname_ = None
self.kernel = kernel
self.validate_kernelType(self.kernel)
self.degree = degree
self.gamma = gamma
self.coef = coef
self.tol = tol
self.maxIter = maxIter
def factory(*args_, **kwargs_):
if SupportVectorMachine.subclass:
return SupportVectorMachine.subclass(*args_, **kwargs_)
else:
return SupportVectorMachine(*args_, **kwargs_)
factory = staticmethod(factory)
def get_kernel(self): return self.kernel
def set_kernel(self, kernel): self.kernel = kernel
def get_degree(self): return self.degree
def set_degree(self, degree): self.degree = degree
def get_gamma(self): return self.gamma
def set_gamma(self, gamma): self.gamma = gamma
def get_coef(self): return self.coef
def set_coef(self, coef): self.coef = coef
def get_tol(self): return self.tol
def set_tol(self, tol): self.tol = tol
def get_maxIter(self): return self.maxIter
def set_maxIter(self, maxIter): self.maxIter = maxIter
def validate_kernelType(self, value):
# Validate type kernelType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['Linear', 'RBF', 'Sigmoid', 'Poly', 'Precomputed', 'Trapezoid']
            if value not in enumerations:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on kernelType' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.kernel is not None or
self.degree is not None or
self.gamma is not None or
self.coef is not None or
self.tol is not None or
self.maxIter is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SupportVectorMachine', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SupportVectorMachine')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='SupportVectorMachine', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SupportVectorMachine'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='SupportVectorMachine', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.kernel is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%skernel>%s</%skernel>%s' % (namespace_, self.gds_format_string(quote_xml(self.kernel).encode(ExternalEncoding), input_name='kernel'), namespace_, eol_))
if self.degree is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdegree>%s</%sdegree>%s' % (namespace_, self.gds_format_integer(self.degree, input_name='degree'), namespace_, eol_))
if self.gamma is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sgamma>%s</%sgamma>%s' % (namespace_, self.gds_format_double(self.gamma, input_name='gamma'), namespace_, eol_))
if self.coef is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scoef>%s</%scoef>%s' % (namespace_, self.gds_format_double(self.coef, input_name='coef'), namespace_, eol_))
if self.tol is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stol>%s</%stol>%s' % (namespace_, self.gds_format_double(self.tol, input_name='tol'), namespace_, eol_))
if self.maxIter is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%smaxIter>%s</%smaxIter>%s' % (namespace_, self.gds_format_integer(self.maxIter, input_name='maxIter'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='SupportVectorMachine'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.kernel is not None:
showIndent(outfile, level)
outfile.write('kernel=%s,\n' % quote_python(self.kernel).encode(ExternalEncoding))
if self.degree is not None:
showIndent(outfile, level)
outfile.write('degree=%d,\n' % self.degree)
if self.gamma is not None:
showIndent(outfile, level)
outfile.write('gamma=%e,\n' % self.gamma)
if self.coef is not None:
showIndent(outfile, level)
outfile.write('coef=%e,\n' % self.coef)
if self.tol is not None:
showIndent(outfile, level)
outfile.write('tol=%e,\n' % self.tol)
if self.maxIter is not None:
showIndent(outfile, level)
outfile.write('maxIter=%d,\n' % self.maxIter)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'kernel':
kernel_ = child_.text
kernel_ = self.gds_validate_string(kernel_, node, 'kernel')
self.kernel = kernel_
# validate type kernelType
self.validate_kernelType(self.kernel)
elif nodeName_ == 'degree':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'degree')
self.degree = ival_
elif nodeName_ == 'gamma':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'gamma')
self.gamma = fval_
elif nodeName_ == 'coef':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'coef')
self.coef = fval_
elif nodeName_ == 'tol':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'tol')
self.tol = fval_
elif nodeName_ == 'maxIter':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'maxIter')
self.maxIter = ival_
# end class SupportVectorMachine
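# Illustrative usage sketch, not generated code: validate_kernelType() above
# only warns when the kernel falls outside the XSD enumeration; it never
# raises, and the check is skipped unless the module-level
# Validate_simpletypes_ flag is true. The sketch shows a valid kernel passing
# silently and an invalid one triggering the warning.
def _example_validate_svm_kernel():
    svm = SupportVectorMachine(kernel='RBF', gamma=0.1, maxIter=500)
    svm.set_kernel('NotAKernel')
    svm.validate_kernelType(svm.kernel)  # warns: value not in the enumeration
    return svm.get_kernel()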
class classification(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, datafile=None, input=None, output=None, classes=None, split=None, delimiter=None, algorithm=None):
self.original_tagname_ = None
self.datafile = datafile
self.input = input
self.output = output
self.classes = classes
self.split = split
self.delimiter = delimiter
self.algorithm = algorithm
def factory(*args_, **kwargs_):
if classification.subclass:
return classification.subclass(*args_, **kwargs_)
else:
return classification(*args_, **kwargs_)
factory = staticmethod(factory)
def get_datafile(self): return self.datafile
def set_datafile(self, datafile): self.datafile = datafile
def get_input(self): return self.input
def set_input(self, input): self.input = input
def get_output(self): return self.output
def set_output(self, output): self.output = output
def get_classes(self): return self.classes
def set_classes(self, classes): self.classes = classes
def get_split(self): return self.split
def set_split(self, split): self.split = split
def get_delimiter(self): return self.delimiter
def set_delimiter(self, delimiter): self.delimiter = delimiter
def get_algorithm(self): return self.algorithm
def set_algorithm(self, algorithm): self.algorithm = algorithm
def hasContent_(self):
if (
self.datafile is not None or
self.input is not None or
self.output is not None or
self.classes is not None or
self.split is not None or
self.delimiter is not None or
self.algorithm is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='classification', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='classification')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='classification', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='classification'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='classification', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.datafile is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdatafile>%s</%sdatafile>%s' % (namespace_, self.gds_format_string(quote_xml(self.datafile).encode(ExternalEncoding), input_name='datafile'), namespace_, eol_))
if self.input is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sinput>%s</%sinput>%s' % (namespace_, self.gds_format_integer(self.input, input_name='input'), namespace_, eol_))
if self.output is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%soutput>%s</%soutput>%s' % (namespace_, self.gds_format_integer(self.output, input_name='output'), namespace_, eol_))
if self.classes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sclasses>%s</%sclasses>%s' % (namespace_, self.gds_format_integer(self.classes, input_name='classes'), namespace_, eol_))
if self.split is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%ssplit>%s</%ssplit>%s' % (namespace_, self.gds_format_float(self.split, input_name='split'), namespace_, eol_))
if self.delimiter is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdelimiter>%s</%sdelimiter>%s' % (namespace_, self.gds_format_string(quote_xml(self.delimiter).encode(ExternalEncoding), input_name='delimiter'), namespace_, eol_))
if self.algorithm is not None:
self.algorithm.export(outfile, level, namespace_, name_='algorithm', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='classification'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.datafile is not None:
showIndent(outfile, level)
outfile.write('datafile=%s,\n' % quote_python(self.datafile).encode(ExternalEncoding))
if self.input is not None:
showIndent(outfile, level)
outfile.write('input=%d,\n' % self.input)
if self.output is not None:
showIndent(outfile, level)
outfile.write('output=%d,\n' % self.output)
if self.classes is not None:
showIndent(outfile, level)
outfile.write('classes=%d,\n' % self.classes)
if self.split is not None:
showIndent(outfile, level)
outfile.write('split=%f,\n' % self.split)
if self.delimiter is not None:
showIndent(outfile, level)
outfile.write('delimiter=%s,\n' % quote_python(self.delimiter).encode(ExternalEncoding))
if self.algorithm is not None:
showIndent(outfile, level)
outfile.write('algorithm=model_.algorithmType(\n')
self.algorithm.exportLiteral(outfile, level, name_='algorithm')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'datafile':
datafile_ = child_.text
datafile_ = self.gds_validate_string(datafile_, node, 'datafile')
self.datafile = datafile_
elif nodeName_ == 'input':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'input')
self.input = ival_
elif nodeName_ == 'output':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'output')
self.output = ival_
elif nodeName_ == 'classes':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'classes')
self.classes = ival_
elif nodeName_ == 'split':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'split')
self.split = fval_
elif nodeName_ == 'delimiter':
delimiter_ = child_.text
delimiter_ = self.gds_validate_string(delimiter_, node, 'delimiter')
self.delimiter = delimiter_
elif nodeName_ == 'algorithm':
obj_ = algorithmType.factory()
obj_.build(child_)
self.algorithm = obj_
obj_.original_tagname_ = 'algorithm'
# end class classification
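# Illustrative usage sketch, not generated code: a classification task with
# its scalar children only. The algorithm child is left unset here because it
# expects an algorithmType instance, a class generated elsewhere in this
# module; when present it is exported recursively by exportChildren() above.
# The prediction class that follows mirrors classification but binds its
# algorithm child to algorithmType4 instead.
def _example_export_classification():
    import sys
    task = classification(datafile='iris.csv', input=4, output=1,
                          classes=3, split=0.8, delimiter=',')
    task.export(sys.stdout, level=0, name_='classification')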
class prediction(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, datafile=None, input=None, output=None, classes=None, split=None, delimiter=None, algorithm=None):
self.original_tagname_ = None
self.datafile = datafile
self.input = input
self.output = output
self.classes = classes
self.split = split
self.delimiter = delimiter
self.algorithm = algorithm
def factory(*args_, **kwargs_):
if prediction.subclass:
return prediction.subclass(*args_, **kwargs_)
else:
return prediction(*args_, **kwargs_)
factory = staticmethod(factory)
def get_datafile(self): return self.datafile
def set_datafile(self, datafile): self.datafile = datafile
def get_input(self): return self.input
def set_input(self, input): self.input = input
def get_output(self): return self.output
def set_output(self, output): self.output = output
def get_classes(self): return self.classes
def set_classes(self, classes): self.classes = classes
def get_split(self): return self.split
def set_split(self, split): self.split = split
def get_delimiter(self): return self.delimiter
def set_delimiter(self, delimiter): self.delimiter = delimiter
def get_algorithm(self): return self.algorithm
def set_algorithm(self, algorithm): self.algorithm = algorithm
def hasContent_(self):
if (
self.datafile is not None or
self.input is not None or
self.output is not None or
self.classes is not None or
self.split is not None or
self.delimiter is not None or
self.algorithm is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='prediction', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='prediction')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='prediction', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='prediction'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='prediction', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.datafile is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdatafile>%s</%sdatafile>%s' % (namespace_, self.gds_format_string(quote_xml(self.datafile).encode(ExternalEncoding), input_name='datafile'), namespace_, eol_))
if self.input is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sinput>%s</%sinput>%s' % (namespace_, self.gds_format_integer(self.input, input_name='input'), namespace_, eol_))
if self.output is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%soutput>%s</%soutput>%s' % (namespace_, self.gds_format_integer(self.output, input_name='output'), namespace_, eol_))
if self.classes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sclasses>%s</%sclasses>%s' % (namespace_, self.gds_format_integer(self.classes, input_name='classes'), namespace_, eol_))
if self.split is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%ssplit>%s</%ssplit>%s' % (namespace_, self.gds_format_float(self.split, input_name='split'), namespace_, eol_))
if self.delimiter is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdelimiter>%s</%sdelimiter>%s' % (namespace_, self.gds_format_string(quote_xml(self.delimiter).encode(ExternalEncoding), input_name='delimiter'), namespace_, eol_))
if self.algorithm is not None:
self.algorithm.export(outfile, level, namespace_, name_='algorithm', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='prediction'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.datafile is not None:
showIndent(outfile, level)
outfile.write('datafile=%s,\n' % quote_python(self.datafile).encode(ExternalEncoding))
if self.input is not None:
showIndent(outfile, level)
outfile.write('input=%d,\n' % self.input)
if self.output is not None:
showIndent(outfile, level)
outfile.write('output=%d,\n' % self.output)
if self.classes is not None:
showIndent(outfile, level)
outfile.write('classes=%d,\n' % self.classes)
if self.split is not None:
showIndent(outfile, level)
outfile.write('split=%f,\n' % self.split)
if self.delimiter is not None:
showIndent(outfile, level)
outfile.write('delimiter=%s,\n' % quote_python(self.delimiter).encode(ExternalEncoding))
if self.algorithm is not None:
showIndent(outfile, level)
outfile.write('algorithm=model_.algorithmType4(\n')
self.algorithm.exportLiteral(outfile, level, name_='algorithm')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'datafile':
datafile_ = child_.text
datafile_ = self.gds_validate_string(datafile_, node, 'datafile')
self.datafile = datafile_
elif nodeName_ == 'input':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'input')
self.input = ival_
elif nodeName_ == 'output':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'output')
self.output = ival_
elif nodeName_ == 'classes':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'classes')
self.classes = ival_
elif nodeName_ == 'split':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'split')
self.split = fval_
elif nodeName_ == 'delimiter':
delimiter_ = child_.text
delimiter_ = self.gds_validate_string(delimiter_, node, 'delimiter')
self.delimiter = delimiter_
elif nodeName_ == 'algorithm':
obj_ = algorithmType4.factory()
obj_.build(child_)
self.algorithm = obj_
obj_.original_tagname_ = 'algorithm'
# end class prediction
class clauseType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, extensiontype_=None):
self.original_tagname_ = None
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if clauseType.subclass:
return clauseType.subclass(*args_, **kwargs_)
else:
return clauseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # clauseType declares no element children of its own; subclasses
        # extend this check via super().
        return False
def export(self, outfile, level, namespace_='', name_='clauseType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='clauseType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='clauseType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='clauseType'):
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='clauseType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='clauseType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class clauseType
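# Illustrative usage sketch, not generated code: clauseType is the common base
# of the comparison clauses below. It declares no children of its own; its job
# is to carry the xsi:type marker so the concrete clause kind survives a round
# trip through XML, as exportAttributes() and buildAttributes() above show.
def _example_clause_xsi_type():
    import sys
    clause = clauseType(extensiontype_='greaterThanType')
    clause.export(sys.stdout, level=0, name_='clause')
    # writes a single self-closing <clause ... xsi:type="greaterThanType"/> element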
class greaterThanType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, value2=None, value1=None):
self.original_tagname_ = None
super(greaterThanType, self).__init__()
self.value2 = _cast(None, value2)
self.value1 = _cast(None, value1)
def factory(*args_, **kwargs_):
if greaterThanType.subclass:
return greaterThanType.subclass(*args_, **kwargs_)
else:
return greaterThanType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value2(self): return self.value2
def set_value2(self, value2): self.value2 = value2
def get_value1(self): return self.value1
def set_value1(self, value1): self.value1 = value1
def hasContent_(self):
if (
super(greaterThanType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='greaterThanType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='greaterThanType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='greaterThanType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='greaterThanType'):
super(greaterThanType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='greaterThanType')
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
outfile.write(' value2=%s' % (self.gds_format_string(quote_attrib(self.value2).encode(ExternalEncoding), input_name='value2'), ))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
outfile.write(' value1=%s' % (self.gds_format_string(quote_attrib(self.value1).encode(ExternalEncoding), input_name='value1'), ))
def exportChildren(self, outfile, level, namespace_='', name_='greaterThanType', fromsubclass_=False, pretty_print=True):
super(greaterThanType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='greaterThanType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
showIndent(outfile, level)
outfile.write('value2="%s",\n' % (self.value2,))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
showIndent(outfile, level)
outfile.write('value1="%s",\n' % (self.value1,))
super(greaterThanType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(greaterThanType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value2', node)
if value is not None and 'value2' not in already_processed:
already_processed.add('value2')
self.value2 = value
value = find_attr_value_('value1', node)
if value is not None and 'value1' not in already_processed:
already_processed.add('value1')
self.value1 = value
super(greaterThanType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(greaterThanType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class greaterThanType
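# Illustrative usage sketch, not generated code: the comparison clauses
# (greaterThanType above and the greaterThanOrEqual, lessThan, lessThanOrEqual,
# equal and notEqual variants below) all share one shape: value1 and value2
# arrive as XML attributes and are stored verbatim as strings by
# buildAttributes(). The sketch assumes the module-level find_attr_value_
# helper emitted earlier in this file.
def _example_build_greater_than_clause():
    from xml.etree import ElementTree as etree_
    node = etree_.fromstring('<greaterThan value1="age" value2="18"/>')
    clause = greaterThanType.factory()
    clause.build(node)
    return clause.get_value1(), clause.get_value2()  # -> ('age', '18')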
class greaterThanOrEqualType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, value2=None, value1=None):
self.original_tagname_ = None
super(greaterThanOrEqualType, self).__init__()
self.value2 = _cast(None, value2)
self.value1 = _cast(None, value1)
def factory(*args_, **kwargs_):
if greaterThanOrEqualType.subclass:
return greaterThanOrEqualType.subclass(*args_, **kwargs_)
else:
return greaterThanOrEqualType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value2(self): return self.value2
def set_value2(self, value2): self.value2 = value2
def get_value1(self): return self.value1
def set_value1(self, value1): self.value1 = value1
def hasContent_(self):
if (
super(greaterThanOrEqualType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='greaterThanOrEqualType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='greaterThanOrEqualType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='greaterThanOrEqualType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='greaterThanOrEqualType'):
super(greaterThanOrEqualType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='greaterThanOrEqualType')
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
outfile.write(' value2=%s' % (self.gds_format_string(quote_attrib(self.value2).encode(ExternalEncoding), input_name='value2'), ))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
outfile.write(' value1=%s' % (self.gds_format_string(quote_attrib(self.value1).encode(ExternalEncoding), input_name='value1'), ))
def exportChildren(self, outfile, level, namespace_='', name_='greaterThanOrEqualType', fromsubclass_=False, pretty_print=True):
super(greaterThanOrEqualType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='greaterThanOrEqualType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
showIndent(outfile, level)
outfile.write('value2="%s",\n' % (self.value2,))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
showIndent(outfile, level)
outfile.write('value1="%s",\n' % (self.value1,))
super(greaterThanOrEqualType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(greaterThanOrEqualType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value2', node)
if value is not None and 'value2' not in already_processed:
already_processed.add('value2')
self.value2 = value
value = find_attr_value_('value1', node)
if value is not None and 'value1' not in already_processed:
already_processed.add('value1')
self.value1 = value
super(greaterThanOrEqualType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(greaterThanOrEqualType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class greaterThanOrEqualType
class lessThanType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, value2=None, value1=None):
self.original_tagname_ = None
super(lessThanType, self).__init__()
self.value2 = _cast(None, value2)
self.value1 = _cast(None, value1)
def factory(*args_, **kwargs_):
if lessThanType.subclass:
return lessThanType.subclass(*args_, **kwargs_)
else:
return lessThanType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value2(self): return self.value2
def set_value2(self, value2): self.value2 = value2
def get_value1(self): return self.value1
def set_value1(self, value1): self.value1 = value1
def hasContent_(self):
if (
super(lessThanType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='lessThanType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='lessThanType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='lessThanType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='lessThanType'):
super(lessThanType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='lessThanType')
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
outfile.write(' value2=%s' % (self.gds_format_string(quote_attrib(self.value2).encode(ExternalEncoding), input_name='value2'), ))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
outfile.write(' value1=%s' % (self.gds_format_string(quote_attrib(self.value1).encode(ExternalEncoding), input_name='value1'), ))
def exportChildren(self, outfile, level, namespace_='', name_='lessThanType', fromsubclass_=False, pretty_print=True):
super(lessThanType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='lessThanType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
showIndent(outfile, level)
outfile.write('value2="%s",\n' % (self.value2,))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
showIndent(outfile, level)
outfile.write('value1="%s",\n' % (self.value1,))
super(lessThanType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(lessThanType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value2', node)
if value is not None and 'value2' not in already_processed:
already_processed.add('value2')
self.value2 = value
value = find_attr_value_('value1', node)
if value is not None and 'value1' not in already_processed:
already_processed.add('value1')
self.value1 = value
super(lessThanType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(lessThanType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class lessThanType
class lessThanOrEqualType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, value2=None, value1=None):
self.original_tagname_ = None
super(lessThanOrEqualType, self).__init__()
self.value2 = _cast(None, value2)
self.value1 = _cast(None, value1)
def factory(*args_, **kwargs_):
if lessThanOrEqualType.subclass:
return lessThanOrEqualType.subclass(*args_, **kwargs_)
else:
return lessThanOrEqualType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value2(self): return self.value2
def set_value2(self, value2): self.value2 = value2
def get_value1(self): return self.value1
def set_value1(self, value1): self.value1 = value1
def hasContent_(self):
if (
super(lessThanOrEqualType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='lessThanOrEqualType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='lessThanOrEqualType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='lessThanOrEqualType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='lessThanOrEqualType'):
super(lessThanOrEqualType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='lessThanOrEqualType')
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
outfile.write(' value2=%s' % (self.gds_format_string(quote_attrib(self.value2).encode(ExternalEncoding), input_name='value2'), ))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
outfile.write(' value1=%s' % (self.gds_format_string(quote_attrib(self.value1).encode(ExternalEncoding), input_name='value1'), ))
def exportChildren(self, outfile, level, namespace_='', name_='lessThanOrEqualType', fromsubclass_=False, pretty_print=True):
super(lessThanOrEqualType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='lessThanOrEqualType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
showIndent(outfile, level)
outfile.write('value2="%s",\n' % (self.value2,))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
showIndent(outfile, level)
outfile.write('value1="%s",\n' % (self.value1,))
super(lessThanOrEqualType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(lessThanOrEqualType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value2', node)
if value is not None and 'value2' not in already_processed:
already_processed.add('value2')
self.value2 = value
value = find_attr_value_('value1', node)
if value is not None and 'value1' not in already_processed:
already_processed.add('value1')
self.value1 = value
super(lessThanOrEqualType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(lessThanOrEqualType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class lessThanOrEqualType
class equalType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, value2=None, value1=None):
self.original_tagname_ = None
super(equalType, self).__init__()
self.value2 = _cast(None, value2)
self.value1 = _cast(None, value1)
def factory(*args_, **kwargs_):
if equalType.subclass:
return equalType.subclass(*args_, **kwargs_)
else:
return equalType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value2(self): return self.value2
def set_value2(self, value2): self.value2 = value2
def get_value1(self): return self.value1
def set_value1(self, value1): self.value1 = value1
def hasContent_(self):
if (
super(equalType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='equalType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='equalType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='equalType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='equalType'):
super(equalType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='equalType')
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
outfile.write(' value2=%s' % (self.gds_format_string(quote_attrib(self.value2).encode(ExternalEncoding), input_name='value2'), ))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
outfile.write(' value1=%s' % (self.gds_format_string(quote_attrib(self.value1).encode(ExternalEncoding), input_name='value1'), ))
def exportChildren(self, outfile, level, namespace_='', name_='equalType', fromsubclass_=False, pretty_print=True):
super(equalType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='equalType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
showIndent(outfile, level)
outfile.write('value2="%s",\n' % (self.value2,))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
showIndent(outfile, level)
outfile.write('value1="%s",\n' % (self.value1,))
super(equalType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(equalType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value2', node)
if value is not None and 'value2' not in already_processed:
already_processed.add('value2')
self.value2 = value
value = find_attr_value_('value1', node)
if value is not None and 'value1' not in already_processed:
already_processed.add('value1')
self.value1 = value
super(equalType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(equalType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class equalType
class notEqualType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, value2=None, value1=None):
self.original_tagname_ = None
super(notEqualType, self).__init__()
self.value2 = _cast(None, value2)
self.value1 = _cast(None, value1)
def factory(*args_, **kwargs_):
if notEqualType.subclass:
return notEqualType.subclass(*args_, **kwargs_)
else:
return notEqualType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value2(self): return self.value2
def set_value2(self, value2): self.value2 = value2
def get_value1(self): return self.value1
def set_value1(self, value1): self.value1 = value1
def hasContent_(self):
if (
super(notEqualType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='notEqualType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='notEqualType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='notEqualType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='notEqualType'):
super(notEqualType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='notEqualType')
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
outfile.write(' value2=%s' % (self.gds_format_string(quote_attrib(self.value2).encode(ExternalEncoding), input_name='value2'), ))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
outfile.write(' value1=%s' % (self.gds_format_string(quote_attrib(self.value1).encode(ExternalEncoding), input_name='value1'), ))
def exportChildren(self, outfile, level, namespace_='', name_='notEqualType', fromsubclass_=False, pretty_print=True):
super(notEqualType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='notEqualType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.value2 is not None and 'value2' not in already_processed:
already_processed.add('value2')
showIndent(outfile, level)
outfile.write('value2="%s",\n' % (self.value2,))
if self.value1 is not None and 'value1' not in already_processed:
already_processed.add('value1')
showIndent(outfile, level)
outfile.write('value1="%s",\n' % (self.value1,))
super(notEqualType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(notEqualType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value2', node)
if value is not None and 'value2' not in already_processed:
already_processed.add('value2')
self.value2 = value
value = find_attr_value_('value1', node)
if value is not None and 'value1' not in already_processed:
already_processed.add('value1')
self.value1 = value
super(notEqualType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(notEqualType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class notEqualType
class betweenType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, max=None, value=None, min=None):
self.original_tagname_ = None
super(betweenType, self).__init__()
self.max = _cast(None, max)
self.value = _cast(None, value)
self.min = _cast(None, min)
def factory(*args_, **kwargs_):
if betweenType.subclass:
return betweenType.subclass(*args_, **kwargs_)
else:
return betweenType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_max(self): return self.max
def set_max(self, max): self.max = max
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_min(self): return self.min
def set_min(self, min): self.min = min
def hasContent_(self):
if (
super(betweenType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='betweenType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='betweenType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='betweenType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='betweenType'):
super(betweenType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='betweenType')
if self.max is not None and 'max' not in already_processed:
already_processed.add('max')
outfile.write(' max=%s' % (self.gds_format_string(quote_attrib(self.max).encode(ExternalEncoding), input_name='max'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
if self.min is not None and 'min' not in already_processed:
already_processed.add('min')
outfile.write(' min=%s' % (self.gds_format_string(quote_attrib(self.min).encode(ExternalEncoding), input_name='min'), ))
def exportChildren(self, outfile, level, namespace_='', name_='betweenType', fromsubclass_=False, pretty_print=True):
super(betweenType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='betweenType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.max is not None and 'max' not in already_processed:
already_processed.add('max')
showIndent(outfile, level)
outfile.write('max="%s",\n' % (self.max,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
if self.min is not None and 'min' not in already_processed:
already_processed.add('min')
showIndent(outfile, level)
outfile.write('min="%s",\n' % (self.min,))
super(betweenType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(betweenType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('max', node)
if value is not None and 'max' not in already_processed:
already_processed.add('max')
self.max = value
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('min', node)
if value is not None and 'min' not in already_processed:
already_processed.add('min')
self.min = value
super(betweenType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(betweenType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class betweenType
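# notBetweenType: same min/value/max attributes as betweenType; the name suggests
# the negated form of the range test.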
class notBetweenType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, max=None, value=None, min=None):
self.original_tagname_ = None
super(notBetweenType, self).__init__()
self.max = _cast(None, max)
self.value = _cast(None, value)
self.min = _cast(None, min)
def factory(*args_, **kwargs_):
if notBetweenType.subclass:
return notBetweenType.subclass(*args_, **kwargs_)
else:
return notBetweenType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_max(self): return self.max
def set_max(self, max): self.max = max
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_min(self): return self.min
def set_min(self, min): self.min = min
def hasContent_(self):
if (
super(notBetweenType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='notBetweenType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='notBetweenType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='notBetweenType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='notBetweenType'):
super(notBetweenType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='notBetweenType')
if self.max is not None and 'max' not in already_processed:
already_processed.add('max')
outfile.write(' max=%s' % (self.gds_format_string(quote_attrib(self.max).encode(ExternalEncoding), input_name='max'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
if self.min is not None and 'min' not in already_processed:
already_processed.add('min')
outfile.write(' min=%s' % (self.gds_format_string(quote_attrib(self.min).encode(ExternalEncoding), input_name='min'), ))
def exportChildren(self, outfile, level, namespace_='', name_='notBetweenType', fromsubclass_=False, pretty_print=True):
super(notBetweenType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='notBetweenType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.max is not None and 'max' not in already_processed:
already_processed.add('max')
showIndent(outfile, level)
outfile.write('max="%s",\n' % (self.max,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
if self.min is not None and 'min' not in already_processed:
already_processed.add('min')
showIndent(outfile, level)
outfile.write('min="%s",\n' % (self.min,))
super(notBetweenType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(notBetweenType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('max', node)
if value is not None and 'max' not in already_processed:
already_processed.add('max')
self.max = value
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('min', node)
if value is not None and 'min' not in already_processed:
already_processed.add('min')
self.min = value
super(notBetweenType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(notBetweenType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class notBetweenType
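# orType: composite clause holding a list of nested <clause> children; the
# concrete child class is resolved at build time from the xsi:type (or plain
# type) attribute.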
class orType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, clause=None):
self.original_tagname_ = None
super(orType, self).__init__()
if clause is None:
self.clause = []
else:
self.clause = clause
def factory(*args_, **kwargs_):
if orType.subclass:
return orType.subclass(*args_, **kwargs_)
else:
return orType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_clause(self): return self.clause
def set_clause(self, clause): self.clause = clause
def add_clause(self, value): self.clause.append(value)
def insert_clause_at(self, index, value): self.clause.insert(index, value)
def replace_clause_at(self, index, value): self.clause[index] = value
def hasContent_(self):
if (
self.clause or
super(orType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='orType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='orType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='orType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='orType'):
super(orType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='orType')
def exportChildren(self, outfile, level, namespace_='', name_='orType', fromsubclass_=False, pretty_print=True):
super(orType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for clause_ in self.clause:
clause_.export(outfile, level, namespace_, name_='clause', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='orType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(orType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(orType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('clause=[\n')
level += 1
for clause_ in self.clause:
showIndent(outfile, level)
outfile.write('model_.clauseType(\n')
clause_.exportLiteral(outfile, level, name_='clauseType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(orType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'clause':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <clause> element')
self.clause.append(obj_)
obj_.original_tagname_ = 'clause'
super(orType, self).buildChildren(child_, node, nodeName_, True)
# end class orType
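# andType: conjunction counterpart of orType, with the same nested <clause>
# handling and xsi:type dispatch.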
class andType(clauseType):
subclass = None
superclass = clauseType
def __init__(self, clause=None):
self.original_tagname_ = None
super(andType, self).__init__()
if clause is None:
self.clause = []
else:
self.clause = clause
def factory(*args_, **kwargs_):
if andType.subclass:
return andType.subclass(*args_, **kwargs_)
else:
return andType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_clause(self): return self.clause
def set_clause(self, clause): self.clause = clause
def add_clause(self, value): self.clause.append(value)
def insert_clause_at(self, index, value): self.clause.insert(index, value)
def replace_clause_at(self, index, value): self.clause[index] = value
def hasContent_(self):
if (
self.clause or
super(andType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='andType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='andType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='andType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='andType'):
super(andType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='andType')
def exportChildren(self, outfile, level, namespace_='', name_='andType', fromsubclass_=False, pretty_print=True):
super(andType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for clause_ in self.clause:
clause_.export(outfile, level, namespace_, name_='clause', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='andType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(andType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(andType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('clause=[\n')
level += 1
for clause_ in self.clause:
showIndent(outfile, level)
outfile.write('model_.clauseType(\n')
clause_.exportLiteral(outfile, level, name_='clauseType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(andType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'clause':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <clause> element')
self.clause.append(obj_)
obj_.original_tagname_ = 'clause'
super(andType, self).buildChildren(child_, node, nodeName_, True)
# end class andType
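# factType: base class for facts. Carries a name attribute and supports
# xsi:type extension so subclasses (predicateType, structType, instanceType)
# can be parsed and exported polymorphically.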
class factType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, extensiontype_=None):
self.original_tagname_ = None
self.name = _cast(None, name)
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if factType.subclass:
return factType.subclass(*args_, **kwargs_)
else:
return factType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='factType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='factType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='factType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='factType'):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='factType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='factType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class factType
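# predicateType: fact with a single value attribute in addition to the
# inherited name.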
class predicateType(factType):
subclass = None
superclass = factType
def __init__(self, name=None, value=None):
self.original_tagname_ = None
super(predicateType, self).__init__(name, )
self.value = _cast(None, value)
def factory(*args_, **kwargs_):
if predicateType.subclass:
return predicateType.subclass(*args_, **kwargs_)
else:
return predicateType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
super(predicateType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='predicateType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='predicateType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='predicateType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='predicateType'):
super(predicateType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='predicateType')
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
def exportChildren(self, outfile, level, namespace_='', name_='predicateType', fromsubclass_=False, pretty_print=True):
super(predicateType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='predicateType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
super(predicateType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(predicateType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
super(predicateType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(predicateType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class predicateType
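# structType: fact with an optional <comment> child and a list of <field>
# children built as fieldType objects.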
class structType(factType):
subclass = None
superclass = factType
def __init__(self, name=None, comment=None, field=None):
self.original_tagname_ = None
super(structType, self).__init__(name, )
self.comment = comment
if field is None:
self.field = []
else:
self.field = field
def factory(*args_, **kwargs_):
if structType.subclass:
return structType.subclass(*args_, **kwargs_)
else:
return structType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_field(self): return self.field
def set_field(self, field): self.field = field
def add_field(self, value): self.field.append(value)
def insert_field_at(self, index, value): self.field.insert(index, value)
def replace_field_at(self, index, value): self.field[index] = value
def hasContent_(self):
if (
self.comment is not None or
self.field or
super(structType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='structType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='structType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='structType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='structType'):
super(structType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='structType')
def exportChildren(self, outfile, level, namespace_='', name_='structType', fromsubclass_=False, pretty_print=True):
super(structType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.comment is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomment>%s</%scomment>%s' % (namespace_, self.gds_format_string(quote_xml(self.comment).encode(ExternalEncoding), input_name='comment'), namespace_, eol_))
for field_ in self.field:
field_.export(outfile, level, namespace_, name_='field', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='structType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(structType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(structType, self).exportLiteralChildren(outfile, level, name_)
if self.comment is not None:
showIndent(outfile, level)
outfile.write('comment=%s,\n' % quote_python(self.comment).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('field=[\n')
level += 1
for field_ in self.field:
showIndent(outfile, level)
outfile.write('model_.fieldType(\n')
field_.exportLiteral(outfile, level, name_='fieldType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(structType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'comment':
comment_ = child_.text
comment_ = self.gds_validate_string(comment_, node, 'comment')
self.comment = comment_
elif nodeName_ == 'field':
obj_ = fieldType.factory()
obj_.build(child_)
self.field.append(obj_)
obj_.original_tagname_ = 'field'
super(structType, self).buildChildren(child_, node, nodeName_, True)
# end class structType
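# instanceType: fact with a type attribute, an optional <comment> child, and
# <field> children built as fieldType5 objects.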
class instanceType(factType):
subclass = None
superclass = factType
def __init__(self, name=None, type_=None, comment=None, field=None):
self.original_tagname_ = None
super(instanceType, self).__init__(name, )
self.type_ = _cast(None, type_)
self.comment = comment
if field is None:
self.field = []
else:
self.field = field
def factory(*args_, **kwargs_):
if instanceType.subclass:
return instanceType.subclass(*args_, **kwargs_)
else:
return instanceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_field(self): return self.field
def set_field(self, field): self.field = field
def add_field(self, value): self.field.append(value)
def insert_field_at(self, index, value): self.field.insert(index, value)
def replace_field_at(self, index, value): self.field[index] = value
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def hasContent_(self):
if (
self.comment is not None or
self.field or
super(instanceType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='instanceType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='instanceType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='instanceType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='instanceType'):
super(instanceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='instanceType')
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
def exportChildren(self, outfile, level, namespace_='', name_='instanceType', fromsubclass_=False, pretty_print=True):
super(instanceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.comment is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomment>%s</%scomment>%s' % (namespace_, self.gds_format_string(quote_xml(self.comment).encode(ExternalEncoding), input_name='comment'), namespace_, eol_))
for field_ in self.field:
field_.export(outfile, level, namespace_, name_='field', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='instanceType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
super(instanceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(instanceType, self).exportLiteralChildren(outfile, level, name_)
if self.comment is not None:
showIndent(outfile, level)
outfile.write('comment=%s,\n' % quote_python(self.comment).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('field=[\n')
level += 1
for field_ in self.field:
showIndent(outfile, level)
outfile.write('model_.fieldType5(\n')
field_.exportLiteral(outfile, level, name_='fieldType5')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
super(instanceType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'comment':
comment_ = child_.text
comment_ = self.gds_validate_string(comment_, node, 'comment')
self.comment = comment_
elif nodeName_ == 'field':
obj_ = fieldType5.factory()
obj_.build(child_)
self.field.append(obj_)
obj_.original_tagname_ = 'field'
super(instanceType, self).buildChildren(child_, node, nodeName_, True)
# end class instanceType
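# actionType: base class for rule actions; no attributes of its own apart from
# xsi:type extension support.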
class actionType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, extensiontype_=None):
self.original_tagname_ = None
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if actionType.subclass:
return actionType.subclass(*args_, **kwargs_)
else:
return actionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
        return False
def export(self, outfile, level, namespace_='', name_='actionType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='actionType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='actionType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='actionType'):
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
pass
def exportChildren(self, outfile, level, namespace_='', name_='actionType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='actionType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class actionType
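# setType: action carrying name and value attributes (presumably assigns the
# value to the named fact or field).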
class setType(actionType):
subclass = None
superclass = actionType
def __init__(self, name=None, value=None):
self.original_tagname_ = None
super(setType, self).__init__()
self.name = _cast(None, name)
self.value = _cast(None, value)
def factory(*args_, **kwargs_):
if setType.subclass:
return setType.subclass(*args_, **kwargs_)
else:
return setType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
super(setType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='setType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='setType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='setType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='setType'):
super(setType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='setType')
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
def exportChildren(self, outfile, level, namespace_='', name_='setType', fromsubclass_=False, pretty_print=True):
super(setType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
pass
def exportLiteral(self, outfile, level, name_='setType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
super(setType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(setType, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
super(setType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(setType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class setType
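# runRuleType: action that references another rule by name and carries a list
# of <argument> children (argumentType).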
class runRuleType(actionType):
subclass = None
superclass = actionType
def __init__(self, name=None, argument=None):
self.original_tagname_ = None
super(runRuleType, self).__init__()
self.name = _cast(None, name)
if argument is None:
self.argument = []
else:
self.argument = argument
def factory(*args_, **kwargs_):
if runRuleType.subclass:
return runRuleType.subclass(*args_, **kwargs_)
else:
return runRuleType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_argument(self): return self.argument
def set_argument(self, argument): self.argument = argument
def add_argument(self, value): self.argument.append(value)
def insert_argument_at(self, index, value): self.argument.insert(index, value)
def replace_argument_at(self, index, value): self.argument[index] = value
def get_name(self): return self.name
def set_name(self, name): self.name = name
def hasContent_(self):
if (
self.argument or
super(runRuleType, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='runRuleType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='runRuleType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='runRuleType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='runRuleType'):
super(runRuleType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='runRuleType')
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='runRuleType', fromsubclass_=False, pretty_print=True):
super(runRuleType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for argument_ in self.argument:
argument_.export(outfile, level, namespace_, name_='argument', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='runRuleType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
super(runRuleType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(runRuleType, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('argument=[\n')
level += 1
for argument_ in self.argument:
showIndent(outfile, level)
outfile.write('model_.argumentType(\n')
argument_.exportLiteral(outfile, level, name_='argumentType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
super(runRuleType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'argument':
obj_ = argumentType.factory()
obj_.build(child_)
self.argument.append(obj_)
obj_.original_tagname_ = 'argument'
super(runRuleType, self).buildChildren(child_, node, nodeName_, True)
# end class runRuleType
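# ExpertSystem: root element of the document, holding a list of comment
# strings, a list of facts (concrete class chosen from xsi:type), and a list
# of ruleType rules.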
class ExpertSystem(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, comment=None, fact=None, rule=None):
self.original_tagname_ = None
if comment is None:
self.comment = []
else:
self.comment = comment
if fact is None:
self.fact = []
else:
self.fact = fact
if rule is None:
self.rule = []
else:
self.rule = rule
def factory(*args_, **kwargs_):
if ExpertSystem.subclass:
return ExpertSystem.subclass(*args_, **kwargs_)
else:
return ExpertSystem(*args_, **kwargs_)
factory = staticmethod(factory)
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def add_comment(self, value): self.comment.append(value)
def insert_comment_at(self, index, value): self.comment.insert(index, value)
def replace_comment_at(self, index, value): self.comment[index] = value
def get_fact(self): return self.fact
def set_fact(self, fact): self.fact = fact
def add_fact(self, value): self.fact.append(value)
def insert_fact_at(self, index, value): self.fact.insert(index, value)
def replace_fact_at(self, index, value): self.fact[index] = value
def get_rule(self): return self.rule
def set_rule(self, rule): self.rule = rule
def add_rule(self, value): self.rule.append(value)
def insert_rule_at(self, index, value): self.rule.insert(index, value)
def replace_rule_at(self, index, value): self.rule[index] = value
def hasContent_(self):
if (
self.comment or
self.fact or
self.rule
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ExpertSystem', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ExpertSystem')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ExpertSystem', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ExpertSystem'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ExpertSystem', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for comment_ in self.comment:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomment>%s</%scomment>%s' % (namespace_, self.gds_format_string(quote_xml(comment_).encode(ExternalEncoding), input_name='comment'), namespace_, eol_))
for fact_ in self.fact:
fact_.export(outfile, level, namespace_, name_='fact', pretty_print=pretty_print)
for rule_ in self.rule:
rule_.export(outfile, level, namespace_, name_='rule', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ExpertSystem'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('comment=[\n')
level += 1
for comment_ in self.comment:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(comment_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('fact=[\n')
level += 1
for fact_ in self.fact:
showIndent(outfile, level)
outfile.write('model_.factType(\n')
fact_.exportLiteral(outfile, level, name_='factType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('rule=[\n')
level += 1
for rule_ in self.rule:
showIndent(outfile, level)
outfile.write('model_.ruleType(\n')
rule_.exportLiteral(outfile, level, name_='ruleType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'comment':
comment_ = child_.text
comment_ = self.gds_validate_string(comment_, node, 'comment')
self.comment.append(comment_)
elif nodeName_ == 'fact':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <fact> element')
self.fact.append(obj_)
obj_.original_tagname_ = 'fact'
elif nodeName_ == 'rule':
obj_ = ruleType.factory()
obj_.build(child_)
self.rule.append(obj_)
obj_.original_tagname_ = 'rule'
# end class ExpertSystem
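# ---------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the generated bindings). It
# shows one way to round-trip a document with the classes above, using only
# behavior visible in this module: build() walks an ElementTree element and
# export() writes XML to any file-like object. The helper name
# _example_roundtrip and the input file name are illustrative assumptions,
# not part of the original API.
def _example_roundtrip(path='expert_system.xml'):
    import sys
    import xml.etree.ElementTree as ET
    root = ET.parse(path).getroot()
    system = ExpertSystem.factory()    # honors ExpertSystem.subclass if set
    system.build(root)                 # fills the comment/fact/rule lists
    system.export(sys.stdout, 0)       # re-serialize the tree to stdout
    return system
# ---------------------------------------------------------------------------
# algorithmType: container offering either a RadialBasisFunctionNetwork or a
# MultiLayerPerceptron child element.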
class algorithmType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, RadialBasisFunctionNetwork=None, MultiLayerPerceptron=None):
self.original_tagname_ = None
self.RadialBasisFunctionNetwork = RadialBasisFunctionNetwork
self.MultiLayerPerceptron = MultiLayerPerceptron
def factory(*args_, **kwargs_):
if algorithmType.subclass:
return algorithmType.subclass(*args_, **kwargs_)
else:
return algorithmType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_RadialBasisFunctionNetwork(self): return self.RadialBasisFunctionNetwork
def set_RadialBasisFunctionNetwork(self, RadialBasisFunctionNetwork): self.RadialBasisFunctionNetwork = RadialBasisFunctionNetwork
def get_MultiLayerPerceptron(self): return self.MultiLayerPerceptron
def set_MultiLayerPerceptron(self, MultiLayerPerceptron): self.MultiLayerPerceptron = MultiLayerPerceptron
def hasContent_(self):
if (
self.RadialBasisFunctionNetwork is not None or
self.MultiLayerPerceptron is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='algorithmType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='algorithmType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='algorithmType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='algorithmType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='algorithmType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.RadialBasisFunctionNetwork is not None:
self.RadialBasisFunctionNetwork.export(outfile, level, namespace_, name_='RadialBasisFunctionNetwork', pretty_print=pretty_print)
if self.MultiLayerPerceptron is not None:
self.MultiLayerPerceptron.export(outfile, level, namespace_, name_='MultiLayerPerceptron', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='algorithmType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.RadialBasisFunctionNetwork is not None:
showIndent(outfile, level)
outfile.write('RadialBasisFunctionNetwork=model_.RadialBasisFunctionNetwork(\n')
self.RadialBasisFunctionNetwork.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.MultiLayerPerceptron is not None:
showIndent(outfile, level)
outfile.write('MultiLayerPerceptron=model_.MultiLayerPerceptron(\n')
self.MultiLayerPerceptron.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'RadialBasisFunctionNetwork':
obj_ = RadialBasisFunctionNetwork.factory()
obj_.build(child_)
self.RadialBasisFunctionNetwork = obj_
obj_.original_tagname_ = 'RadialBasisFunctionNetwork'
elif nodeName_ == 'MultiLayerPerceptron':
obj_ = MultiLayerPerceptron.factory()
obj_.build(child_)
self.MultiLayerPerceptron = obj_
obj_.original_tagname_ = 'MultiLayerPerceptron'
# end class algorithmType
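# algorithmType4: variant container holding a single RecurrentNeuralNetwork
# child element.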
class algorithmType4(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, RecurrentNeuralNetwork=None):
self.original_tagname_ = None
self.RecurrentNeuralNetwork = RecurrentNeuralNetwork
def factory(*args_, **kwargs_):
if algorithmType4.subclass:
return algorithmType4.subclass(*args_, **kwargs_)
else:
return algorithmType4(*args_, **kwargs_)
factory = staticmethod(factory)
def get_RecurrentNeuralNetwork(self): return self.RecurrentNeuralNetwork
def set_RecurrentNeuralNetwork(self, RecurrentNeuralNetwork): self.RecurrentNeuralNetwork = RecurrentNeuralNetwork
def hasContent_(self):
if (
self.RecurrentNeuralNetwork is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='algorithmType4', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='algorithmType4')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='algorithmType4', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='algorithmType4'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='algorithmType4', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.RecurrentNeuralNetwork is not None:
self.RecurrentNeuralNetwork.export(outfile, level, namespace_, name_='RecurrentNeuralNetwork', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='algorithmType4'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.RecurrentNeuralNetwork is not None:
showIndent(outfile, level)
outfile.write('RecurrentNeuralNetwork=model_.RecurrentNeuralNetwork(\n')
self.RecurrentNeuralNetwork.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'RecurrentNeuralNetwork':
obj_ = RecurrentNeuralNetwork.factory()
obj_.build(child_)
self.RecurrentNeuralNetwork = obj_
obj_.original_tagname_ = 'RecurrentNeuralNetwork'
# end class algorithmType4
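# fieldType: leaf element carrying initialValue, type and name attributes;
# it has no child content.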
class fieldType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, initialValue=None, type_=None, name=None):
self.original_tagname_ = None
self.initialValue = _cast(None, initialValue)
self.type_ = _cast(None, type_)
self.name = _cast(None, name)
def factory(*args_, **kwargs_):
if fieldType.subclass:
return fieldType.subclass(*args_, **kwargs_)
else:
return fieldType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_initialValue(self): return self.initialValue
def set_initialValue(self, initialValue): self.initialValue = initialValue
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_name(self): return self.name
def set_name(self, name): self.name = name
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='fieldType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='fieldType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='fieldType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='fieldType'):
if self.initialValue is not None and 'initialValue' not in already_processed:
already_processed.add('initialValue')
outfile.write(' initialValue=%s' % (self.gds_format_string(quote_attrib(self.initialValue).encode(ExternalEncoding), input_name='initialValue'), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='fieldType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='fieldType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.initialValue is not None and 'initialValue' not in already_processed:
already_processed.add('initialValue')
showIndent(outfile, level)
outfile.write('initialValue="%s",\n' % (self.initialValue,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('initialValue', node)
if value is not None and 'initialValue' not in already_processed:
already_processed.add('initialValue')
self.initialValue = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class fieldType
class fieldType5(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, value=None):
self.original_tagname_ = None
self.name = _cast(None, name)
self.value = _cast(None, value)
def factory(*args_, **kwargs_):
if fieldType5.subclass:
return fieldType5.subclass(*args_, **kwargs_)
else:
return fieldType5(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='fieldType5', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='fieldType5')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='fieldType5', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='fieldType5'):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
def exportChildren(self, outfile, level, namespace_='', name_='fieldType5', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='fieldType5'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class fieldType5
class argumentType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, value=None):
self.original_tagname_ = None
self.name = _cast(None, name)
self.value = _cast(None, value)
def factory(*args_, **kwargs_):
if argumentType.subclass:
return argumentType.subclass(*args_, **kwargs_)
else:
return argumentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_value(self): return self.value
def set_value(self, value): self.value = value
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='argumentType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='argumentType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='argumentType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='argumentType'):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
def exportChildren(self, outfile, level, namespace_='', name_='argumentType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='argumentType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class argumentType
class ruleType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, comment=None, parameter=None, if_=None, then=None, else_=None):
self.original_tagname_ = None
self.name = _cast(None, name)
self.comment = comment
if parameter is None:
self.parameter = []
else:
self.parameter = parameter
self.if_ = if_
self.then = then
self.else_ = else_
def factory(*args_, **kwargs_):
if ruleType.subclass:
return ruleType.subclass(*args_, **kwargs_)
else:
return ruleType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_parameter(self): return self.parameter
def set_parameter(self, parameter): self.parameter = parameter
def add_parameter(self, value): self.parameter.append(value)
def insert_parameter_at(self, index, value): self.parameter.insert(index, value)
def replace_parameter_at(self, index, value): self.parameter[index] = value
def get_if(self): return self.if_
def set_if(self, if_): self.if_ = if_
def get_then(self): return self.then
def set_then(self, then): self.then = then
def get_else(self): return self.else_
def set_else(self, else_): self.else_ = else_
def get_name(self): return self.name
def set_name(self, name): self.name = name
def hasContent_(self):
if (
self.comment is not None or
self.parameter or
self.if_ is not None or
self.then is not None or
self.else_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ruleType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ruleType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ruleType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ruleType'):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='ruleType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.comment is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scomment>%s</%scomment>%s' % (namespace_, self.gds_format_string(quote_xml(self.comment).encode(ExternalEncoding), input_name='comment'), namespace_, eol_))
for parameter_ in self.parameter:
parameter_.export(outfile, level, namespace_, name_='parameter', pretty_print=pretty_print)
if self.if_ is not None:
self.if_.export(outfile, level, namespace_, name_='if', pretty_print=pretty_print)
if self.then is not None:
self.then.export(outfile, level, namespace_, name_='then', pretty_print=pretty_print)
if self.else_ is not None:
self.else_.export(outfile, level, namespace_, name_='else', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ruleType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
if self.comment is not None:
showIndent(outfile, level)
outfile.write('comment=%s,\n' % quote_python(self.comment).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('parameter=[\n')
level += 1
for parameter_ in self.parameter:
showIndent(outfile, level)
outfile.write('model_.parameterType(\n')
parameter_.exportLiteral(outfile, level, name_='parameterType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.if_ is not None:
showIndent(outfile, level)
outfile.write('if_=model_.ifType(\n')
self.if_.exportLiteral(outfile, level, name_='if')
showIndent(outfile, level)
outfile.write('),\n')
if self.then is not None:
showIndent(outfile, level)
outfile.write('then=model_.thenType(\n')
self.then.exportLiteral(outfile, level, name_='then')
showIndent(outfile, level)
outfile.write('),\n')
if self.else_ is not None:
showIndent(outfile, level)
outfile.write('else_=model_.elseType(\n')
self.else_.exportLiteral(outfile, level, name_='else')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'comment':
comment_ = child_.text
comment_ = self.gds_validate_string(comment_, node, 'comment')
self.comment = comment_
elif nodeName_ == 'parameter':
obj_ = parameterType.factory()
obj_.build(child_)
self.parameter.append(obj_)
obj_.original_tagname_ = 'parameter'
elif nodeName_ == 'if':
obj_ = ifType.factory()
obj_.build(child_)
self.if_ = obj_
obj_.original_tagname_ = 'if'
elif nodeName_ == 'then':
obj_ = thenType.factory()
obj_.build(child_)
self.then = obj_
obj_.original_tagname_ = 'then'
elif nodeName_ == 'else':
obj_ = elseType.factory()
obj_.build(child_)
self.else_ = obj_
obj_.original_tagname_ = 'else'
# end class ruleType
class parameterType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, type_=None, name=None):
self.original_tagname_ = None
self.type_ = _cast(None, type_)
self.name = _cast(None, name)
def factory(*args_, **kwargs_):
if parameterType.subclass:
return parameterType.subclass(*args_, **kwargs_)
else:
return parameterType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_name(self): return self.name
def set_name(self, name): self.name = name
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='parameterType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='parameterType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='parameterType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='parameterType'):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='parameterType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='parameterType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class parameterType
class ifType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, clause=None):
self.original_tagname_ = None
self.clause = clause
def factory(*args_, **kwargs_):
if ifType.subclass:
return ifType.subclass(*args_, **kwargs_)
else:
return ifType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_clause(self): return self.clause
def set_clause(self, clause): self.clause = clause
def hasContent_(self):
if (
self.clause is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ifType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ifType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ifType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ifType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ifType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.clause is not None:
self.clause.export(outfile, level, namespace_, name_='clause', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='ifType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.clause is not None:
showIndent(outfile, level)
outfile.write('clause=model_.clauseType(\n')
self.clause.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'clause':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <clause> element')
self.clause = obj_
obj_.original_tagname_ = 'clause'
# end class ifType
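# Editorial note, not produced by generateDS: buildChildren above resolves the
# <clause> child through its xsi:type (or plain type) attribute and looks the
# class name up in globals(), so the schema can plug in any of the clause
# classes exported below (andType, orType, greaterThanType, ...). The sketch
# below is a hypothetical rule document shaped to match ruleType/ifType/thenType;
# the attribute values are invented, and which concrete types are legal for
# <clause> and <action> depends on the schema, which is not shown here.
_EXAMPLE_RULE_XML = """\
<rule name="exampleRule">
    <comment>fires when a fact exceeds a threshold</comment>
    <parameter type="int" name="threshold"/>
    <if>
        <clause xsi:type="greaterThanType"/>
    </if>
    <then>
        <action xsi:type="setType"/>
    </then>
</rule>
"""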
class thenType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, action=None):
self.original_tagname_ = None
self.action = action
def factory(*args_, **kwargs_):
if thenType.subclass:
return thenType.subclass(*args_, **kwargs_)
else:
return thenType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_action(self): return self.action
def set_action(self, action): self.action = action
def hasContent_(self):
if (
self.action is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='thenType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='thenType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='thenType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='thenType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='thenType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.action is not None:
self.action.export(outfile, level, namespace_, name_='action', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='thenType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.action is not None:
showIndent(outfile, level)
outfile.write('action=model_.actionType(\n')
self.action.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'action':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <action> element')
self.action = obj_
obj_.original_tagname_ = 'action'
# end class thenType
class elseType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, action=None):
self.original_tagname_ = None
self.action = action
def factory(*args_, **kwargs_):
if elseType.subclass:
return elseType.subclass(*args_, **kwargs_)
else:
return elseType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_action(self): return self.action
def set_action(self, action): self.action = action
def hasContent_(self):
if (
self.action is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='elseType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='elseType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='elseType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='elseType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='elseType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.action is not None:
self.action.export(outfile, level, namespace_, name_='action', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='elseType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.action is not None:
showIndent(outfile, level)
outfile.write('action=model_.actionType(\n')
self.action.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'action':
type_name_ = child_.attrib.get(
'{http://www.w3.org/2001/XMLSchema-instance}type')
if type_name_ is None:
type_name_ = child_.attrib.get('type')
if type_name_ is not None:
type_names_ = type_name_.split(':')
if len(type_names_) == 1:
type_name_ = type_names_[0]
else:
type_name_ = type_names_[1]
class_ = globals()[type_name_]
obj_ = class_.factory()
obj_.build(child_)
else:
raise NotImplementedError(
'Class not implemented for <action> element')
self.action = obj_
obj_.original_tagname_ = 'action'
# end class elseType
GDSClassesMapping = {
'then': thenType,
'algorithm': algorithmType4,
'clause': clauseType,
'argument': argumentType,
'rule': ruleType,
'else': elseType,
'field': fieldType5,
'action': actionType,
'parameter': parameterType,
'fact': factType,
'if': ifType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'MeetCI'
rootClass = MeetCI
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
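# A hedged usage sketch, not emitted by generateDS: the file name below is
# hypothetical, and the accessors available on the returned root object depend
# on the MeetCI schema (not shown here); only parse() and the generated
# export() signature are taken from this module.
def _example_parse_usage(path='meetci_input.xml'):
    root = parse(path, silence=True)
    # Re-serialise the tree, assuming the root class shares the generated
    # export() signature used by the classes above.
    root.export(sys.stdout, 0, name_='MeetCI', pretty_print=True)
    return root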
def parseEtree(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'MeetCI'
rootClass = MeetCI
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
from StringIO import StringIO
parser = None
doc = parsexml_(StringIO(inString), parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'MeetCI'
rootClass = MeetCI
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'MeetCI'
rootClass = MeetCI
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from meetCI import *\n\n')
sys.stdout.write('import meetCI as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"ExpertSystem",
"MachineLearning",
"MeetCI",
"MultiLayerPerceptron",
"RadialBasisFunctionNetwork",
"RandomForest",
"RecurrentNeuralNetwork",
"SupportVectorMachine",
"actionType",
"algorithmType",
"algorithmType4",
"andType",
"argumentType",
"betweenType",
"classification",
"clauseType",
"elseType",
"equalType",
"factType",
"fieldType",
"fieldType5",
"greaterThanOrEqualType",
"greaterThanType",
"ifType",
"instanceType",
"lessThanOrEqualType",
"lessThanType",
"notBetweenType",
"notEqualType",
"orType",
"parameterType",
"predicateType",
"prediction",
"ruleType",
"runRuleType",
"setType",
"structType",
"thenType"
]
|
RathinakumarVisweswaran/MeetCI
|
python/meetCI.py
|
Python
|
mit
| 246,019
|
[
"Gaussian"
] |
1bef7ea42946ac9355c5e3a19977f82c9e0eec4cd7777b140c83c17e52590397
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.build.filesprovider import FilesProvider
from cerbero.enums import License, Platform
from cerbero.packages import PackageType
from cerbero.utils import remove_list_duplicates
class PackageBase(object):
'''
Base class for packages with the common field to describe a package
@cvar name: name of the package
@type name: str
@cvar shortdesc: Short description of the package
@type shortdesc: str
@cvar longdesc: Long description of the package
@type longdesc: str
@cvar version: version of the package
@type version: str
@cvar codename: codename of the release
@type codename: str
@cvar uuid: unique id for this package
@type uuid: str
@cvar license: package license
@type license: License
@cvar vendor: vendor for this package
@type vendor: str
@cvar org: organization for this package (eg: net.foo.bar)
@type org: str
@cvar url: url for this package
@type url: str
@cvar sys_deps: system dependencies for this package
@type sys_deps: dict
@cvar sys_deps_devel: development system dependencies for this package
@type sys_deps_devel: dict
@cvar ignore_package_prefix: don't use the package prefix set in the config
@type ignore_package_prefix: bool
@cvar resources_license: filename of the .txt license file
@type resources_license: str
@cvar resources_license_unwrapped: filename of the .txt license file
without the 80 chars wrapping
@type resources_license_unwrapped: str
@cvar resources_license_rtf: filename of .rtf license file
@type resources_license_rtf: str
@cvar resources_icon: filename of the .ico icon
@type resources_icon: str
@cvar resources_icon_icns: filename of the .icns icon
@type resources_icon_icns: str
@cvar resources_background: filename of the background image
@type resources_background: str
@cvar resources_preinstall: filename for the pre-installation script
@type resources_preinstall: str
'''
name = 'default'
shortdesc = 'default'
longdesc = 'default'
version = '1.0'
codename = None
org = 'default'
uuid = None
license = License.GPL
vendor = 'default'
url = 'default'
ignore_package_prefix = False
sys_deps = {}
sys_deps_devel = {}
resources_license = 'license.txt'
resources_license_unwrapped = 'license_unwrapped.txt'
resources_license_rtf = 'license.txt'
resources_icon = 'icon.ico'
resources_icon_icns = 'icon.icns'
resources_background = 'background.png'
resources_preinstall = 'preinstall'
def __init__(self, config, store):
self.config = config
self.store = store
self.package_mode = PackageType.RUNTIME
def prepare(self):
'''
Can be overridden by subclasses to conditionally modify the package
'''
pass
def load_files(self):
pass
def package_dir(self):
'''
Gets the directory path where this package is stored
@return: directory path
@rtype: str
'''
return os.path.dirname(self.__file__)
def relative_path(self, path):
'''
Gets a path relative to the package's directory
@return: absolute path relative to the package's directory
@rtype: str
'''
return os.path.abspath(os.path.join(self.package_dir(), path))
def files_list(self):
raise NotImplementedError("'files_list' must be implemented by subclasses")
def devel_files_list(self):
raise NotImplementedError("'devel_files_list' must be implemented by "
"subclasses")
def all_files_list(self):
raise NotImplementedError("'all_files_list' must be implemented by "
"subclasses")
def set_mode(self, package_type):
self.package_mode = package_type
def get_install_dir(self):
try:
return self.install_dir[self.config.target_platform]
except:
return self.config.install_dir
def get_sys_deps(self, package_mode=None):
package_mode = package_mode or self.package_mode
if package_mode == PackageType.RUNTIME:
sys_deps = self.sys_deps
if package_mode == PackageType.DEVEL:
sys_deps = self.sys_deps_devel
if self.config.target_distro_version in sys_deps:
return sys_deps[self.config.target_distro_version]
if self.config.target_distro in sys_deps:
return sys_deps[self.config.target_distro]
return []
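# Illustration only (distro keys and package names are hypothetical): sys_deps
# maps a target distro, or a specific distro version, to a list of system
# packages, and get_sys_deps() above prefers the more specific version key:
#
#     sys_deps = {'debian': ['libglib2.0-dev'],
#                 'debian_squeeze': ['libglib2.0-dev', 'libffi-dev']}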
def identifier(self):
return '%s.%s.%s' % (self.org, self.config.target_arch, self.name)
def __str__(self):
return self.name
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
# Return relative path for resources
if name.startswith('resources'):
if attr is not None:
attr = self.relative_path(attr)
elif name == 'name':
attr += self.package_mode
elif name == 'shortdesc':
if self.package_mode == PackageType.DEVEL:
attr += ' (Development Files)'
elif name == 'uuid':
if self.package_mode == PackageType.DEVEL:
if attr is not None:
# Used to change the upgrade code for the devel package
uuid = list(attr)
if uuid[0] != '0':
uuid[0] = '0'
else:
uuid[0] = '1'
attr = ''.join(uuid)
return attr
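# A hedged sketch of the attribute mangling above (the subclass and its values
# are hypothetical): after set_mode(PackageType.DEVEL) the name is suffixed
# with the package mode, the short description gains ' (Development Files)'
# and the first character of the uuid is toggled between '0' and '1' so the
# runtime and devel installers end up with distinct upgrade codes. This assumes
# PackageType.DEVEL is a string suffix, as the concatenation above implies.
#
#     pkg = MyPackage(config, store)      # hypothetical PackageBase subclass
#     pkg.set_mode(PackageType.DEVEL)
#     pkg.name        # e.g. 'my-package' + PackageType.DEVEL
#     pkg.shortdesc   # e.g. 'My package (Development Files)'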
class Package(PackageBase):
'''
Describes a set of files to produce distribution packages for the
different target platforms. It provides the first level of packaging
allowing to create modular installers by aggregating several of them.
On Windows it will create a Merge Module (.msm) that can be easily
integrated in an installer (.msi).
On OS X, it will produce a Package (.pkg) that can be integrated
in a MetaPackager.
On Linux it will create regular distribution packages such as a .deb on
Debian or a .rpm on RedHat
@cvar deps: list of packages dependencies
@type deps: list
@cvar files: list of files included in this package
@type files: list
@cvar platform_files: dict of platform files included in this package
@type platform_files: dict
@cvar files_devel: list of devel files included in this package
@type files_devel: list
@cvar platform_files_devel: dict of platform devel files included in
this package
@type platform_files_devel: dict
@cvar osx_framework_library: name and link for the Framework library
@type osx_framework_library: tuple
'''
deps = list()
files = list()
platform_files = dict()
files_devel = list()
platform_files_devel = dict()
osx_framework_library = None
def __init__(self, config, store, cookbook):
PackageBase.__init__(self, config, store)
self.cookbook = cookbook
self.load_files()
def load_files(self):
self._files = self.files + \
self.platform_files.get(self.config.target_platform, [])
self._files_devel = self.files_devel + \
self.platform_files_devel.get(self.config.target_platform, [])
self._parse_files()
def recipes_dependencies(self):
deps = [x.split(':')[0] for x in self._files]
deps.extend([x.split(':')[0] for x in self._files_devel])
for name in self.deps:
p = self.store.get_package(name)
deps += p.recipes_dependencies()
return deps
def recipes_licenses(self):
return self._list_licenses(self._recipes_files)
def devel_recipes_licenses(self):
licenses = self._list_licenses(self._recipes_files_devel)
for recipe_name, categories in self._recipes_files.iteritems():
# also add development licenses for recipes from which the
# 'libs' category was used
if len(categories) == 0 or FilesProvider.LIBS_CAT in categories:
r = self.cookbook.get_recipe(recipe_name)
if recipe_name in licenses:
licenses[recipe_name].update(
r.list_licenses_by_categories(categories))
else:
licenses[recipe_name] = \
r.list_licenses_by_categories(categories)
return licenses
def files_list(self):
files = []
for recipe_name, categories in self._recipes_files.iteritems():
recipe = self.cookbook.get_recipe(recipe_name)
if len(categories) == 0:
rfiles = recipe.dist_files_list()
else:
rfiles = recipe.files_list_by_categories(categories)
files.extend(rfiles)
return sorted(files)
def devel_files_list(self):
files = []
for recipe, categories in self._recipes_files.iteritems():
# only add development files for recipes from which the 'libs'
# category was used
if len(categories) == 0 or FilesProvider.LIBS_CAT in categories:
rfiles = self.cookbook.get_recipe(recipe).devel_files_list()
files.extend(rfiles)
for recipe, categories in self._recipes_files_devel.iteritems():
recipe = self.cookbook.get_recipe(recipe)
if not categories:
rfiles = recipe.devel_files_list()
else:
rfiles = recipe.files_list_by_categories(categories)
files.extend(rfiles)
return sorted(files)
def all_files_list(self):
files = self.files_list()
files.extend(self.devel_files_list())
return sorted(files)
def _parse_files(self):
self._recipes_files = {}
for r in self._files:
l = r.split(':')
self._recipes_files[l[0]] = l[1:]
self._recipes_files_devel = {}
for r in self._files_devel:
l = r.split(':')
self._recipes_files_devel[l[0]] = l[1:]
def _list_licenses(self, recipes_files):
licenses = {}
for recipe_name, categories in recipes_files.iteritems():
r = self.cookbook.get_recipe(recipe_name)
# Package.files|files_devel|platform_files|platform_files_devel = \
# [recipe:category]
# => licenses = {recipe_name: {category: category_licenses}}
# Package.files|files_devel|platform_files|platform_files_devel = \
# [recipe]
# => licenses = {recipe_name: {None: recipe_licenses}}
licenses[recipe_name] = r.list_licenses_by_categories(categories)
return licenses
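# Hedged illustration of the files syntax consumed by _parse_files above
# (recipe and category names are hypothetical): each entry is either a bare
# recipe name, which pulls the recipe's full dist file list, or
# 'recipe:category1:category2', which restricts it to those FilesProvider
# categories.
#
#     class MyPackage(Package):                 # hypothetical package
#         name = 'my-package'
#         files = ['glib:libs:bins', 'gstreamer']
#         platform_files = {Platform.WINDOWS: ['dlfcn:libs']}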
class MetaPackage(PackageBase):
'''
Group of L{cerbero.packages.package.Package} used to build a modular
installer package.
On Windows it will result in a .msi installer that aggregates
Merge Modules created from a L{cerbero.packages.package.Package}.
On OS X it will result in a MetaPackage that aggregates .pkg packages
created from a L{cerbero.packages.package.Package}.
On Linux it will result in rpm and deb meta-packages, with the packages
created as dependencies.
@cvar packages: list of packages grouped in this meta package
@type packages: list
@cvar platform_packages: list of platform packages
@type platform_packages: dict
@cvar root_env_var: name of the environment variable with the prefix
@type root_env_var: str
@cvar sdk_version: SDK version. This version will be used for the SDK
versioning and can differ from the installer one.
@type sdk_version: str
@cvar resources_wix_installer: wix installer template file
@type resources_wix_installer: str
'''
packages = []
root_env_var = 'CERBERO_SDK_ROOT'
platform_packages = {}
sdk_version = '1.0'
resources_wix_installer = None
def __init__(self, config, store):
PackageBase.__init__(self, config, store)
def list_packages(self):
return [p[0] for p in self.packages]
def recipes_dependencies(self):
deps = []
for package in self.store.get_package_deps(self.name, True):
deps.extend(package.recipes_dependencies())
return remove_list_duplicates(deps)
def files_list(self):
return self._list_files(Package.files_list)
def devel_files_list(self):
return self._list_files(Package.devel_files_list)
def all_files_list(self):
return self._list_files(Package.all_files_list)
def get_wix_upgrade_code(self):
m = self.package_mode
p = self.config.target_arch
return self.wix_upgrade_code[m][p]
def _list_files(self, func):
# for each package, call the function that lists files
files = []
for package in self.store.get_package_deps(self.name):
files.extend(func(package))
files.sort()
return files
def __getattribute__(self, name):
if name == 'packages':
attr = PackageBase.__getattribute__(self, name)
ret = attr[:]
platform_attr_name = 'platform_%s' % name
if hasattr(self, platform_attr_name):
platform_attr = PackageBase.__getattribute__(self,
platform_attr_name)
if self.config.target_platform in platform_attr:
platform_list = platform_attr[self.config.target_platform]
ret.extend(platform_list)
return ret
else:
return PackageBase.__getattribute__(self, name)
class SDKPackage(MetaPackage):
'''
Creates an installer for SDKs.
On Windows the installer will add a new environment variable set in
root_env_var as well as a new key in the registry so that other installers
depending on the SDK can use them to set their environment easily and
check whether the requirements are met in the pre-installation step.
On OS X, the installer will create the typical bundle structure used for
OS X Frameworks, creating the 'Versions' and 'Current' directories for
versioning as well as 'Headers' and 'Libraries' linking to the current
version of the framework.
On Linux everything just works without extra hacks ;)
@cvar root_env_var: name of the environment variable with the prefix
@type root_env_var: str
@cvar osx_framework_library: (name, path) of the lib used for the Framework
@type osx_framework_library: tuple
'''
root_env_var = 'CERBERO_SDK_ROOT_%(arch)s'
osx_framework_library = None
def __init__(self, config, store):
MetaPackage.__init__(self, config, store)
def get_root_env_var(self):
return (self.root_env_var % {'arch': self.config.target_arch}).upper()
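# For example (arch value hypothetical): with the default template above and
# config.target_arch == 'x86_64', get_root_env_var() returns
# 'CERBERO_SDK_ROOT_X86_64'.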
class InstallerPackage(MetaPackage):
'''
Creates an installer for a target SDK to extend it.
@cvar windows_sdk_reg: name of the required SDK
@type windows_sdk_reg: str
'''
windows_sdk_reg = None
def __init__(self, config, store):
MetaPackage.__init__(self, config, store)
class App(PackageBase):
'''
Create packages for applications.
An App package will not include development files, and binaries can
be stripped when required. The App packager will not create a development
version.
On Linux it will work in the same way as a MetaPackage, creating a package
with the application's recipe files and adding packages dependencies to be
managed by the distribution's package manager.
On OS X and Windows, the dependencies can be embedded in the installer
itself, creating an Application bundle on OS X and main menu shortcuts on
Windows, relocating the binaries properly.
@cvar app_name: Name used for the application
@type app_name: str
@cvar app_recipe: recipe that builds the application project
@type app_recipe: str
@cvar deps: list of packages dependencies
@type deps: list
@cvar embed_deps: include dependencies in the final package
@type embed_deps: boolean
@cvar commands: a list with the application commands. The first one will be
used for the main executable
@type commands: list
@cvar wrapper: suffix filename for the main executable wrapper
@type wrapper: str
'''
app_name = None
app_recipe = None
embed_deps = True
deps = []
commands = [] # list of tuples ('CommandName', 'path/to/binary')
wrapper = 'app_wrapper.tpl'
resources_wix_installer = None
def __init__(self, config, store, cookbook):
PackageBase.__init__(self, config, store)
self.cookbook = cookbook
self._app_recipe = self.cookbook.get_recipe(self.app_recipe)
self.title = self.name
def recipes_dependencies(self):
deps = []
for dep in self.deps:
package = self.store.get_package(dep)
deps.extend(package.recipes_dependencies())
if self.app_recipe is not None:
deps.append(self.app_recipe)
return list(set(deps))
def files_list(self):
# for each package, call the function that lists files
files = []
if self.embed_deps and self.config.target_platform != Platform.LINUX:
packages_deps = [self.store.get_package(x) for x in self.deps]
for package in packages_deps:
packages_deps.extend(self.store.get_package_deps(package))
packages_deps = list(set(packages_deps))
for package in packages_deps:
files.extend(package.files_list())
files.extend(self._app_recipe.files_list())
files.sort()
return files
def devel_files_list(self):
return []
def all_files_list(self):
return self.files_list()
def get_wix_upgrade_code(self):
m = self.package_mode
p = self.config.target_arch
return self.wix_upgrade_code[m][p]
def get_commands(self):
return self.commands.get(self.config.target_platform, [])
def get_wrapper(self, cmd, wrapper=None):
if self.config.target_platform == Platform.WINDOWS:
platform = 'win'
else:
platform = 'unix'
if wrapper is not None:
wrapper_file = self.relative_path('%s_%s' % (platform, wrapper))
else:
wrapper_file = os.path.join(self.config.data_dir, 'templates',
'%s_%s' % (self.wrapper, platform))
if not os.path.exists(wrapper_file):
return None
with open(wrapper_file, 'r') as f:
content = f.read()
content = content % {'prefix': self.config.prefix,
'py_prefix': self.config.py_prefix,
'cmd': cmd}
return content
def __getattribute__(self, name):
if name == 'deps':
attr = PackageBase.__getattribute__(self, name)
ret = attr[:]
platform_attr_name = 'platform_%s' % name
if hasattr(self, platform_attr_name):
platform_attr = PackageBase.__getattribute__(self,
platform_attr_name)
if self.config.target_platform in platform_attr:
platform_list = platform_attr[self.config.target_platform]
ret.extend(platform_list)
return ret
else:
return PackageBase.__getattribute__(self, name)
|
nicolewu/cerbero
|
cerbero/packages/package.py
|
Python
|
lgpl-2.1
| 20,637
|
[
"NAMD"
] |
7030d8c1ad9e87b03ba21e63177806eedf1c64a973f2ed62d2b834d728e86ad1
|
import numpy
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
class Gaussian(function.Function):
"""Gaussian sampling function.
In forward calculation, this function takes mean and logarithm of variance
as inputs, and draw a sample from a gaussian distribution.
"""
def __init__(self):
self.eps = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
m_type, v_type = in_types
type_check.expect(
m_type.dtype == numpy.float32,
v_type.dtype == numpy.float32,
m_type.shape == v_type.shape,
)
def forward_cpu(self, inputs):
self.retain_inputs(())
mean, ln_var = inputs
if self.eps is None:
self.eps = numpy.random.standard_normal(ln_var.shape) \
.astype(numpy.float32)
self.noise = numpy.exp(ln_var * mean.dtype.type(0.5)) * self.eps
return utils.force_array(mean + self.noise),
def forward_gpu(self, inputs):
self.retain_inputs(())
cupy = cuda.cupy
mean, ln_var = inputs
if self.eps is None:
self.eps = cupy.random.standard_normal(
ln_var.shape, dtype=mean.dtype)
self.noise = cuda.cupy.empty_like(mean)
self.noise = cuda.elementwise(
'T v, T e', 'T noise',
'noise = exp(v / 2) * e',
'gaussian_forward'
)(ln_var, self.eps)
return mean + self.noise,
def backward(self, inputs, grad_output):
g, = grad_output
return g, utils.force_array(g * self.noise * g.dtype.type(0.5))
def gaussian(mean, ln_var):
"""Gaussian sampling function.
It takes mean :math:`\\mu` and logarithm of variance
:math:`\\log(\\sigma^2)` as inputs and outputs a sample drawn from the
Gaussian distribution :math:`N(\\mu, \\sigma^2)`.
Args:
mean (~chainer.Variable): Input variable representing mean
:math:`\\mu`.
ln_var (~chainer.Variable): Input variable representing logarithm of
variance :math:`\\log(\\sigma^2)`.
Returns:
~chainer.Variable: Output variable.
"""
return Gaussian()(mean, ln_var)
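# A minimal usage sketch, not part of chainer: shapes and values are arbitrary.
# The function implements the reparameterisation trick, returning
# mean + exp(ln_var / 2) * eps with eps drawn from a standard normal, so the
# gradient flows back to both inputs.
def _example_usage():
    import numpy as np
    from chainer import Variable
    mu = Variable(np.zeros((4, 3), dtype=np.float32))
    ln_var = Variable(np.zeros((4, 3), dtype=np.float32))
    # With ln_var == 0 each element is distributed as N(0, 1).
    return gaussian(mu, ln_var)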
|
kiyukuta/chainer
|
chainer/functions/noise/gaussian.py
|
Python
|
mit
| 2,288
|
[
"Gaussian"
] |
3bfc457f2b224c05eedae7e6fb5adbaca78edaa46387523b04378e31ce0879c4
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Wrapper classes for Cif input and output from Structures.
"""
import math
import re
import os
import textwrap
import warnings
from collections import OrderedDict, deque
from io import StringIO
import numpy as np
from functools import partial
from pathlib import Path
from inspect import getfullargspec as getargspec
from itertools import groupby
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.io import zopen
from pymatgen.util.coord import in_coord_list_pbc, find_in_coord_list_pbc
from monty.string import remove_non_ascii
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import SpaceGroup, SYMM_DATA
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.electronic_structure.core import Magmom
from pymatgen.core.operations import MagSymmOp
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
__author__ = "Shyue Ping Ong, Will Richards, Matthew Horton"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
sub_spgrp = partial(re.sub, r"[\s_]", "")
space_groups = {sub_spgrp(k): k for k in SYMM_DATA['space_group_encoding'].keys()} # type: ignore
space_groups.update({sub_spgrp(k): k for k in SYMM_DATA['space_group_encoding'].keys()}) # type: ignore
_COD_DATA = None
def _get_cod_data():
global _COD_DATA
if _COD_DATA is None:
import pymatgen
with open(os.path.join(pymatgen.symmetry.__path__[0],
"symm_ops.json")) \
as f:
import json
_COD_DATA = json.load(f)
return _COD_DATA
class CifBlock:
"""
Object for storing cif data. All data is stored in a single dictionary.
Data inside loops are stored in lists in the data dictionary, and
information on which keys are grouped together are stored in the loops
attribute.
"""
maxlen = 70 # not quite 80 so we can deal with semicolons and things
def __init__(self, data, loops, header):
"""
Args:
data: dict or OrderedDict of data to go into the cif. Values should
be convertible to string, or lists of these if the key is
in a loop
loops: list of lists of keys, grouped by which loop they should
appear in
header: name of the block (appears after the data_ on the first
line)
"""
self.loops = loops
self.data = data
# AJ says: CIF Block names cannot be more than 75 characters or you
# get an Exception
self.header = header[:74]
def __eq__(self, other):
return self.loops == other.loops \
and self.data == other.data \
and self.header == other.header
def __getitem__(self, key):
return self.data[key]
def __str__(self):
"""
Returns the cif string for the data block
"""
s = ["data_{}".format(self.header)]
keys = self.data.keys()
written = []
for k in keys:
if k in written:
continue
for l in self.loops:
# search for a corresponding loop
if k in l:
s.append(self._loop_to_string(l))
written.extend(l)
break
if k not in written:
# k didn't belong to a loop
v = self._format_field(self.data[k])
if len(k) + len(v) + 3 < self.maxlen:
s.append("{} {}".format(k, v))
else:
s.extend([k, v])
return "\n".join(s)
def _loop_to_string(self, loop):
s = "loop_"
for l in loop:
s += '\n ' + l
for fields in zip(*[self.data[k] for k in loop]):
line = "\n"
for val in map(self._format_field, fields):
if val[0] == ";":
s += line + "\n" + val
line = "\n"
elif len(line) + len(val) + 2 < self.maxlen:
line += " " + val
else:
s += line
line = '\n ' + val
s += line
return s
def _format_field(self, v):
v = v.__str__().strip()
if len(v) > self.maxlen:
return ';\n' + textwrap.fill(v, self.maxlen) + '\n;'
# add quotes if necessary
if v == '':
return '""'
if (" " in v or v[0] == "_") \
and not (v[0] == "'" and v[-1] == "'") \
and not (v[0] == '"' and v[-1] == '"'):
if "'" in v:
q = '"'
else:
q = "'"
v = q + v + q
return v
@classmethod
def _process_string(cls, string):
# remove comments
string = re.sub(r"(\s|^)#.*$", "", string, flags=re.MULTILINE)
# remove empty lines
string = re.sub(r"^\s*\n", "", string, flags=re.MULTILINE)
# remove non_ascii
string = remove_non_ascii(string)
# since line breaks in .cif files are mostly meaningless,
# break up into a stream of tokens to parse, rejoining multiline
# strings (between semicolons)
q = deque()
multiline = False
ml = []
# this regex splits on spaces, except when in quotes.
# starting quotes must not be preceded by non-whitespace
# (these get eaten by the first expression)
# ending quotes must not be followed by non-whitespace
p = re.compile(r'''([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)''')
for l in string.splitlines():
if multiline:
if l.startswith(";"):
multiline = False
q.append(('', '', '', ' '.join(ml)))
ml = []
l = l[1:].strip()
else:
ml.append(l)
continue
if l.startswith(";"):
multiline = True
ml.append(l[1:].strip())
else:
for s in p.findall(l):
# s is tuple. location of the data in the tuple
# depends on whether it was quoted in the input
q.append(s)
return q
@classmethod
def from_string(cls, string):
"""
Reads CifBlock from string.
:param string: String representation.
:return: CifBlock
"""
q = cls._process_string(string)
header = q.popleft()[0][5:]
data = OrderedDict()
loops = []
while q:
s = q.popleft()
# cif keys aren't in quotes, so show up in s[0]
if s[0] == "_eof":
break
if s[0].startswith("_"):
try:
data[s[0]] = "".join(q.popleft())
except IndexError:
data[s[0]] = ""
elif s[0].startswith("loop_"):
columns = []
items = []
while q:
s = q[0]
if s[0].startswith("loop_") or not s[0].startswith("_"):
break
columns.append("".join(q.popleft()))
data[columns[-1]] = []
while q:
s = q[0]
if s[0].startswith("loop_") or s[0].startswith("_"):
break
items.append("".join(q.popleft()))
n = len(items) // len(columns)
assert len(items) % n == 0
loops.append(columns)
for k, v in zip(columns * n, items):
data[k].append(v.strip())
elif "".join(s).strip() != "":
warnings.warn("Possible issue in cif file"
" at line: {}".format("".join(s).strip()))
return cls(data, loops, header)
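# --- Illustrative sketch (editor's example, not part of the original module).
# It shows how a CifBlock can be assembled by hand and round-tripped through
# its string form; the key names are standard CIF data names, the values are
# made up for demonstration.
def _example_cif_block():
    data = OrderedDict()
    data["_cell_length_a"] = "3.84"
    data["_atom_site_label"] = ["Si1", "O1"]
    data["_atom_site_occupancy"] = ["1.0", "1.0"]
    loops = [["_atom_site_label", "_atom_site_occupancy"]]
    block = CifBlock(data, loops, header="example")
    # Rendering the block and re-parsing it is expected to give an equal block.
    return CifBlock.from_string(str(block)) == block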
class CifFile:
"""
Reads and parses CifBlocks from a .cif file or string
"""
def __init__(self, data, orig_string=None, comment=None):
"""
Args:
            data (OrderedDict): OrderedDict of CifBlock objects.
orig_string (str): The original cif string.
comment (str): Comment string.
"""
self.data = data
self.orig_string = orig_string
self.comment = comment or "# generated using pymatgen"
def __str__(self):
s = ["%s" % v for v in self.data.values()]
return self.comment + "\n" + "\n".join(s) + "\n"
@classmethod
def from_string(cls, string):
"""
Reads CifFile from a string.
:param string: String representation.
:return: CifFile
"""
d = OrderedDict()
for x in re.split(r"^\s*data_", "x\n" + string,
flags=re.MULTILINE | re.DOTALL)[1:]:
# Skip over Cif block that contains powder diffraction data.
# Some elements in this block were missing from CIF files in
# Springer materials/Pauling file DBs.
            # This block does not contain any structure information anyway, and
            # CifParser was not parsing it either.
if 'powder_pattern' in re.split(r"\n", x, 1)[0]:
continue
c = CifBlock.from_string("data_" + x)
d[c.header] = c
return cls(d, string)
@classmethod
def from_file(cls, filename):
"""
Reads CifFile from a filename.
:param filename: Filename
:return: CifFile
"""
with zopen(str(filename), "rt", errors="replace") as f:
return cls.from_string(f.read())
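# --- Illustrative sketch (editor's example, not part of the original module).
# CifFile.from_string() splits a multi-block string on "data_" headers; the
# two blocks below are made up for demonstration.
def _example_cif_file():
    s = "data_block1\n_cell_length_a 3.84\ndata_block2\n_cell_length_a 5.43\n"
    cf = CifFile.from_string(s)
    return list(cf.data.keys())  # expected: ["block1", "block2"]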
class CifParser:
"""
Parses a CIF file. Attempts to fix CIFs that are out-of-spec, but will
issue warnings if corrections applied. These are also stored in the
CifParser's errors attribute.
"""
def __init__(self, filename, occupancy_tolerance=1., site_tolerance=1e-4):
"""
Args:
filename (str): CIF filename, bzipped or gzipped CIF files are fine too.
occupancy_tolerance (float): If total occupancy of a site is between 1
and occupancy_tolerance, the occupancies will be scaled down to 1.
site_tolerance (float): This tolerance is used to determine if two
sites are sitting in the same position, in which case they will be
combined to a single disordered site. Defaults to 1e-4.
"""
self._occupancy_tolerance = occupancy_tolerance
self._site_tolerance = site_tolerance
if isinstance(filename, (str, Path)):
self._cif = CifFile.from_file(filename)
else:
self._cif = CifFile.from_string(filename.read())
# store if CIF contains features from non-core CIF dictionaries
# e.g. magCIF
self.feature_flags = {}
self.warnings = []
def is_magcif():
"""
Checks to see if file appears to be a magCIF file (heuristic).
"""
# Doesn't seem to be a canonical way to test if file is magCIF or
# not, so instead check for magnetic symmetry datanames
prefixes = ['_space_group_magn', '_atom_site_moment',
'_space_group_symop_magn']
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags['magcif'] = is_magcif()
def is_magcif_incommensurate():
"""
Checks to see if file contains an incommensurate magnetic
structure (heuristic).
"""
# Doesn't seem to be a canonical way to test if magCIF file
            # describes an incommensurate structure or not, so instead check
# for common datanames
if not self.feature_flags["magcif"]:
return False
prefixes = ['_cell_modulation_dimension', '_cell_wave_vector']
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags['magcif_incommensurate'] = is_magcif_incommensurate()
for k in self._cif.data.keys():
# pass individual CifBlocks to _sanitize_data
self._cif.data[k] = self._sanitize_data(self._cif.data[k])
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.):
"""
Creates a CifParser from a string.
Args:
cif_string (str): String representation of a CIF.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
CifParser
"""
stream = StringIO(cif_string)
return CifParser(stream, occupancy_tolerance)
def _sanitize_data(self, data):
"""
Some CIF files do not conform to spec. This function corrects
        known issues, particularly in regard to Springer materials/
Pauling files.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
"""
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
"""
# check for implicit hydrogens, warn if any present
if "_atom_site_attached_hydrogens" in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data['_atom_site_attached_hydrogens']
if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.warnings.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if "_atom_site_type_symbol" in data.data.keys():
# Keep a track of which data row needs to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for idx, el_row in enumerate(data["_atom_site_label"]):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
len(data["_atom_site_label"][idx].split(' + ')):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data["_atom_site_type_symbol"][idx]
symbol_str_lst = symbol_str.split(' + ')
for elocc_idx in range(len(symbol_str_lst)):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub(
r'\([0-9]*\)', '',
symbol_str_lst[elocc_idx].strip())
# Extract element name and its occupancy from the
# string, and store it as a
# key-value pair in "els_occ".
els_occu[str(re.findall(r'\D+', symbol_str_lst[
elocc_idx].strip())[1]).replace('<sup>', '')] = \
float('0' + re.findall(r'\.?\d+', symbol_str_lst[
elocc_idx].strip())[1])
x = str2float(data["_atom_site_fract_x"][idx])
y = str2float(data["_atom_site_fract_y"][idx])
z = str2float(data["_atom_site_fract_z"][idx])
for et, occu in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(
et + '_fix' + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z))
idxs_to_remove.append(idx)
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
for id in sorted(idxs_to_remove, reverse=True):
del data.data[original_key][id]
if len(idxs_to_remove) > 0:
self.warnings.append("Pauling file corrections applied.")
data.data["_atom_site_label"] += new_atom_site_label
data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
data.data["_atom_site_occupancy"] += new_atom_site_occupancy
data.data["_atom_site_fract_x"] += new_fract_x
data.data["_atom_site_fract_y"] += new_fract_y
data.data["_atom_site_fract_z"] += new_fract_z
"""
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
"""
if self.feature_flags["magcif"]:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = ["_space_group_symop_magn_operation.xyz",
"_space_group_symop_magn_centering.xyz",
"_space_group_magn.name_BNS",
"_space_group_magn.number_BNS",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
"_atom_site_moment_label"]
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = "_".join(correct_key.split("."))
test_key = "_".join(original_key.split("."))
if trial_key == test_key:
changes_to_make[correct_key] = original_key
# make changes
for correct_key, original_key in changes_to_make.items():
data.data[correct_key] = data.data[original_key]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {
"_magnetic_space_group.transform_to_standard_Pp_abc":
"_space_group_magn.transform_BNS_Pp_abc"}
changes_to_make = {}
for interim_key, final_key in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key
if len(changes_to_make) > 0:
self.warnings.append("Keys changed to match new magCIF specification.")
for final_key, interim_key in changes_to_make.items():
data.data[final_key] = data.data[interim_key]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1 / 3., 2 / 3.)
fracs_to_change = {}
for label in ('_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'):
if label in data.data.keys():
for idx, frac in enumerate(data.data[label]):
try:
frac = str2float(frac)
except Exception:
# co-ordinate might not be defined e.g. '?'
continue
for comparison_frac in important_fracs:
if abs(1 - frac / comparison_frac) < 1e-4:
fracs_to_change[(label, idx)] = str(comparison_frac)
if fracs_to_change:
self.warnings.append("Some fractional co-ordinates rounded to ideal values to "
"avoid issues with finite precision.")
for (label, idx), val in fracs_to_change.items():
data.data[label][idx] = val
return data
def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
"""
Generate unique coordinates using coord and symmetry positions
and also their corresponding magnetic moments, if supplied.
"""
coords = []
if magmoms_in:
magmoms = []
if len(magmoms_in) != len(coords_in):
raise ValueError
for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if isinstance(op, MagSymmOp):
# Up to this point, magmoms have been defined relative
# to crystal axis. Now convert to Cartesian and into
# a Magmom object.
magmom = Magmom.from_moment_relative_to_crystal_axes(
op.operate_magmom(tmp_magmom),
lattice=lattice
)
else:
magmom = Magmom(tmp_magmom)
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
magmoms.append(magmom)
return coords, magmoms
else:
for tmp_coord in coords_in:
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if not in_coord_list_pbc(coords, coord,
atol=self._site_tolerance):
coords.append(coord)
return coords, [Magmom(0)] * len(coords) # return dummy magmoms
def get_lattice(self, data, length_strings=("a", "b", "c"),
angle_strings=("alpha", "beta", "gamma"),
lattice_type=None):
"""
Generate the lattice from the provided lattice parameters. In
the absence of all six lattice parameters, the crystal system
and necessary parameters are parsed
"""
try:
lengths = [str2float(data["_cell_length_" + i])
for i in length_strings]
angles = [str2float(data["_cell_angle_" + i])
for i in angle_strings]
if not lattice_type:
return Lattice.from_parameters(*lengths, *angles)
else:
return getattr(Lattice, lattice_type)(*(lengths + angles))
except KeyError:
# Missing Key search for cell setting
            for lattice_label in ["_symmetry_cell_setting",
                                  "_space_group_crystal_system"]:
                if data.data.get(lattice_label):
                    lattice_type = data.data.get(lattice_label).lower()
try:
required_args = getargspec(
getattr(Lattice, lattice_type)).args
lengths = (l for l in length_strings
if l in required_args)
angles = (a for a in angle_strings
if a in required_args)
return self.get_lattice(data, lengths, angles,
lattice_type=lattice_type)
except AttributeError as exc:
self.warnings.append(str(exc))
warnings.warn(exc)
else:
return None
def get_symops(self, data):
"""
In order to generate symmetry equivalent positions, the symmetry
operations are parsed. If the symops are not present, the space
group symbol is parsed, and symops are generated.
"""
symops = []
for symmetry_label in ["_symmetry_equiv_pos_as_xyz",
"_symmetry_equiv_pos_as_xyz_",
"_space_group_symop_operation_xyz",
"_space_group_symop_operation_xyz_"]:
if data.data.get(symmetry_label):
xyz = data.data.get(symmetry_label)
if isinstance(xyz, str):
msg = "A 1-line symmetry op P1 CIF is detected!"
warnings.warn(msg)
self.warnings.append(msg)
xyz = [xyz]
try:
symops = [SymmOp.from_xyz_string(s)
for s in xyz]
break
except ValueError:
continue
if not symops:
# Try to parse symbol
for symmetry_label in ["_symmetry_space_group_name_H-M",
"_symmetry_space_group_name_H_M",
"_symmetry_space_group_name_H-M_",
"_symmetry_space_group_name_H_M_",
"_space_group_name_Hall",
"_space_group_name_Hall_",
"_space_group_name_H-M_alt",
"_space_group_name_H-M_alt_",
"_symmetry_space_group_name_hall",
"_symmetry_space_group_name_hall_",
"_symmetry_space_group_name_h-m",
"_symmetry_space_group_name_h-m_"]:
sg = data.data.get(symmetry_label)
if sg:
sg = sub_spgrp(sg)
try:
spg = space_groups.get(sg)
if spg:
symops = SpaceGroup(spg).symmetry_ops
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Spacegroup from %s used." % symmetry_label
warnings.warn(msg)
self.warnings.append(msg)
break
except ValueError:
# Ignore any errors
pass
try:
for d in _get_cod_data():
if sg == re.sub(r"\s+", "",
d["hermann_mauguin"]):
xyz = d["symops"]
symops = [SymmOp.from_xyz_string(s)
for s in xyz]
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Spacegroup from %s used." % symmetry_label
warnings.warn(msg)
self.warnings.append(msg)
break
except Exception:
continue
if symops:
break
if not symops:
# Try to parse International number
for symmetry_label in ["_space_group_IT_number",
"_space_group_IT_number_",
"_symmetry_Int_Tables_number",
"_symmetry_Int_Tables_number_"]:
if data.data.get(symmetry_label):
try:
i = int(str2float(data.data.get(symmetry_label)))
symops = SpaceGroup.from_int_number(i).symmetry_ops
break
except ValueError:
continue
if not symops:
msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
"Defaulting to P1."
warnings.warn(msg)
self.warnings.append(msg)
symops = [SymmOp.from_xyz_string(s) for s in ['x', 'y', 'z']]
return symops
def get_magsymops(self, data):
"""
Equivalent to get_symops except for magnetic symmetry groups.
Separate function since additional operation for time reversal symmetry
(which changes magnetic moments on sites) needs to be returned.
"""
magsymmops = []
# check to see if magCIF file explicitly contains magnetic symmetry operations
if data.data.get("_space_group_symop_magn_operation.xyz"):
xyzt = data.data.get("_space_group_symop_magn_operation.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
if data.data.get("_space_group_symop_magn_centering.xyz"):
xyzt = data.data.get("_space_group_symop_magn_centering.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
all_ops = []
for op in magsymmops:
for centering_op in centering_symops:
new_translation = [i - np.floor(i) for i
in
op.translation_vector + centering_op.translation_vector]
new_time_reversal = op.time_reversal * centering_op.time_reversal
all_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=new_translation,
time_reversal=new_time_reversal))
magsymmops = all_ops
# else check to see if it specifies a magnetic space group
elif data.data.get("_space_group_magn.name_BNS") or data.data.get(
"_space_group_magn.number_BNS"):
if data.data.get("_space_group_magn.name_BNS"):
# get BNS label for MagneticSpaceGroup()
id = data.data.get("_space_group_magn.name_BNS")
else:
# get BNS number for MagneticSpaceGroup()
# by converting string to list of ints
id = list(map(int, (
data.data.get("_space_group_magn.number_BNS").split("."))))
if data.data.get("_space_group_magn.transform_BNS_Pp_abc"):
if data.data.get(
"_space_group_magn.transform_BNS_Pp_abc") != "a,b,c;0,0,0":
jf = data.data.get("_space_group_magn.transform_BNS_Pp_abc")
msg = MagneticSpaceGroup(id, jf)
elif data.data.get("_space_group_magn.transform_BNS_Pp"):
return NotImplementedError(
"Incomplete specification to implement.")
else:
msg = MagneticSpaceGroup(id)
magsymmops = msg.symmetry_ops
if not magsymmops:
msg = "No magnetic symmetry detected, using primitive symmetry."
warnings.warn(msg)
self.warnings.append(msg)
magsymmops = [MagSymmOp.from_xyzt_string("x, y, z, 1")]
return magsymmops
def parse_oxi_states(self, data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \
str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states
def parse_magmoms(self, data, lattice=None):
"""
Parse atomic magnetic moments from data dictionary
"""
if lattice is None:
raise Exception(
'Magmoms given in terms of crystal axes in magCIF spec.')
try:
magmoms = {
data["_atom_site_moment_label"][i]:
np.array(
[str2float(data["_atom_site_moment_crystalaxis_x"][i]),
str2float(data["_atom_site_moment_crystalaxis_y"][i]),
str2float(data["_atom_site_moment_crystalaxis_z"][i])]
)
for i in range(len(data["_atom_site_moment_label"]))
}
except (ValueError, KeyError):
return None
return magmoms
def _parse_symbol(self, sym):
"""
Parse a string with a symbol to extract a string representing an element.
Args:
sym (str): A symbol to be parsed.
Returns:
A string with the parsed symbol. None if no parsing was possible.
"""
# Common representations for elements/water in cif files
# TODO: fix inconsistent handling of water
special = {"Hw": "H", "Ow": "O", "Wat": "O",
"wat": "O", "OH": "", "OH2": "", "NO3": "N"}
parsed_sym = None
# try with special symbols, otherwise check the first two letters,
# then the first letter alone. If everything fails try extracting the
# first letters.
m_sp = re.match("|".join(special.keys()), sym)
if m_sp:
parsed_sym = special[m_sp.group()]
elif Element.is_valid_symbol(sym[:2].title()):
parsed_sym = sym[:2].title()
elif Element.is_valid_symbol(sym[0].upper()):
parsed_sym = sym[0].upper()
else:
m = re.match(r"w?[A-Z][a-z]*", sym)
if m:
parsed_sym = m.group()
if parsed_sym is not None and (m_sp or not re.match(r"{}\d*".format(parsed_sym), sym)):
msg = "{} parsed as {}".format(sym, parsed_sym)
warnings.warn(msg)
self.warnings.append(msg)
return parsed_sym
def _get_structure(self, data, primitive):
"""
Generate structure from part of the cif.
"""
def get_num_implicit_hydrogens(sym):
num_h = {"Wat": 2, "wat": 2, "O-H": 1}
return num_h.get(sym[:3], 0)
lattice = self.get_lattice(data)
# if magCIF, get magnetic symmetry moments and magmoms
# else standard CIF, and use empty magmom dict
if self.feature_flags["magcif_incommensurate"]:
raise NotImplementedError(
"Incommensurate structures not currently supported.")
elif self.feature_flags["magcif"]:
self.symmetry_operations = self.get_magsymops(data)
magmoms = self.parse_magmoms(data, lattice=lattice)
else:
self.symmetry_operations = self.get_symops(data)
magmoms = {}
oxi_states = self.parse_oxi_states(data)
coord_to_species = OrderedDict()
coord_to_magmoms = OrderedDict()
def get_matching_coord(coord):
keys = list(coord_to_species.keys())
coords = np.array(keys)
for op in self.symmetry_operations:
c = op.operate(coord)
inds = find_in_coord_list_pbc(coords, c,
atol=self._site_tolerance)
                # can't use `if inds` here, because np.array([0]) evaluates
                # to False
if len(inds):
return keys[inds[0]]
return False
for i in range(len(data["_atom_site_label"])):
try:
# If site type symbol exists, use it. Otherwise, we use the
# label.
symbol = self._parse_symbol(data["_atom_site_type_symbol"][i])
num_h = get_num_implicit_hydrogens(
data["_atom_site_type_symbol"][i])
except KeyError:
symbol = self._parse_symbol(data["_atom_site_label"][i])
num_h = get_num_implicit_hydrogens(data["_atom_site_label"][i])
if not symbol:
continue
if oxi_states is not None:
o_s = oxi_states.get(symbol, 0)
# use _atom_site_type_symbol if possible for oxidation state
if "_atom_site_type_symbol" in data.data.keys():
oxi_symbol = data["_atom_site_type_symbol"][i]
o_s = oxi_states.get(oxi_symbol, o_s)
try:
el = Specie(symbol, o_s)
except Exception:
el = DummySpecie(symbol, o_s)
else:
el = get_el_sp(symbol)
x = str2float(data["_atom_site_fract_x"][i])
y = str2float(data["_atom_site_fract_y"][i])
z = str2float(data["_atom_site_fract_z"][i])
magmom = magmoms.get(data["_atom_site_label"][i],
np.array([0, 0, 0]))
try:
occu = str2float(data["_atom_site_occupancy"][i])
except (KeyError, ValueError):
occu = 1
if occu > 0:
coord = (x, y, z)
match = get_matching_coord(coord)
comp_d = {el: occu}
if num_h > 0:
comp_d["H"] = num_h
self.warnings.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
comp = Composition(comp_d)
if not match:
coord_to_species[coord] = comp
coord_to_magmoms[coord] = magmom
else:
coord_to_species[match] += comp
# disordered magnetic not currently supported
coord_to_magmoms[match] = None
sum_occu = [sum(c.values()) for c in coord_to_species.values()
if not set(c.elements) == {Element("O"), Element("H")}]
if any([o > 1 for o in sum_occu]):
msg = "Some occupancies (%s) sum to > 1! If they are within " \
"the tolerance, they will be rescaled." % str(sum_occu)
warnings.warn(msg)
self.warnings.append(msg)
allspecies = []
allcoords = []
allmagmoms = []
allhydrogens = []
# check to see if magCIF file is disordered
if self.feature_flags["magcif"]:
for k, v in coord_to_magmoms.items():
if v is None:
# Proposed solution to this is to instead store magnetic
# moments as Specie 'spin' property, instead of site
# property, but this introduces ambiguities for end user
# (such as unintended use of `spin` and Specie will have
                    # fictitious oxidation state).
raise NotImplementedError(
'Disordered magnetic structures not currently supported.')
if coord_to_species.items():
for comp, group in groupby(
sorted(list(coord_to_species.items()), key=lambda x: x[1]),
key=lambda x: x[1]):
tmp_coords = [site[0] for site in group]
tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in
tmp_coords]
if self.feature_flags["magcif"]:
coords, magmoms = self._unique_coords(tmp_coords,
magmoms_in=tmp_magmom,
lattice=lattice)
else:
coords, magmoms = self._unique_coords(tmp_coords)
if set(comp.elements) == {Element("O"), Element("H")}:
# O with implicit hydrogens
im_h = comp["H"]
species = Composition({"O": comp["O"]})
else:
im_h = 0
species = comp
allhydrogens.extend(len(coords) * [im_h])
allcoords.extend(coords)
allspecies.extend(len(coords) * [species])
allmagmoms.extend(magmoms)
# rescale occupancies if necessary
for i, species in enumerate(allspecies):
totaloccu = sum(species.values())
if 1 < totaloccu <= self._occupancy_tolerance:
allspecies[i] = species / totaloccu
if allspecies and len(allspecies) == len(allcoords) \
and len(allspecies) == len(allmagmoms):
site_properties = dict()
if any(allhydrogens):
assert len(allhydrogens) == len(allcoords)
site_properties["implicit_hydrogens"] = allhydrogens
if self.feature_flags["magcif"]:
site_properties["magmom"] = allmagmoms
if len(site_properties) == 0:
site_properties = None
struct = Structure(lattice, allspecies, allcoords,
site_properties=site_properties)
struct = struct.get_sorted_structure()
if primitive and self.feature_flags['magcif']:
struct = struct.get_primitive_structure(use_site_props=True)
elif primitive:
struct = struct.get_primitive_structure()
struct = struct.get_reduced_structure()
return struct
def get_structures(self, primitive=True):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
Returns:
List of Structures.
"""
structures = []
for d in self._cif.data.values():
try:
s = self._get_structure(d, primitive)
if s:
structures.append(s)
except (KeyError, ValueError) as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
self.warnings.append(str(exc))
warnings.warn(str(exc))
if self.warnings:
warnings.warn("Issues encountered while parsing CIF: %s" % "\n".join(self.warnings))
if len(structures) == 0:
raise ValueError("Invalid cif file with no structures!")
return structures
def get_bibtex_string(self):
"""
Get BibTeX reference from CIF file.
:param data:
:return: BibTeX string
"""
try:
from pybtex.database import BibliographyData, Entry
except ImportError:
raise RuntimeError("Bibliographic data extraction requires pybtex.")
bibtex_keys = {'author': ('_publ_author_name', '_citation_author_name'),
'title': ('_publ_section_title', '_citation_title'),
'journal': ('_journal_name_full', '_journal_name_abbrev',
'_citation_journal_full', '_citation_journal_abbrev'),
'volume': ('_journal_volume', '_citation_journal_volume'),
'year': ('_journal_year', '_citation_year'),
'number': ('_journal_number', '_citation_number'),
'page_first': ('_journal_page_first', '_citation_page_first'),
'page_last': ('_journal_page_last', '_citation_page_last'),
'doi': ('_journal_DOI', '_citation_DOI')}
entries = {}
# TODO: parse '_publ_section_references' when it exists?
# TODO: CIF specification supports multiple citations.
for idx, data in enumerate(self._cif.data.values()):
# convert to lower-case keys, some cif files inconsistent
data = {k.lower(): v for k, v in data.data.items()}
bibtex_entry = {}
for field, tags in bibtex_keys.items():
for tag in tags:
if tag in data:
if isinstance(data[tag], list):
bibtex_entry[field] = data[tag][0]
else:
bibtex_entry[field] = data[tag]
# convert to bibtex author format ('and' delimited)
if 'author' in bibtex_entry:
# separate out semicolon authors
if isinstance(bibtex_entry["author"], str):
if ";" in bibtex_entry["author"]:
bibtex_entry["author"] = bibtex_entry["author"].split(";")
if isinstance(bibtex_entry['author'], list):
bibtex_entry['author'] = ' and '.join(bibtex_entry['author'])
# convert to bibtex page range format, use empty string if not specified
if ('page_first' in bibtex_entry) or ('page_last' in bibtex_entry):
bibtex_entry['pages'] = '{0}--{1}'.format(bibtex_entry.get('page_first', ''),
bibtex_entry.get('page_last', ''))
bibtex_entry.pop('page_first', None) # and remove page_first, page_list if present
bibtex_entry.pop('page_last', None)
# cite keys are given as cif-reference-idx in order they are found
entries['cifref{}'.format(idx)] = Entry('article', list(bibtex_entry.items()))
return BibliographyData(entries).to_string(bib_format='bibtex')
def as_dict(self):
"""
:return: MSONable dict
"""
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
@property
def has_errors(self):
"""
:return: Whether there are errors/warnings detected in CIF parsing.
"""
return len(self.warnings) > 0
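# --- Illustrative sketch (editor's example, not part of the original module).
# A minimal, hand-written CIF string (no symmetry block, so the parser falls
# back to P1) parsed into pymatgen Structure objects. The cell and site values
# below are made up for demonstration.
def _example_cif_parser():
    example_cif = (
        "data_Si\n"
        "_cell_length_a 3.84\n"
        "_cell_length_b 3.84\n"
        "_cell_length_c 3.84\n"
        "_cell_angle_alpha 90.0\n"
        "_cell_angle_beta 90.0\n"
        "_cell_angle_gamma 90.0\n"
        "loop_\n"
        "_atom_site_label\n"
        "_atom_site_fract_x\n"
        "_atom_site_fract_y\n"
        "_atom_site_fract_z\n"
        "_atom_site_occupancy\n"
        "Si1 0.0 0.0 0.0 1.0\n"
    )
    parser = CifParser.from_string(example_cif)
    # Expected to return a list with a single Si structure.
    return parser.get_structures(primitive=False)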
class CifWriter:
"""
A wrapper around CifFile to write CIF files from pymatgen structures.
"""
def __init__(self, struct, symprec=None, write_magmoms=False):
"""
Args:
struct (Structure): structure to write
symprec (float): If not none, finds the symmetry of the structure
and writes the cif with symmetry information. Passes symprec
to the SpacegroupAnalyzer
write_magmoms (bool): If True, will write magCIF file. Incompatible
with symprec
"""
if write_magmoms and symprec:
warnings.warn(
"Magnetic symmetry cannot currently be detected by pymatgen,"
"disabling symmetry detection.")
symprec = None
format_str = "{:.8f}"
block = OrderedDict()
loops = []
spacegroup = ("P 1", 1)
if symprec is not None:
sf = SpacegroupAnalyzer(struct, symprec)
spacegroup = (sf.get_space_group_symbol(),
sf.get_space_group_number())
            # Needs the refined structure when using symprec. This converts
# primitive to conventional structures, the standard for CIF.
struct = sf.get_refined_structure()
latt = struct.lattice
comp = struct.composition
no_oxi_comp = comp.element_composition
block["_symmetry_space_group_name_H-M"] = spacegroup[0]
for cell_attr in ['a', 'b', 'c']:
block["_cell_length_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
for cell_attr in ['alpha', 'beta', 'gamma']:
block["_cell_angle_" + cell_attr] = format_str.format(
getattr(latt, cell_attr))
block["_symmetry_Int_Tables_number"] = spacegroup[1]
block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
block["_chemical_formula_sum"] = no_oxi_comp.formula
block["_cell_volume"] = "%.8f" % latt.volume
reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
block["_cell_formula_units_Z"] = str(int(fu))
if symprec is None:
block["_symmetry_equiv_pos_site_id"] = ["1"]
block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
else:
sf = SpacegroupAnalyzer(struct, symprec)
symmops = []
for op in sf.get_symmetry_operations():
v = op.translation_vector
symmops.append(SymmOp.from_rotation_and_translation(
op.rotation_matrix, v))
ops = [op.as_xyz_string() for op in symmops]
block["_symmetry_equiv_pos_site_id"] = \
["%d" % i for i in range(1, len(ops) + 1)]
block["_symmetry_equiv_pos_as_xyz"] = ops
loops.append(["_symmetry_equiv_pos_site_id",
"_symmetry_equiv_pos_as_xyz"])
try:
symbol_to_oxinum = OrderedDict([
(el.__str__(),
float(el.oxi_state))
for el in sorted(comp.elements)])
block["_atom_type_symbol"] = symbol_to_oxinum.keys()
block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
except (TypeError, AttributeError):
symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in
sorted(comp.elements)])
atom_site_type_symbol = []
atom_site_symmetry_multiplicity = []
atom_site_fract_x = []
atom_site_fract_y = []
atom_site_fract_z = []
atom_site_label = []
atom_site_occupancy = []
atom_site_moment_label = []
atom_site_moment_crystalaxis_x = []
atom_site_moment_crystalaxis_y = []
atom_site_moment_crystalaxis_z = []
count = 0
if symprec is None:
for site in struct:
for sp, occu in sorted(site.species.items()):
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("1")
atom_site_fract_x.append("{0:f}".format(site.a))
atom_site_fract_y.append("{0:f}".format(site.b))
atom_site_fract_z.append("{0:f}".format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
magmom = Magmom(
site.properties.get('magmom', getattr(sp, 'spin', 0)))
if write_magmoms and abs(magmom) > 0:
moment = Magmom.get_moment_relative_to_crystal_axes(
magmom, latt)
atom_site_moment_label.append(
"{}{}".format(sp.symbol, count))
atom_site_moment_crystalaxis_x.append("%.5f" % moment[0])
atom_site_moment_crystalaxis_y.append("%.5f" % moment[1])
atom_site_moment_crystalaxis_z.append("%.5f" % moment[2])
count += 1
else:
# The following just presents a deterministic ordering.
unique_sites = [
(sorted(sites, key=lambda s: tuple([abs(x) for x in
s.frac_coords]))[0],
len(sites))
for sites in sf.get_symmetrized_structure().equivalent_sites
]
for site, mult in sorted(
unique_sites,
key=lambda t: (t[0].species.average_electroneg,
-t[1], t[0].a, t[0].b, t[0].c)):
for sp, occu in site.species.items():
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("%d" % mult)
atom_site_fract_x.append("{0:f}".format(site.a))
atom_site_fract_y.append("{0:f}".format(site.b))
atom_site_fract_z.append("{0:f}".format(site.c))
atom_site_label.append("{}{}".format(sp.symbol, count))
atom_site_occupancy.append(occu.__str__())
count += 1
block["_atom_site_type_symbol"] = atom_site_type_symbol
block["_atom_site_label"] = atom_site_label
block["_atom_site_symmetry_multiplicity"] = \
atom_site_symmetry_multiplicity
block["_atom_site_fract_x"] = atom_site_fract_x
block["_atom_site_fract_y"] = atom_site_fract_y
block["_atom_site_fract_z"] = atom_site_fract_z
block["_atom_site_occupancy"] = atom_site_occupancy
loops.append(["_atom_site_type_symbol",
"_atom_site_label",
"_atom_site_symmetry_multiplicity",
"_atom_site_fract_x",
"_atom_site_fract_y",
"_atom_site_fract_z",
"_atom_site_occupancy"])
if write_magmoms:
block["_atom_site_moment_label"] = atom_site_moment_label
block[
"_atom_site_moment_crystalaxis_x"] = atom_site_moment_crystalaxis_x
block[
"_atom_site_moment_crystalaxis_y"] = atom_site_moment_crystalaxis_y
block[
"_atom_site_moment_crystalaxis_z"] = atom_site_moment_crystalaxis_z
loops.append(["_atom_site_moment_label",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z"])
d = OrderedDict()
d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
self._cf = CifFile(d)
def __str__(self):
"""
Returns the cif as a string.
"""
return self._cf.__str__()
def write_file(self, filename):
"""
Write the cif file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
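# --- Illustrative sketch (editor's example, not part of the original module).
# Writing a simple cubic Si structure to CIF text; the lattice parameter is
# made up for demonstration.
def _example_cif_writer():
    struct = Structure(Lattice.cubic(3.84), ["Si"], [[0, 0, 0]])
    writer = CifWriter(struct)
    # str(writer) is the CIF text; writer.write_file("Si.cif") would write it to disk.
    return str(writer)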
def str2float(text):
"""
Remove uncertainty brackets from strings and return the float.
"""
try:
# Note that the ending ) is sometimes missing. That is why the code has
# been modified to treat it as optional. Same logic applies to lists.
return float(re.sub(r"\(.+\)*", "", text))
except TypeError:
if isinstance(text, list) and len(text) == 1:
return float(re.sub(r"\(.+\)*", "", text[0]))
except ValueError as ex:
if text.strip() == ".":
return 0
raise ex
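# --- Illustrative examples (editor's addition, not part of the original module)
# showing how uncertainty brackets and the "." placeholder are handled.
assert str2float("0.6667(3)") == 0.6667
assert str2float("4.32(1") == 4.32  # a missing closing bracket is tolerated
assert str2float(".") == 0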
|
fraricci/pymatgen
|
pymatgen/io/cif.py
|
Python
|
mit
| 58,757
|
[
"Avogadro",
"CRYSTAL",
"pymatgen"
] |
74e8b67a48355d2d9183039b62b675a855313a60effb33f7c1d6db8c169e90d5
|
""" PilotsLoggingHandler is the implementation of the PilotsLogging service
The following methods are available in the Service interface
addPilotsLogging()
getPilotsLogging
deletePilotsLogging()
"""
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Resources.MessageQueue.MQCommunication import createConsumer
class PilotsLoggingHandler(RequestHandler):
"""Server side functions for Pilots Logging service"""
@classmethod
def initializeHandler(cls, serviceInfoDict):
"""Initialization of Pilots Logging service"""
cls.consumersSet = set()
try:
result = ObjectLoader().loadObject("WorkloadManagementSystem.DB.PilotsLoggingDB", "PilotsLoggingDB")
if not result["OK"]:
return result
cls.pilotsLoggingDB = result["Value"]()
except RuntimeError as excp:
return S_ERROR("Can't connect to DB: %s" % excp)
queue = cls.srv_getCSOption("PilotsLoggingQueue")
        # This is a pretty awful hack. For some unknown reason, the CS cannot be accessed
        # with srv_getCSOption, so the full CS path is used as a backup solution.
if not queue:
queue = gConfig.getValue(serviceInfoDict["serviceSectionPath"] + "/PilotsLoggingQueue")
result = createConsumer(queue, callback=cls.consumingCallback)
if result["OK"]:
cls.consumersSet.add(result["Value"])
else:
return result
return S_OK()
@classmethod
def consumingCallback(cls, headers, message):
"""
Callback function for the MQ Consumer, called for every new message and inserting it into database.
:param headers: Headers of MQ message (not used)
:param message: Message represented as a dictionary
"""
# verify received message format
if set(message) == set(["pilotUUID", "timestamp", "source", "phase", "status", "messageContent"]):
cls.pilotsLoggingDB.addPilotsLogging(
message["pilotUUID"],
message["timestamp"],
message["source"],
message["phase"],
message["status"],
message["messageContent"],
)
types_addPilotsLogging = [str, str, str, str, str, str]
@classmethod
def export_addPilotsLogging(cls, pilotUUID, timestamp, source, phase, status, messageContent):
"""
Add new Pilots Logging entry
:param pilotUUID: Pilot reference
        :param timestamp: Date and time of the status event
        :param source: Source of the status information
        :param phase: Pilot phase
        :param status: Pilot status
        :param messageContent: Additional status information
"""
return cls.pilotsLoggingDB.addPilotsLogging(pilotUUID, timestamp, source, phase, status, messageContent)
types_getPilotsLogging = [str]
@classmethod
def export_getPilotsLogging(cls, pilotUUID):
"""
Get all Logging entries for Pilot
:param pilotUUID: Pilot reference
"""
return cls.pilotsLoggingDB.getPilotsLogging(pilotUUID)
types_deletePilotsLogging = [[str, list]]
@classmethod
def export_deletePilotsLogging(cls, pilotUUID):
"""
Delete all Logging entries for Pilot
:param pilotUUID: Pilot reference
"""
return cls.pilotsLoggingDB.deletePilotsLogging(pilotUUID)
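# --- Illustrative sketch (editor's example, not part of the original module).
# consumingCallback() only stores messages whose keys exactly match the
# expected set; the field values below are made up for demonstration.
_EXAMPLE_MESSAGE = {
    "pilotUUID": "00000000-0000-0000-0000-000000000000",
    "timestamp": "2021-01-01 12:00:00",
    "source": "PilotWrapper",
    "phase": "Installing",
    "status": "Running",
    "messageContent": "Pilot started",
}
# This mirrors the format check performed in consumingCallback():
assert set(_EXAMPLE_MESSAGE) == {"pilotUUID", "timestamp", "source", "phase", "status", "messageContent"}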
|
ic-hep/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Service/PilotsLoggingHandler.py
|
Python
|
gpl-3.0
| 3,551
|
[
"DIRAC"
] |
30d6ad659f2b2ef7a03dcb53e60abedd6d07699711c9e92592c393f20179ef5d
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""bam_handler.py module
This module contains the classes and functions to handle BAM file input and
the extraction of sequence reads used for variant detection.
"""
import pysam
__author__ = "Ryan Abo"
__copyright__ = "Copyright 2015, Ryan Abo"
__email__ = "ryanabo@gmail.com"
__license__ = "MIT"
def trim_qual(read, min_qual, min_len):
qual_str = read.qual
q = []
coords = [0, len(qual_str)]
start = seq_trim(qual_str, min_qual)
if start == len(qual_str):
return None
else:
end = len(qual_str) - seq_trim(qual_str[::-1], min_qual)
lngth = end - start
if lngth < min_len:
return None
nseq = read.seq[start:end]
nqual = qual_str[start:end]
read.seq = nseq
read.qual = nqual
return read
def fq_line(read, indel_only, min_len, trim=True):
add_val = '0'
if indel_only:
add_val = '1'
lineout = None
if trim:
read = trim_qual(read, 5, min_len)
if read:
lineout = "@" + get_seq_readname(read) + "_" + add_val + "\n" + read.seq + "\n+\n" + read.qual + "\n"
return lineout
def get_seq_readname(read):
""" """
end = '1'
if read.is_read2:
end = '2'
return read.qname + "/" + end
def check_pair_overlap(mate_seq, read, coords, trim_dir):
""" """
nmisses = 0
add_clip = True
clip_seq = read.seq[coords[0]:coords[1]]
clip_len = coords[1] - coords[0]
if abs(read.isize) < len(read.seq):
if abs(len(read.seq) - (abs(read.isize) + 1)) >= clip_len:
add_clip = False
else:
while check_overlap(trim_dir, mate_seq, clip_seq) and nmisses < 5 and len(clip_seq) > 0:
if trim_dir == 'back':
clip_seq = clip_seq[0:(len(clip_seq) - 1)]
else:
clip_seq = clip_seq[1:len(clip_seq)]
nmisses += 1
if len(clip_seq) == 0 or nmisses == 5:
add_clip = True
else:
add_clip = False
return add_clip
def check_overlap(dir, mate_seq, clip_seq):
""" """
if dir == 'back':
return mate_seq.find(clip_seq) != (len(mate_seq) - len(clip_seq))
else:
return mate_seq.find(clip_seq) != 0
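# --- Illustrative examples (editor's addition, not part of the original module).
# check_overlap() returns False when the clipped sequence sits flush against the
# relevant end of the mate sequence; the sequences below are made up.
assert check_overlap('back', 'AAAACGT', 'ACGT') is False   # clip matches the end of the mate
assert check_overlap('front', 'ACGTAAA', 'ACGT') is False  # clip matches the start of the mate
assert check_overlap('back', 'ACGTAAA', 'ACGT') is True    # clip found, but not at the end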
def get_clip_coords(read):
"""This will parse a cigar string for a read and determine the coordinates
of the read that are not softclipped by the aligner.
Read cigar is a list of tuples [(4,5),(0,80),(4,15)] 5 bp clipped in the start, 80 bp matching, 15 bp clipped at the end
Start: coords = [0,0]
Iter 1: coords = [5,5]
Iter 2: coords = [5,85]
Iter 3: coords = [5,85]
Args:
read: pysam read object.
Return:
clip_coords: List with two integer values indicating the coordinates of
the sequence read that are not clipped.
"""
clip_coords = [0, len(read.qual)]
# First value is start index, second value is end index.
coords = [0, 0]
for i in range(len(read.cigar)):
code, clen = read.cigar[i]
# Inc coords if not deletion or softclip
if not code == 2 and not code == 4:
coords[1] += clen
# First value is softclip, increment both by clip amount.
if code == 4:
if i == 0:
coords[0] = clen
coords[1] += clen
clip_coords = coords
return clip_coords
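# --- Illustrative sketch (editor's addition, not part of the original module).
# Reproduce the worked example from the docstring with a minimal stand-in for a
# pysam read; only the .qual and .cigar attributes used by get_clip_coords are set.
from types import SimpleNamespace
_fake_clipped_read = SimpleNamespace(qual="I" * 100, cigar=[(4, 5), (0, 80), (4, 15)])
assert get_clip_coords(_fake_clipped_read) == [5, 85]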
def seq_trim(qualStr, minQual):
"""Find the first position in a list of quality values that is above the minimum
quality value input.
Iterate over the list of quality values, starting at the first position, and
return the position where the quality if greater than minQual.
Args:
qualStr: List of quality values from pysam read object (i.e., read.qual).
These are Phred-based and assumed to be offset by 33.
minQual: Integer value of the minimum acceptable quality
Return:
counter: Integer representing the position in the list.
"""
counter = 0
while (ord(qualStr[counter]) - 33) < minQual:
counter += 1
if counter == len(qualStr):
break
return counter
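# --- Illustrative example (editor's addition, not part of the original module).
# With Phred+33 encoding, '!' is quality 0, '#' is quality 2 and 'I' is quality 40,
# so the first base passing a minimum quality of 5 in "!!#IIII" is at index 3.
assert seq_trim("!!#IIII", 5) == 3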
def trim_coords(qualStr, minQual):
"""Searches quality values of a sequence read start->end and end->start to
determine if there is a string of low quality sequences.
Scan along the qualStr and continue while the quality is < minQual and
return the index of the last low quality score in the string.
qualStr = [1-1-1-2-2-2-2-20-20-20-30-30-30]
seq_trim(qualStr, 3) will return 6 for the start and len(qualStr) for the end.
Args:
qualStr (list): List of quality values from pysam read object (i.e., read.qual).
These are Phred-based and assumed to be offset by 33.
minQual (int): Value of the minimum acceptable Phred quality score.
Return:
three element tuple:
1. Position start where sequence quality is good (> minQual)
2. Position end where sequence quality is good (> minQual)
3. Length of the sequence that has good quality.
"""
# Scan from the start of the qualStr and stop when the base qual > minQual
start = seq_trim(qualStr, minQual)
if start == len(qualStr):
return (0, 0, 0)
else:
# Reverse qualStr and scan from the start and stop when the base qual > minQual
end = len(qualStr) - seq_trim(qualStr[::-1], minQual)
trimLength = end - start
return (start, end, trimLength)
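# --- Illustrative example (editor's addition, not part of the original module).
# Trimming the same kind of made-up quality string from both ends leaves the run
# of 'I' (Phred 40) bases: start index 3, end index 6, trimmed length 3.
assert trim_coords("!!#III", 5) == (3, 6, 3)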
def pe_meta(read):
"""Checks if the read is from a proper paired-end mapping, assuming an Illumina
library.
If the read is mapped in a proper pair, check if it overlaps with its paired read.
Args:
read: pysam read object
Return:
proper_map: Boolean to indicate that the read-pair is properly mapped
        overlap_read: Boolean to indicate that the read pair overlaps (i.e.,
                      insert size < 2*read_len).
"""
properMap = False
overlapReads = False
if (((read.flag == 83 or read.flag == 147) and read.tlen < 0) or ((read.flag == 99 or read.flag == 163) and read.tlen > 0)):
properMap = True
if abs(read.tlen) < (2 * len(read.seq)):
overlapReads = True
return properMap, overlapReads
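# --- Illustrative sketch (editor's addition, not part of the original module).
# A minimal stand-in read (flag 99 = paired, proper pair, mate reverse, first in
# pair) with a positive template length shorter than twice the read length, so
# the pair is both properly mapped and overlapping.
from types import SimpleNamespace
_fake_pair_read = SimpleNamespace(flag=99, tlen=150, seq="A" * 100)
assert pe_meta(_fake_pair_read) == (True, True)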
def get_region_reads(bamFile, chrom, start, end):
"""Open BAM file using pysam and fetch aligned reads in the
specified region.
Args:
bamFile (str): Bam file full path, index must be in the same location
chrom (str): Chromosome name for region
start (int): Region's start position.
end (int): Region's end position.
Return:
reads (list): List containing pysam read objects
bamF (pysam bam object): Open pysam bam file object.
"""
bamF = pysam.Samfile(bamFile, 'rb')
reads = bamF.fetch(chrom, start, end)
return (reads, bamF)
def get_variant_reads(bamFile, chrom, start, end, insertSizeThresh):
"""Get the softclipped, discordant read pairs, and unmapped reads.
These reads are stored in the VarReadTracker object.
Iterate through all the reads in a region. Skip the duplicates and
qc failed reads. Store all the unmapped reads. All other reads pass
to the check_read function.
Args:
bamFile (str): Path to the bam file to open, must be indexed!
chrom (str): Chromosome of the region to extract
start (int): Region start location to extract.
end (int): Region end location to extract.
Return:
varReadTracker (VariantReadTracker): VarReadTracker object
"""
reads, bamF = get_region_reads(bamFile, chrom, start, end)
varReadTracker = VariantReadTracker(bamF, insertSizeThresh)
for read in reads:
skip = False
if read.mate_is_unmapped or read.rnext == -1:
read.mate_is_unmapped = True
if read.is_duplicate or read.is_qcfail:
skip = True
if read.is_unmapped:
varReadTracker.add_unmapped_read(read)
skip = True
if skip:
continue
varReadTracker.check_read(read)
return varReadTracker
def get_strand_str(isReverseBoolean):
strand = '+'
if isReverseBoolean:
strand = '-'
return strand
def get_strand_key(read, ordered=False):
strands = []
readStrand = '+'
if read.is_reverse:
readStrand = '-'
mateStrand = '+'
if read.mate_is_reverse:
mateStrand = '-'
strands = [readStrand, mateStrand]
if ordered:
strands.reverse()
return ':'.join(strands)
def cluster_regions(dReadLst, idx, clusterType):
distBuffer = None
clusterLst = []
for dRead in dReadLst:
if distBuffer is None:
distBuffer = dRead.readLen
# trgtStart = dRead.pos[0]
# mateStart = dRead.pos[1]
# print 'cluster_regions dRead', dRead.pos, dRead.readLen, clusterType
if len(clusterLst) == 0:
clusterLst.append([dRead.pos[idx], dRead.pos[idx] + dRead.readLen, [dRead.readInfoStr]])
# print 'Initial cluster list', clusterLst
else:
# Check overlap
add = False
for i, c in enumerate(clusterLst):
# print 'Checking read pos against cluster region', c, dRead.pos
startWithin = dRead.pos[idx] >= c[0] and dRead.pos[idx] <= c[1]
withinBuffer = dRead.pos[idx] > c[1] and dRead.pos[idx] - c[1] <= distBuffer
# print 'in check', startWithin, withinBuffer
if startWithin or withinBuffer:
readInfoLst = clusterLst[i][2]
readInfoLst.append(dRead.readInfoStr)
# print 'Add read to cluster region', clusterLst[i]
clusterLst[i] = [c[0], dRead.pos[idx] + dRead.readLen, readInfoLst]
add = True
if not add:
# print 'No add, creating new cluster region'
clusterLst.append([dRead.pos[idx], dRead.pos[idx] + dRead.readLen, [dRead.readInfoStr]])
return clusterLst
def get_cluster_membership(item, clusters, idx):
for i, cluster in enumerate(clusters):
# print cluster
# print item.pos
if item.pos[idx] >= cluster[0] and item.pos[idx] <= cluster[1]:
return i
class discReadPair:
def __init__(self, read, orderType):
self.pos = []
self.strands = []
self.readName = read.qname
self.readLen = read.rlen
self.readInfoStr = ''
# self.read = read
self.set_values(read, orderType)
def set_values(self, read, orderType):
# print 'bam_handler.py set_values() for discReadPair', read.pos, read.mpos
self.pos = [read.pos, read.mpos]
self.strands = [get_strand_str(read.is_reverse), get_strand_str(read.mate_is_reverse)]
if (orderType == 'ordered') and (read.mpos < read.pos):
# Store the read and mate ordered by chrom alignment position
self.pos.reverse()
self.strands.reverse()
self.readInfoStr = '|'.join([str(x) for x in [read.qname, self.strands[0], self.strands[1], read.tlen, read.mpos]])
# print 'bam_hanlder.py set_values() readInfoStr', self.readInfoStr
class discReads:
"""
"""
def __init__(self, insertSizeThresh):
self.reads = {'inter': {}, 'intra': {}}
self.insertSizeThresh = insertSizeThresh
self.checkedIds = {}
self.clusters = {}
self.disc = {}
def add_inter_discread(self, bam, read):
# print 'bam_handler.py add_inter_discread()', read
dRead = discReadPair(read, 'unordered')
mateRefId = bam.getrname(read.rnext)
if mateRefId not in self.reads['inter']:
self.reads['inter'][mateRefId] = {}
strandKey = get_strand_key(read)
if strandKey not in self.reads['inter'][mateRefId]:
self.reads['inter'][mateRefId][strandKey] = []
self.reads['inter'][mateRefId][strandKey].append(dRead)
# print 'bam_handler.py add_inter_discread() self.reads inter', mateRefId, strandKey, '\n'
# for dRead in self.reads['inter'][mateRefId][strandKey]:
# print '\t', dRead.readInfoStr
if mateRefId not in self.disc:
self.disc[mateRefId] = []
self.disc[mateRefId].append((read.pos, read.mpos))
# print 'bam_handler.py add_inter_discread() disc dictionary', mateRefId, self.disc[mateRefId]
def add_intra_discread(self, read, overlapping_reads):
discType = 'other'
        dRead = discReadPair(read, 'ordered')  # order the pair by alignment position for intra-chromosomal reads
disc_ins_size = abs(read.tlen) >= self.insertSizeThresh
strandKey = ''
if (read.is_reverse and read.mate_is_reverse) or (not read.is_reverse and not read.mate_is_reverse):
discType = 'inv'
strandKey = get_strand_key(read)
elif (read.is_reverse and not read.mate_is_reverse and read.pos < read.mpos) or (not read.is_reverse and read.mate_is_reverse and read.pos > read.mpos):
discType = 'td'
strandKey = '-:+'
elif disc_ins_size:
discType = 'dist'
strandKey = get_strand_key(read, True)
elif (read.is_reverse and not read.mate_is_reverse and read.pos < read.mpos) or (not read.is_reverse and read.mate_is_reverse and read.mpos < read.pos):
discType = 'other'
strandKey = get_strand_key(read, True)
else:
dRead = None
if dRead is None:
return
if discType not in self.reads['intra']:
self.reads['intra'][discType] = {}
if strandKey not in self.reads['intra'][discType]:
self.reads['intra'][discType][strandKey] = []
self.reads['intra'][discType][strandKey].append(dRead)
if read.tid not in self.disc:
self.disc[read.tid] = []
self.disc[read.tid].append((read.pos, read.mpos))
def add_read_pair(self, bam, read, overlapping_reads):
"""
Args:
read:
Return:
None
"""
if read.qname not in self.checkedIds:
self.checkedIds[read.qname] = read.qname
else:
return
if read.mapq == 0 or read.mate_is_unmapped:
return
        # Extract read-pairs that are mapped to different chromosomes or far apart.
diff_chroms = read.rnext != -1 and read.tid != read.rnext
if read.tid == read.rnext and not overlapping_reads:
self.add_intra_discread(read, overlapping_reads)
elif diff_chroms:
# print 'bam_handler.py add_read_pair(), diff_chroms', diff_chroms, read.rnext, read.tid, read.rnext
self.add_inter_discread(bam, read)
def cluster_discreads(self):
"""self.reads is a dictionary with 3 levels
1. Inter / intra
2. Chrom (inter) / inv, td, dist, other (intra)
3. -:+, -:-, +:+, +:-
4. List of discRead objects
"""
# print 'cluster_discreads()', '*'*25
for key1 in self.reads:
# print 'key1', key1
d1 = self.reads[key1]
for key2 in d1:
# print 'key2', key2
d2 = d1[key2]
interClusterClusters = {}
for key3 in d2:
# print 'key3', key3
dReadsLst = d2[key3]
# print 'read list', dReadsLst
srt1 = sorted(dReadsLst, key=lambda x: x.pos[0])
srt2 = sorted(dReadsLst, key=lambda x: x.pos[1])
c1 = cluster_regions(srt1, 0, 'target')
c2 = cluster_regions(srt2, 1, 'mate')
for item in dReadsLst:
# print 'Disc read pair obj', item.readInfoStr
cIdx1 = get_cluster_membership(item, c1, 0)
cIdx2 = get_cluster_membership(item, c2, 1)
regionPairKey = '|'.join([key1, key2, key3, str(cIdx1), str(cIdx2)])
# print 'regionPairKey', regionPairKey
leftBrkpt = c1[cIdx1][0]
rightBrkpt = c2[cIdx2][0]
leftStrand, rightStrand = key3.split(':')
if leftStrand == '+':
leftBrkpt = c1[cIdx1][1]
if rightStrand == '+':
rightBrkpt = c2[cIdx2][1]
if regionPairKey not in self.clusters:
self.clusters[regionPairKey] = {'readCount': 0, # Read count for sub cluster, based on strand
'interClusterCount': 0, # Read count for cluster ignoring strands, this will be used for interchrom clustering.
'leftBounds': c1[cIdx1][0:2],
'rightBounds': c2[cIdx2][0:2],
'leftBrkpt': leftBrkpt,
'rightBrkpt': rightBrkpt,
'clusterId': len(self.clusters) + 1}
if key1 == 'inter':
# print 'Inter check clustering', interClusterClusters
matchFound = False
for clusterKey in interClusterClusters:
# print 'Checking clustering of inter clusters', clusterKey, self.clusters[clusterKey]['leftBrkpt'], regionPairKey, leftBrkpt
# print 'Checking clustering of inter clusters', clusterKey, self.clusters[clusterKey]['rightBrkpt'], regionPairKey, rightBrkpt
if (abs(self.clusters[clusterKey]['leftBrkpt'] - leftBrkpt) < 1000) and (abs(self.clusters[clusterKey]['rightBrkpt'] - rightBrkpt) < 1000):
# Merge the clusters
interClusterClusters[clusterKey].append(regionPairKey)
matchFound = True
break
if not matchFound:
# print 'No match', regionPairKey
interClusterClusters[regionPairKey] = [regionPairKey]
self.clusters[regionPairKey]['readCount'] += 1
self.clusters[regionPairKey]['interClusterCount'] += 1
if len(interClusterClusters) > 0:
for clusterKey in interClusterClusters:
totalCounts = 0
for cKey in interClusterClusters[clusterKey]:
totalCounts += self.clusters[cKey]['readCount']
for cKey in interClusterClusters[clusterKey]:
self.clusters[cKey]['interClusterCount'] = totalCounts
self.clusters[cKey]['clusterId'] = self.clusters[clusterKey]['clusterId']
# print 'Complete clusters', self.clusters
return self.clusters
def check_inv_readcounts(self, brkpts):
""" """
brkpt1 = min(brkpts)
brkpt2 = max(brkpts)
counts = 0
bpBuffer = 50
# print 'Inversion reads', self.reads['intra']['inv']
# print 'Brkpts', brkpts
if 'inv' not in self.reads['intra']:
return counts
for strand in self.reads['intra']['inv']:
lStrand, rStrand = strand.split(':')
strandReads = self.reads['intra']['inv'][strand]
for dRead in strandReads:
# print strand, dRead.pos
if lStrand == '+' and rStrand == '+':
if (dRead.pos[0] <= (brkpt1 + bpBuffer)) and (dRead.pos[1] <= (brkpt2 + bpBuffer) and dRead.pos[1] >= (brkpt1 - bpBuffer)):
counts += 1
else:
# print dRead.pos, brkpt1, brkpt2
if (dRead.pos[0] <= (brkpt2 + bpBuffer) and dRead.pos[0] >= (brkpt1 - bpBuffer)) and dRead.pos[1] >= (brkpt2 - bpBuffer):
counts += 1
# print 'Counts', counts
return counts
def check_td_readcounts(self, brkpts):
""" """
brkpt1 = min(brkpts)
brkpt2 = max(brkpts)
counts = 0
bpBuffer = 50
if 'td' not in self.reads['intra']:
return counts
for dRead in self.reads['intra']['td']['-:+']:
if (dRead.pos[0] >= (brkpt1 - bpBuffer) and dRead.pos[0] <= (brkpt2 + bpBuffer)) and (dRead.pos[1] <= (brkpt2 + bpBuffer) and dRead.pos[1] >= (brkpt1 - bpBuffer)):
counts += 1
return counts
def check_other_readcounts(self, brkpts):
""" """
counts = [0] * len(brkpts)
for i in range(len(brkpts)):
b = brkpts[i]
if 'other' not in self.reads['intra']:
return max(counts)
for strand in self.reads['intra']['other']:
lStrand, rStrand = strand.split(':')
strandReads = self.reads['intra']['other'][strand]
for dRead in strandReads:
if abs(dRead.pos[0] - b) <= 300 or abs(dRead.pos[1] - b) <= 300:
counts[i] += 1
return max(counts)
def check_inter_readcounts(self, targetBrkptChr, targetBrkptBp, nonTargetBrkpts):
"""Count inter-chromosomal discordant read pairs linking the target breakpoint to any non-target breakpoint (both mates within 1000 bp)."""
discReadCount = 0
# print 'sv_caller.py get_disc_read_count', targetBrkptChr, targetBrkptBp
# print 'Read storage dict', self.reads['inter']
for otherBrkpts in nonTargetBrkpts:
nonTargetBrkptChr = otherBrkpts[0].replace('chr', '')
nonTargetBrkptBps = otherBrkpts[1:]
# print 'Non-target brkpts', nonTargetBrkptChr, nonTargetBrkptBps
for nonTargetBrkptBp in nonTargetBrkptBps:
# print 'non-target brkpt', nonTargetBrkptBp
if nonTargetBrkptChr in self.reads['inter']:
for strand in self.reads['inter'][nonTargetBrkptChr]:
for discReadPair in self.reads['inter'][nonTargetBrkptChr][strand]:
d1 = abs(targetBrkptBp - discReadPair.pos[0])
d2 = abs(nonTargetBrkptBp - discReadPair.pos[1])
# print 'distances', d1, d2
if d1 <= 1000 and d2 <= 1000:
discReadCount += 1
return discReadCount
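# --- Illustrative sketch (not part of BreaKmer): the same +/- bpBuffer window
# test used by check_td_readcounts above, applied to plain coordinates. The
# helper name and the example breakpoints are made up.
def _within_buffer(pos_pair, brkpt1, brkpt2, bp_buffer=50):
    left_ok = (brkpt1 - bp_buffer) <= pos_pair[0] <= (brkpt2 + bp_buffer)
    right_ok = (brkpt1 - bp_buffer) <= pos_pair[1] <= (brkpt2 + bp_buffer)
    return left_ok and right_ok

assert _within_buffer((1030, 1950), 1000, 2000)
assert not _within_buffer((500, 1950), 1000, 2000)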
class VariantReadTracker:
"""A class to track the reads that are identified to be 'misaligned' to
the reference sequence.
Attributes:
pair_indices (dict): Dictionary of dictionaries tracking the indices of paired
reads in the valid list.
valid (list): List of read objects that are valid to consider for extraction.
disc (dict): Dictionary of read IDs for read-pairs that are discordantly mapped.
unmapped (dict): Dictionary of unmapped reads with mapped mate in the region.
unmapped_keep (list): List containing names of reads that are mapped but their mate is unmapped and wasn't
kept on the first pass.
inv (list): List of tuples, each containing read-pair information that have alignments
suggestive of an inversion event.
td (list): List of tuples, each containing read-pair information that have alignments
suggestive of a tandem dup event.
other (list): List of tuples, each containing read-pair information that have alignments
suggestive of some uncategorized event.
sv (dict): Dictionary keyed by read name, storing (read, clipped sequences,
clip coordinates, indel_only) tuples for reads flagged for extraction.
bam (pysam.AlignmentFile): Bam file the reads came from.
"""
def __init__(self, bamFile, insertSizeThresh):
"""Initialize the tracker with the source bam file and the insert-size
threshold used to flag discordant read pairs.
"""
self.pair_indices = {}
self.valid = []
self.discReadTracker = discReads(insertSizeThresh)
self.unmapped = {}
self.unmapped_keep = []
self.sv = {}
self.bam = bamFile
def check_read(self, read):
"""Stores all reads in the self.pair_indices dictionary if it is
mapped.
Check if the read is part of a discordantly mapped read pair.
Check if the read is properly mapped, as indicated by bam encoding, and
whether the read overlaps with its pair.
self.valid = [(read, proper_map, overlapping_reads), (read, proper_map, overlapping_reads), ...]
self.pair_indices[read.qname][1 (read1)/0 (read2)] = index of read in self.valid
Args:
read (pysam read obj): An aligned sequence read.
Return:
None
"""
proper_map, overlapping_reads = pe_meta(read)
if read.qname not in self.pair_indices and not read.mate_is_unmapped:
self.discReadTracker.add_read_pair(self.bam, read, overlapping_reads)
self.valid.append((read, proper_map, overlapping_reads))
if read.qname not in self.pair_indices and not read.mate_is_unmapped:
self.pair_indices[read.qname] = {}
if read.qname in self.pair_indices:
self.pair_indices[read.qname][int(read.is_read1)] = len(self.valid) - 1
def add_unmapped_read(self, read):
"""Add read to unmapped dictionary with name as the key, object as the value.
Args:
read (pysam read obj): pysam read object.
Return:
None
"""
self.unmapped[read.qname] = read
def check_clippings(self, kmer_size, region_start_pos, region_end_pos):
"""Scan the stored valid reads for soft-clipped sequence and extract the
clipped portions; also keep reads within the target region whose mates
are unmapped.
"""
for read_vals in self.valid:
read, proper_map, overlap_reads = read_vals
if read.cigar or len(read.cigar) > 1:
good_qual_coords = trim_coords(read.qual, 3) # Get the (start, end, length) of the high-quality sequence bases.
clip_coords = get_clip_coords(read) # Get the [start, end] of the non-clipped sequence bases.
self.extract_clippings(read_vals, clip_coords, good_qual_coords, kmer_size)
if (read.pos >= region_start_pos and read.pos <= region_end_pos) and read.mapq > 0 and read.mate_is_unmapped:
self.unmapped_keep.append(read.qname)
def extract_clippings(self, read_vals, clip_coords, good_qual_coords, kmer_size):
"""Store the soft-clipped portions of a read (plus a kmer-sized buffer) in
self.sv when they pass the quality and mate-overlap checks.
"""
read, proper_map, overlap_reads = read_vals
clip_seqs = {'clipped': [], 'buffered': []}
if clip_coords[0] <= good_qual_coords[0] and clip_coords[1] >= good_qual_coords[1]:
return
new_clip_coords = [0, 0]
add_clip = [False, False]
indel_only = False
start_clip = clip_coords[0] > 0
end_clip = clip_coords[1] < len(read.qual)
if start_clip and end_clip:
add_clip = [True, True]
else:
if start_clip:
add_clip[0] = True
new_clip_coords = [0, clip_coords[0]]
if overlap_reads and read.is_reverse:
mate_seq = self.valid[self.pair_indices[read.qname][int(read.is_read1)]][0].seq
add_clip[0] = check_pair_overlap(mate_seq, read, [0, clip_coords[0]], 'back')
if proper_map:
if read.is_reverse:
indel_only = True
else:
indel_only = False
elif end_clip:
new_clip_coords = [clip_coords[1], len(read.seq)]
add_clip[1] = True
if overlap_reads and not read.is_reverse:
mate_seq = self.valid[self.pair_indices[read.qname][int(read.is_read1)]][0].seq
add_clip[1] = check_pair_overlap(mate_seq, read, [clip_coords[1], len(read.seq)], 'front')
if proper_map:
if read.is_reverse:
indel_only = indel_only and False
else:
indel_only = indel_only and True
final_add = add_clip[0] or add_clip[1]
if add_clip[0]:
clip_seqs['buffered'].append(read.seq[0:(clip_coords[0] + kmer_size)])
clip_seqs['clipped'].append(read.seq[0:clip_coords[0]])
if add_clip[1]:
clip_seqs['buffered'].append(read.seq[(clip_coords[1] - kmer_size):len(read.seq)])
clip_seqs['clipped'].append(read.seq[clip_coords[1]:len(read.seq)])
if final_add:
self.sv[get_seq_readname(read)] = (read, clip_seqs, new_clip_coords, indel_only)
def write_seqs(self, clipped_fa, reads_fq, sv_bam, kmer_size):
"""Write the extracted reads to output: kept unmapped reads and clipped
sequences to the fasta file, full reads to the fastq file, and the reads
themselves to the optional sv bam file.
"""
for name in self.unmapped_keep:
if name in self.unmapped:
read = self.unmapped[name]
self.sv[get_seq_readname(read)] = (read, None, None, False)
lout = ">" + read.qname + "\n" + str(read.seq)
clipped_fa.write(lout + "\n")
for name in self.sv:
read, clip_seqs, clip_coords, indel_only = self.sv[name]
if sv_bam:
sv_bam.write(read)
lout = fq_line(read, indel_only, kmer_size, True)
if lout:
reads_fq.write(lout)
if clip_seqs:
for clip in clip_seqs['buffered']:
clipped_fa.write(">" + name + "\n" + clip + "\n")
self.bam.close()
def clear_sv_reads(self):
"""Release the stored sv read records."""
self.sv = None
def get_disc_reads(self):
"""This function needs to be updated to handle the new disc read storage.
"""
return self.discReadTracker.disc
def cluster_discreads(self):
"""Cluster the stored discordant read pairs via the tracker and return the cluster dictionary."""
dReadClusters = self.discReadTracker.cluster_discreads()
return dReadClusters
def check_inv_readcounts(self, brkpts):
"""
"""
return self.discReadTracker.check_inv_readcounts(brkpts)
def check_td_readcounts(self, brkpts):
""" """
return self.discReadTracker.check_td_readcounts(brkpts)
def check_other_readcounts(self, brkpts):
""" """
return self.discReadTracker.check_other_readcounts(brkpts)
def check_inter_readcounts(self, targetChr, targetBps, nonTargetBrkpts):
""" """
return self.discReadTracker.check_inter_readcounts(targetChr, targetBps, nonTargetBrkpts)
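A minimal driving sketch for the tracker above, assuming a pysam AlignmentFile and a target region; the file name, region, k-mer size and insert-size threshold are illustrative only and not taken from BreaKmer's own driver code (the helpers pe_meta, trim_coords, etc. come from this same module).
import pysam

bam = pysam.AlignmentFile("sample.bam", "rb")            # hypothetical input
tracker = VariantReadTracker(bam, insertSizeThresh=500)   # assumed threshold
for read in bam.fetch("chr1", 10000, 20000):
    if read.is_unmapped:
        tracker.add_unmapped_read(read)
    else:
        tracker.check_read(read)
tracker.check_clippings(kmer_size=15, region_start_pos=10000, region_end_pos=20000)
clusters = tracker.cluster_discreads()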
|
a-bioinformatician/BreaKmer
|
breakmer/processor/bam_handler.py
|
Python
|
mit
| 31,353
|
[
"pysam"
] |
413f2e60661545e0b22a3cbfe059cd62da05ac0ebb47d4fd77c8f55a84493534
|
blacklisted_passwords = [
'govuknotify',
'GOVUKnotify',
'GOV.UK Notify',
'GOV.UK notify',
'gov.uk notify'
] + [
'11111111',
'12345678',
'123456789',
'access14',
'alejandra',
'alejandro',
'baseball',
'bigdaddy',
'butthead',
'cocacola',
'computer',
'consumer',
'corvette',
'danielle',
'dolphins',
'einstein',
'estrella',
'firebird',
'football',
'hardcore',
'iloveyou',
'internet',
'jennifer',
'mariposa',
'marlboro',
'maverick',
'mercedes',
'michelle',
'midnight',
'mistress',
'mountain',
'nicholas',
'password',
'password1',
'password12',
'password123',
'princess',
'qwertyui',
'redskins',
'redwings',
'rush2112',
'samantha',
'scorpion',
'sebastian',
'srinivas',
'startrek',
'starwars',
'steelers',
'sunshine',
'superman',
'swimming',
'tequiero',
'trustno1',
'victoria',
'whatever',
'xxxxxxxx',
'liverpoo',
'shithead',
'88888888',
'metallic',
'jonathan',
'asdfasdf',
'godzilla',
'williams',
'lifehack',
'platinum',
'garfield',
'69696969',
'jordan23',
'bullshit',
'airborne',
'elephant',
'explorer',
'christin',
'december',
'benjamin',
'dickhead',
'brooklyn',
'michigan',
'87654321',
'guinness',
'snowball',
'alexande',
'passw0rd',
'lasvegas',
'slipknot',
'kimberly',
'1q2w3e4r',
'carolina',
'colorado',
'creative',
'bollocks',
'darkness',
'asdfghjk',
'poohbear',
'nintendo',
'november',
'lacrosse',
'paradise',
'maryjane',
'spitfire',
'anderson',
'cherokee',
'drowssap',
'marshall',
'1qaz2wsx',
'caroline',
'franklin',
'snickers',
'courtney',
'westside',
'patricia',
'semperfi',
'freeuser',
'babygirl',
'champion',
'softball',
'security',
'wildcats',
'veronica',
'abcd1234',
'wolverin',
'remember',
'freepass',
'pearljam',
'peekaboo',
'budlight',
'electric',
'stargate',
'brittany',
'scotland',
'swordfis',
'blink182',
'virginia',
'passport',
'aaaaaaaa',
'rolltide',
'bulldogs',
'liverpool',
'chevelle',
'mitchell',
'spiderma',
'patriots',
'cardinal',
'kawasaki',
'ncc1701d',
'airplane',
'scarface',
'elizabet',
'wolfpack',
'lawrence',
'american',
'stingray',
'simpsons',
'panthers',
'pussycat',
'loverboy',
'tarheels',
'wolfgang',
'testtest',
'michael1',
'pakistan',
'infinity',
'letmein1',
'hercules',
'billybob',
'pavilion',
'changeme',
'darkside',
'zeppelin',
'darkstar',
'charlie1',
'wrangler',
'qwerty12',
'bobafett',
'business',
'sterling',
'babydoll',
'cheyenne',
'longhorn',
'presario',
'mustang1',
'21122112',
'q1w2e3r4',
'12341234',
'devildog',
'bluebird',
'metallica',
'enterpri',
'blizzard',
'asdf1234',
'harrison',
'thailand',
'1234567890',
'cadillac',
'hellfire',
'lonewolf',
'12121212',
'fireball',
'precious',
'engineer',
'basketba',
'valentin',
'wetpussy',
'morpheus',
'hotstuff',
'fuck_inside',
'goldberg',
'wrinkle1',
'serenity',
'99999999',
'bigboobs',
'chocolat',
'christia',
'birthday',
'stephani',
'1234qwer',
'98765432',
'77777777',
'highland',
'seminole',
'airforce',
'hamilton',
'buckeyes',
'abcdefgh',
'goldfish',
'deftones',
'icecream',
'pleasure',
'juventus',
'ncc1701e',
'51505150',
'cavalier',
'aardvark',
'babylon5',
'savannah',
'yankees1',
'fredfred',
'concrete',
'shamrock',
'atlantis',
'wordpass',
'predator',
'marathon',
'montreal',
'kathleen',
'jessica1',
'diamonds',
'stallion',
'letmein2',
'clitoris',
'sundance',
'renegade',
'hollywoo',
'hello123',
'sweetpea',
'stocking',
'campbell',
'christop',
'rockstar',
'geronimo',
'chandler',
'lovelove',
'greenday',
'987654321',
'creampie',
'trombone',
'55555555',
'mongoose',
'tottenha',
'butterfl',
'clifford',
'fuckyou2',
'infantry',
'skywalke',
'raistlin',
'vanhalen',
'sherlock',
'dietcoke',
'ultimate',
'superfly',
'freedom1',
'drpepper',
'lesbians',
'musicman',
'warcraft',
'microsoft',
'morrison',
'isabelle',
'thuglife',
'stonecol',
'logitech',
'florence',
'1passwor',
'bluemoon',
'22222222',
'stardust',
'margaret',
'66666666',
'charlott',
'waterloo',
'11223344',
'standard',
'alexandr',
'hannibal',
'frontier',
'welcome1',
'spanking',
'japanese',
'kristina',
'deepthroat',
'bonehead',
'showtime',
'squirrel',
'mustangs',
'septembe',
'leonardo',
'makaveli',
'vacation',
'passwor1',
'columbia',
'napoleon',
'motorola',
'william1',
'matthew1',
'robinson',
'penguins',
'8j4ye3uz',
'californ',
'qwertyuiop',
'portland',
'asdfghjkl',
'overlord',
'stranger',
'socrates',
'spiderman',
'13131313',
'national',
'intrepid',
'megadeth',
'bigballs',
'chargers',
'discover',
'isabella',
'megapass',
'grateful',
'mushroom',
'cristina',
'hongkong',
'basketball',
'satan666',
'kingkong',
'penelope',
'thompson',
'anything',
'knickers',
'playtime',
'lightnin',
'slapshot',
'titleist',
'werewolf',
'fernando',
'blackcat',
'tacobell',
'kittycat',
'thunder1',
'thankyou',
'scoobydo',
'coltrane',
'lonestar',
'heather1',
'beefcake',
'zzzzzzzz',
'personal',
'anthony1',
'fuckface',
'lowrider',
'punkrock',
'dodgeram',
'dingdong',
'qqqqqqqq',
'johnjohn',
'asshole1',
'crusader',
'syracuse',
'meridian',
'turkey50',
'keyboard',
'ilovesex',
'blackman',
'richmond',
'sandiego',
'cooldude',
'mariners',
'caliente',
'fletcher',
'porsche9',
'kangaroo',
'springer',
'goodtime',
'chelsea1',
'freckles',
'nebraska',
'webmaster',
'blueeyes',
'director',
'monopoly',
'blackjac',
'southern',
'peterpan',
'fuckyou1',
'a1b2c3d4',
'sentinel',
'richard1',
'1234abcd',
'guardian',
'candyman',
'mandingo',
'munchkin',
'billyboy',
'rootbeer',
'assassin',
'frederic',
'giovanni',
'scarlett',
'achilles',
'warriors',
'plymouth',
'cameltoe',
'fuckfuck',
'sithlord',
'backdoor',
'chevrole',
'lorraine',
'cosworth',
'eternity',
'verbatim',
'chocolate',
'deadhead',
'pineappl',
'rosemary',
'porkchop',
'blackdog',
'alexander',
'valhalla',
'santiago',
'portugal',
'1qazxsw2',
'stripper',
'sebastia',
'hurrican',
'1x2zkg8w',
'atlantic',
'hyperion',
'44444444',
'skittles',
'hastings',
'gangbang',
'sailboat',
'immortal',
'maryland',
'columbus',
'beautiful',
'swordfish',
'ncc1701a',
'spartans',
'threesom',
'dilligaf',
'pinkfloy',
'catalina',
'formula1',
'scooter1',
'colombia',
'lancelot',
'angelica',
'rockhard',
'poontang',
'starship',
'starbuck',
'catherin',
'kentucky',
'33333333',
'12344321',
'sapphire',
'raiders1',
'excalibu',
'imperial',
'phillips',
'golfball',
'front242',
'macdaddy',
'qwer1234',
'cowboys1',
'dannyboy',
'martinez',
'aquarius',
'pppppppp',
'clarence',
'eatpussy',
'beatrice',
'phillies',
'research',
'gggggggg',
'doughboy',
'lollipop',
'qazwsxed',
'crazybab',
'brothers',
'butthole',
'rightnow',
'greatone',
'gateway1',
'wildfire',
'jackson1',
'0.0.0.000',
'snuggles',
'phoenix1',
'technics',
'gesperrt',
'brucelee',
'woofwoof',
'theodore',
'richards',
'punisher',
'username',
'bunghole',
'elizabeth',
'lifetime',
'masterbate',
'diamond1',
'abnormal',
'davidson',
'starfish',
'penetration',
'michaela',
'caligula',
'railroad',
'bradford',
'military',
'bearbear',
'patrick1',
'christine',
'swinging',
'labrador',
'justdoit',
'meatball',
'saturday',
'defender',
'piercing',
'microsof',
'mechanic',
'robotech',
'universe',
'newpass6',
'hellyeah',
'zaq12wsx',
'spectrum',
'jjjjjjjj',
'oklahoma',
'mmmmmmmm',
'blueblue',
'wolverine',
'sniffing',
'keystone',
'bbbbbbbb',
'handsome',
'tttttttt',
'ssssssss',
'somethin',
'melissa1',
'marcius2',
'godsmack',
'rangers1',
'deeznuts',
'kingston',
'yosemite',
'tommyboy',
'masterbating',
'marianne',
'happyday',
'manchest',
'unbelievable',
'aberdeen',
'nathalie',
'intercourse',
'supersta',
'bcfields',
'hardrock',
'children',
'commando',
'sinclair',
'squerting',
'jeanette',
'meathead',
'gandalf1',
'magnolia',
'kenworth',
'redalert',
'homemade',
'webmaste',
'insertion',
'temptress',
'gretchen',
'celebrity',
'ragnarok',
'trinidad',
'kingfish',
'blackhaw',
'thursday',
'meatloaf',
'interacial',
'streaming',
'pertinant',
'pool6123',
'animated',
'gordon24',
'fantasies',
'touching',
'homepage',
'ejaculation',
'whocares',
'jamesbon',
'amsterda',
'february',
'luckydog',
'businessbabe',
'brandon1',
'experience',
'software',
'thirteen',
'rasputin',
'greenbay',
'pa55word',
'contortionist',
'sneakers',
'sonyfuck',
'test1234',
'roadkill',
'cheerleaers',
'madeline',
'christian',
'brighton',
'housewifes',
'emmanuel',
'bigmoney',
'seductive',
'sexygirl',
'canadian',
'gangbanged',
'crawford',
'hotpussy',
'implants',
'intruder',
'andyod22',
'barcelon',
'chainsaw',
'chickens',
'downtown',
'magicman',
'clevelan',
'designer',
'budweise',
'experienced',
'pitchers',
'passwords',
'jeremiah',
'alliance',
'halflife',
'saratoga',
'positive',
'transexual',
'close-up',
'sunnyday',
'starfire',
'pictuers',
'testing1',
'tiberius',
'lisalisa',
'golfgolf',
'flounder',
'majestic',
'trailers',
'mikemike',
'whitesox',
'angelina',
'goodluck',
'charlton',
'fingerig',
'gallaries',
'lockerroom',
'treasure',
'absolutely',
'homepage-',
'beerbeer',
'testerer',
'fordf150',
'pa55w0rd',
'kamikaze',
'japanees',
'masterbaiting',
'callaway',
'panasoni',
'housewife',
'18436572',
'sullivan',
'terrapin',
'masturbation',
'hardcock',
'freeporn',
'pornographic',
'traveler',
'moneyman',
'shopping',
'thumbnils',
'amateurs',
'apollo13',
'goldwing',
'doghouse',
'pounding',
'truelove',
'underdog',
'wrestlin',
'sherwood',
'johannes',
'balloons',
'happy123',
'flamingo',
'paintbal',
'llllllll',
'twilight',
'christie',
'bullseye',
'knickerless',
'binladen',
'peterson',
'thanatos',
'albatros',
'getsdown',
'nwo4life',
'underwear',
'dddddddd',
'deeznutz',
'enterprise',
'misfit99',
'solution',
'meredith',
'barefoot',
'50spanks',
'scandinavian',
'original',
'shannon1',
'techniques',
'chemical',
'salvador',
'manchester',
'buckshot',
'thegreat',
'goldstar',
'triangle',
'kristine',
'snowboar',
'penetrating',
'roadking',
'rockford',
'chicago1',
'ferrari1',
'galeries',
'godfathe',
'gargoyle',
'gangster',
'pussyman',
'pooppoop',
'newcastl',
'mortgage',
'snoopdog',
'assholes',
'property',
'broadway',
'butterfly',
'earthlink',
'westwood',
'blackbir',
'slippery',
'pianoman',
'tomorrow',
'roadrunn',
'attitude',
'seahawks',
'tunafish',
'cinnamon',
'northern',
'23232323',
'zerocool',
'limewire',
'films+pic+galeries',
'francois',
'fuckthis',
'girfriend',
'uncencored',
'chrisbln',
'netscape',
'hhhhhhhh',
'knockers',
'tazmania',
'pharmacy',
'arsenal1',
'anaconda',
'australi',
'gotohell',
'bulldog1',
'monalisa',
'whiteout',
'james007',
'bitchass',
'southpar',
'lionking',
'megatron',
'hawaiian',
'gymnastic',
'panther1',
'wp2003wp',
'passwort',
'friendly',
'oooooooo',
'bullfrog',
'holyshit',
'jasmine1',
'sergeant',
'babyblue',
'pass1234',
'poseidon',
'confused',
'hollywood',
'insertions',
'juliette',
'hayabusa',
'hawkeyes',
'geoffrey',
'chuckles',
'hounddog',
'philippe',
'thunderb',
'marino13',
'handyman',
'cerberus',
'gamecock',
'magician',
'preacher',
'chrysler',
'contains',
'hedgehog',
'hoosiers',
'dutchess',
'wareagle',
'ihateyou',
'sunflowe',
'senators',
'terminal',
'laurence',
'maradona',
'america1',
'chicken1',
'passpass',
'r2d2c3po',
'myxworld',
'missouri',
'wishbone',
'infiniti',
'wonderboy',
'stanford',
'smeghead',
'titanium',
'charlene',
'fishing1',
'fullmoon',
'absolute',
'seinfeld',
'pingpong',
'matthews',
'recovery',
'babyface',
'gladiato',
'paranoid',
'packers1',
'longjohn',
'clarinet',
'mortimer',
'modelsne',
'vladimir',
'together',
'avalanch',
'55bgates',
'cccccccc',
'paradigm',
'operator',
'valencia',
'cocksuck',
'creature',
'borussia',
'browning',
'heritage',
'millions',
'starcraf',
'spaceman',
'chester1',
'rrrrrrrr',
'sandwich',
'magazine',
'buttfuck',
'yeahbaby',
'11235813',
'bangbang',
'charles1',
'ffffffff',
'doberman',
'overkill',
'claymore',
'brewster',
'electron',
'eastside',
'minimoni',
'wildbill',
'wildcard',
'yyyyyyyy',
'sweetnes',
'skywalker',
'alphabet',
'babybaby',
'graphics',
'florida1',
'flexible',
'fuckinside',
'ursitesux',
'christma',
'wwwwwwww',
'just4fun',
'rebecca1',
'adrienne',
'19691969',
'silverad',
'rhiannon',
'10101010',
'ashleigh',
'qwerasdf',
'presiden',
'newyork1',
'brigitte',
'buddyboy',
'heineken',
'millwall',
'beautifu',
'sinister',
'smashing',
'teddybea',
'ticklish',
'lipstick',
'reynolds',
'applepie',
'digital1',
'dinosaur',
'icehouse',
'insanity',
'bluefish',
'strength',
'sentnece',
'temppass',
'medicine',
'hahahaha',
'casanova',
'fountain',
'dolphin1',
'porsche1',
'vampires',
'highheel',
'kkkkkkkk',
'illinois',
'21212121',
'stonecold',
'testpass',
'jiggaman',
'federico',
'scorpio1',
'rt6ytere',
'madison1',
'coolness',
'christina',
'coldbeer',
'brittney',
'washingt',
'stephanie',
'shepherd',
'tiffany1',
'mephisto',
'dragonba',
'nygiants',
'password2',
'corleone',
'kittykat',
'vikings1',
'splinter',
'pipeline',
'meowmeow',
'chestnut',
'longdong',
'quant4307s',
'eastwood',
'moonligh',
'illusion',
'jayhawks',
'swingers',
'stefanie',
'jefferso',
'michael2',
'fastball',
'scrabble',
'dirtbike',
'customer',
'nemrac58',
'bobdylan',
'hopeless',
'kcj9wx5n',
'killbill',
'volkswag',
'windmill',
'iloveyou1',
'starligh',
'soulmate',
'mcdonald',
'rochelle',
'oblivion',
'valkyrie',
'concorde',
'costello',
'delaware',
'nocturne',
'herewego',
'earnhard',
'eeeeeeee',
'mobydick',
'reddevil',
'reckless',
'radiohea',
'coolcool',
'classics',
'choochoo',
'wireless',
'bigblock',
'summer99',
'sexysexy',
'platypus',
'telephon',
'12qwaszx',
'fishhead',
'paramedi',
'lonesome',
'katherin',
'moonbeam',
'monster1',
'monkeybo',
'windsurf',
'31415926',
'smoothie',
'snowflak',
'playstat',
'playboy1',
'roadster',
'hardware',
'captain1',
'undertak',
'uuuuuuuu',
'criminal',
'1a2b3c4d',
'thedoors',
'annabell',
'catwoman',
'faithful',
'farscape',
'genesis1',
'pumpkins',
'training',
'islander',
'jamesbond',
'19841984',
'shitface',
'maxwell1',
'armstron',
'alejandr',
'augustus',
'care1839',
'fantasia',
'freefall',
'sandrine',
'qwerqwer',
'crystal1',
'nineinch',
'broncos1',
'winston1',
'warrior1',
'iiiiiiii',
'iloveyou2',
'straight',
'specialk',
'tinkerbe',
'jellybea',
'cbr900rr',
'gabriell',
'gertrude',
'glennwei',
'sausages',
'vanguard',
'trinitro',
'eldorado',
'whiskers',
'wildwood',
'istheman',
'interest',
'25802580',
'woodland',
'strawber',
'amsterdam',
'catherine',
'football1',
'vancouve',
'vauxhall',
'acidburn',
'myspace1',
'buttercu',
'minemine',
'bigpoppa',
'blackout',
'blowfish',
'talisman',
'sundevil',
'shanghai',
'spencer1',
'slowhand',
'jonathon',
'michaels',
'resident',
'redbaron',
'andromed',
'harddick',
'5wr2i7h8',
'charlotte',
'fredrick',
'francesc',
'ferguson',
'fairlane',
'dogpound',
'pornporn',
'clippers',
'daylight',
'nnnnnnnn',
'budapest',
'whistler',
'whatwhat',
'wanderer',
'idontkno',
'thisisit',
'robotics',
'gonzalez',
'drummer1',
'private1',
'cornwall',
'christopher',
'corvet07',
'iverson3',
'bluesman',
'terminat',
'johnson1',
'bastards',
'fuckoff1',
'doomsday',
'pornking',
'bookworm',
'highbury',
'mischief',
'ministry',
'bigbooty',
'yogibear',
'september',
'lkjhgfds',
'123123123',
'carpedie',
'foxylady',
'gatorade',
'valdepen',
'deadpool',
'hotmail1',
'kordell1',
'vvvvvvvv',
'jackson5',
'bergkamp',
'zanzibar',
'services',
'sheridan',
'checkers',
'luv2epus',
'rainbow6',
'qwerty123',
'commande',
'nightwin',
'hotmail0',
'enternow',
'viewsoni',
'berkeley',
'woodstoc',
'starstar',
'patience',
'hawaii50',
'gorgeous',
'challeng',
'callisto',
'firewall',
'firefire',
'passmast',
'transfer',
'clarissa',
'moonshin',
'jakejake',
'bluejays',
'southpark',
'tomahawk',
'leedsutd',
'jermaine',
'jeepster',
'josephin',
'matthias',
'marriage',
'antelope',
'cabernet',
'cheshire',
'california',
'fuckhead',
'dominion',
'trucking',
'nostromo',
'honolulu',
'dynamite',
'mollydog',
'windows1',
'washburn',
'vincent1',
'irishman',
'bearcats',
'sylveste',
'marijuan',
'reddwarf',
'12312312',
'hardball',
'goldfing',
'chambers',
'fandango',
'festival',
'scrapper',
'cromwell',
'entrance',
'klondike',
'mohammed',
'insomnia',
'24682468',
'24242424',
'billbill',
'blessing',
'solitude',
'pimpdadd',
'johndeer',
'babylove',
'barbados',
'carpente',
'fishbone',
'fireblad',
'scissors',
'screamer',
'obsidian',
'progress',
'tottenham',
'comanche',
'monsters',
'veronika',
'20202020',
'blueball',
'yankees2',
'wrestler',
'sealteam',
'sidekick',
'smackdow',
'sporting',
'remingto',
'arkansas',
'andersen',
'barcelona',
'baltimor',
'fortress',
'fishfish',
'firefigh',
'rsalinas',
'dontknow',
'universa',
'heinrich',
'enforcer',
'katherine',
'waterboy',
'23skidoo',
'zildjian',
'stoppedby',
'sexybabe',
'speakers',
'polopolo',
'perfect1',
'thrasher',
'lakeside',
'masamune',
'cherries',
'chipmunk',
'cezer121',
'carnival',
'fearless',
'funstuff',
'salasana',
'pantera1',
'qwert123',
'creation',
'nascar24',
'erection',
'ericsson',
'internal',
'1michael',
'19781978',
'25252525',
'sheepdog',
'snowbird',
'toriamos',
'tennesse',
'mazdarx7',
'revolver',
'babycake',
'hallowee',
'cannabis',
'dolemite',
'dodgers1',
'painting',
'coventry',
'christmas',
'cocksucker',
'hotgirls',
'eggplant',
'mustang6',
'monkey12',
'wapapapa',
'volleyba',
'birthday4',
'stephen1',
'suburban',
'soccer10',
'something',
'starcraft',
'soccer12',
'plastics',
'penthous',
'peterbil',
'lakewood',
'reginald',
'goodgirl',
'gotyoass',
'capricor',
'getmoney',
'godfather',
'gilligan',
'dudedude',
'pasadena',
'opendoor',
'magellan',
'printing',
'pressure',
'killkill',
'whiteboy',
'voyager1',
'jackjack',
'success1',
'spongebo',
'phialpha',
'password9',
'tickling',
'lexingky',
'redheads',
'apple123',
'backbone',
'aviation',
'green123',
'carlitos',
'cartman1',
'camaross',
'favorite6',
'ginscoot',
'sabrina1',
'devil666',
'doughnut',
'paintball',
'rainbow1',
'umbrella',
'abc12345',
'complete',
'deerhunt',
'darklord',
'holidays',
'hetfield',
'hillbill',
'hugetits',
'evolutio',
'whiplash',
'wg8e3wjf',
'istanbul',
'bluebell',
'wrestling',
'superior',
'suckdick',
'stephane',
'playball',
'marcello',
'marjorie',
'rockwell',
'baritone',
'gladiator',
'cricket1',
'clemente',
'exchange',
'kisskiss',
'kristian',
'montecar',
'mississi',
'washington',
'20012001',
'bigdick1',
'penguin1',
'pathfind',
'testibil',
'lightning',
'lighting',
'republic',
'anthony7',
'goldeney',
'cameron1',
'freefree',
'screwyou',
'passthie',
'postov1000',
'puppydog',
'a1234567',
'cleopatr',
'contract',
'buffalo1',
'bordeaux',
'sunlight',
'sprinter',
'peaches1',
'pinetree',
'theforce',
'jupiter1',
'mckenzie',
'annmarie',
'austin31',
'78945612',
'calimero',
'chevrolet',
'favorite',
'fellatio',
'f00tball',
'francine',
'gateway2',
'gamecube',
'giovanna',
'scheisse',
'offshore',
'macaroni',
'pringles',
'trouble1',
'coolhand',
'colonial',
'darthvad',
'cygnusx1',
'natalie1',
'eighteen',
'elcamino',
'blueberr',
'yamahar1',
'stafford',
'snowboard',
'speedway',
'playboy2',
'toonarmy',
'baberuth',
'gonzales',
'chiquita',
'charisma',
'capslock',
'cashmone',
'gizmodo1',
'dragonfl',
'rachelle',
'tropical',
'crescent',
'nathanie',
'espresso',
'kikimora',
'20002000',
'birthday1',
'beatles1',
'bigdicks',
'beethove',
'blacklab',
'woodwork',
'survivor',
'pinnacle',
'lemonade',
'lalakers',
'lebowski',
'lalalala',
'mercury1',
'rocknrol',
'riversid',
'11112222',
'alleycat',
'ambrosia',
'australia',
'hattrick',
'cassandr',
'charlie123',
'fighting',
'gabriela',
'outoutout',
'pussy123',
'randolph',
'coldplay',
'novifarm',
'notredam',
'honeybee',
'wednesda',
'waterfal',
'billabon',
'zachary1',
'01234567',
'superstar',
'stiletto',
'sigmachi',
'somerset',
'smithers',
'playmate',
'pinkfloyd',
'laetitia',
'revoluti',
'archange',
'handball',
'chewbacc',
'fullback',
'dominiqu',
'mandrake',
'vagabond',
'csfbr5yy',
'deadspin',
'ncc74656',
'houston1',
'hurricane',
'horseman',
'virginie',
'idontknow',
'151nxjmt',
'bendover',
'surprise',
'supernov',
'phantom1',
'playoffs',
'johngalt',
'maserati',
'riffraff',
'architec',
'cambridg',
'foreplay',
'sanity72',
'salesman',
'dreaming',
'palmtree',
'luckyone',
'treefrog',
'usmarine',
'darkange',
'cyclones',
'bubba123',
'building',
'eclipse1',
'kayleigh',
'mustang2',
'bigtruck',
'yeahyeah',
'stickman',
'skipper1',
'singapor',
'southpaw',
'slamdunk',
'therock1',
'tiger123',
'mccarthy',
'13576479',
'greywolf',
'candyass',
'catfight',
'frankie1',
'qazwsxedc',
'pregnant',
'death666',
'negative',
'hooligan',
'everlast',
'mulligan',
'motocros',
'waterman',
'inspiron',
'bigblack',
'zaq1xsw2',
'yy5rbfsc',
'takehana',
'skydiver',
'special1',
'slimshad',
'sopranos',
'patches1',
'thething',
'mash4077',
'matchbox',
'14789632',
'amethyst',
'baseball1',
'greenman',
'goofball',
'castillo',
'capitals',
'favorite2',
'forsaken',
'feelgood',
'gfxqx686',
'dilbert1',
'dukeduke',
'downhill',
'longhair',
'lockdown',
'mamacita',
'rainyday',
'pumpkin1',
'prospect',
'rainbows',
'trinity1',
'trooper1',
'citation',
'bukowski',
'bubbles1',
'humphrey',
'kcchiefs',
'morticia',
'montrose',
'154ugeiu',
'year2005',
'wonderfu',
'tampabay',
'slapnuts',
'spartan1',
'sprocket',
'sometime',
'stanley1',
'thinking',
'lavalamp',
'laserjet',
'jediknig',
'mazda626',
'alexandra',
'hairball',
'graduate',
'cartoons',
'cashflow',
'outsider',
'mallrats',
'primetime21',
'valleywa',
'abcdefg1',
'natedogg',
'nineball',
'normandy',
'nicetits',
'buddy123',
'highlife',
'earthlin',
'eatmenow',
'kirkland',
'money123',
'warhamme',
'instinct',
'jackass1',
'20spanks',
'blackjack',
'085tzzqi',
'383pdjvl',
'sparhawk',
'pavement',
'johnston',
'material',
'melanie1',
'redlight',
'aolsucks',
'alexalex',
'b929ezzh',
'goodyear',
'griffith',
'863abgsg',
'carebear',
'checkmat',
'forgetit',
'rushmore',
'question',
'ptfe3xxp',
'prophecy',
'aircraft',
'access99',
'cocktail',
'civilwar',
'cleveland',
'claudia1',
'dapzu455',
'daisydog',
'eldiablo',
'kingrich',
'mudvayne',
'vipergts',
'italiano',
'innocent',
'yqlgr667',
'zxcvbnm1',
'suckcock',
'stephens',
'380zliki',
'sexylady',
'sixtynin',
'sleeping',
'sparkles',
'letsdoit',
'landmark',
'marauder',
'register',
'basebal1',
'azertyui',
'hawkwind',
'capetown',
'flathead',
'fisherma',
'flipmode',
'gabriel1',
'dreamcas',
'dirtydog',
'dickdick',
'destiny1',
'trumpet1',
'aaaaaaa1',
'conquest',
'creepers',
'constant',
'cornhole',
'nirvana1',
'elisabet',
'musician',
'milamber',
'isacs155',
'1million',
'1letmein',
'stonewal',
'sexsexsex',
'sonysony',
'smirnoff',
'pentagon',
'paulpaul',
'lighthou',
'letmein22',
'letmesee',
'merchant',
'redstorm',
'14141414',
'allison1',
'basement',
'hartford',
'hardwood',
'fatluvr69',
'fidelity',
'feathers',
'gogators',
'general1',
'dragon69',
'dragonball',
'papillon',
'optimist',
'longshot',
'undertow',
'copenhag',
'delldell',
'culinary',
'ibilltes',
'hihje863',
'envelope',
'express1',
'mustang5',
'wellingt',
'waterski',
'infinite',
'iloveyou!',
'063dyjuy',
'survival',
'stockton',
'softtail',
'slimed123',
'pizzaman',
'pathetic',
'tigercat',
'jennings',
'rootedit',
'riverrat',
'atreides',
'happines',
'chadwick',
'ffvdj474',
'foreskin',
'gameover',
'scoobydoo',
'saxophon',
'macintos',
'lollypop',
'qwertzui',
'adelaide',
'acapulco',
'cybersex',
'davecole',
'davedave',
'nineteen',
'highlander',
'kristin1',
'knuckles',
'katarina',
'montana1',
'wingchun',
'watching',
'illmatic',
'bigpenis',
'blue1234',
'xxxxxxx1',
'svetlana',
'368ejhih',
'playstation',
'pescator',
'jo9k2jw2',
'jupiter2',
'jurassic',
'marines1',
'14725836',
'12345679',
'alessand',
'angelika',
'alpha123',
'barefeet',
'badabing',
'gsxr1000',
'gregory1',
'766rglqy',
'69camaro',
'calendar',
'fishcake',
'giuseppe',
'gnasher23',
'fuzzball',
'save13tx',
'russell1',
'dripping',
'dragon12',
'dragster',
'mainland',
'poophead',
'porn4life',
'producer',
'rapunzel',
'velocity',
'vanessa1',
'trueblue',
'vampire1',
'navyseal',
'nightowl',
'nonenone',
'nightmar',
'bulletin',
'hillside',
'hzze929b',
'hellohel',
'edgewise',
'embalmer',
'excalibur',
'mounta1n',
'muffdive',
'vivitron',
'winfield',
'wednesday',
'17171717',
'17011701',
'tangerin',
'stewart1',
'summer69',
'sweetness',
'surveyor',
'stirling',
'ssptx452',
'thriller',
'master12',
'anastasi',
'almighty',
'argentin',
'flanders',
'flyers88',
'firehawk',
'flashman',
'godspeed',
'giveitup',
'funtimes',
'frenchie',
'disaster',
'lovelife',
'qcmfd454',
'undertaker',
'911turbo',
'cristian',
'daughter',
'notebook',
'borabora',
'brisbane',
'mohammad',
'bettyboo',
'blackice',
'yvtte545',
'tailgate',
'shitshit',
'sooners1',
'smartass',
'pennywis',
'thetruth',
'reindeer',
'allstate',
'greatest',
'caldwell',
'fussball',
'geneviev',
'samadams',
'dipstick',
'losangel',
'loverman',
'pussy4me',
'university',
'troubles',
'churchil',
'crazyman',
'cutiepie',
'bullwink',
'bulldawg',
'horsemen',
'escalade',
'minnesot',
'moonlight',
'mwq6qlzo',
'verygood',
'bellagio',
'sickness',
'skeeter1',
'phaedrus',
'thumper1',
'tmjxn151',
'thematri',
'letmeinn',
'jeffjeff',
'johnmish',
'11001001',
'allnight',
'amatuers',
'attorney',
'happyman',
'graywolf',
'474jdvff',
'551scasi',
'fishtank',
'freewill',
'glendale',
'frogfrog',
'gerhardt',
'scirocco',
'devilman',
'pallmall',
'lunchbox',
'manhatta',
'mandarin',
'pxx3eftp',
'president',
'chris123',
'daedalus',
'natasha1',
'nancy123',
'nevermin',
'newcastle',
'edmonton',
'monterey',
'violator',
'wildstar',
'winter99',
'iqzzt580',
'19741974',
'1q2w3e4r5t',
'bigbucks',
'blackcoc',
'yesterda',
'skinhead',
'shadow12',
'snapshot',
'soccer11',
'pleasant',
'pimpdaddy',
'lionhear',
'littlema',
'lincoln1',
'laughing',
'redshift',
'12locked',
'arizona1',
'alfarome',
'hawthorn',
'goodfell',
'554uzpad',
'flipflop',
'garrison',
'rustydog',
'sandberg',
'samsung1',
'dreamer1',
'detectiv',
'dominick',
'paladin1',
'papabear',
'panasonic',
'nyyankee',
'pussyeat',
'princeto',
'climbing',
'dad2ownu',
'daredevi',
'necklace',
'huskers1',
'hornyman',
'england1',
'motherfucker',
'ilovegod',
'201jedlz',
'wrinkle5',
'zoomzoom',
'09876543',
'starlite',
'peternorth',
'jeepjeep',
'joystick',
'junkmail',
'jojojojo',
'rockrock',
'rasta220',
'andyandy',
'auckland',
'gooseman',
'happydog',
'charlie2',
'cardinals',
'fortune12',
'generals',
'division',
'ozlq6qwm',
'macgyver',
'mallorca',
'prelude1',
'trousers',
'aerosmit',
'clueless',
'delpiero',
'nounours',
'buckaroo',
'honeydew',
'hooters1',
'hugohugo',
'evangeli'
]
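A small sketch (not part of the Notify codebase) of how a blacklist like this is typically consulted during validation; the helper name and the case-insensitive normalisation are assumptions.
_blacklist = {entry.lower() for entry in blacklisted_passwords}

def is_blacklisted(password):
    # Case-insensitive membership test against the list above.
    return password.lower() in _blacklist

assert is_blacklisted('Password123')
assert not is_blacklisted('correct horse battery staple')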
|
gov-cjwaszczuk/notifications-admin
|
app/main/_blacklisted_passwords.py
|
Python
|
mit
| 33,857
|
[
"COLUMBUS",
"ESPResSo"
] |
8b958b7d651ff08c2b1db4ec98d320746f5d34e5cfc954f6c39391088852b623
|
import matplotlib
matplotlib.use('Agg', warn=False)
import os
from os.path import dirname, join
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import image_comparison
from matplotlib.testing.compare import compare_images
from iminuit import Minuit
from probfit.plotting import draw_pdf, draw_compare_hist
from probfit.pdf import gaussian, linear
from probfit.funcutil import rename
from probfit.functor import Extended, AddPdfNorm, AddPdf
from probfit.costfunc import UnbinnedLH, BinnedLH, BinnedChi2, Chi2Regression, \
SimultaneousFit
class image_comparison:
def __init__(self, baseline):
baselineimage = join(dirname(__file__), 'baseline', baseline)
actualimage = join(os.getcwd(), 'actual', baseline)
self.baseline = baseline
self.baselineimage = baselineimage
self.actualimage = actualimage
try:
os.makedirs(dirname(actualimage))
except OSError:
pass
def setup(self):
from matplotlib import rcParams, rcdefaults
#use('Agg', warn=False) # use Agg backend for these tests
# These settings *must* be hardcoded for running the comparison
# tests and are not necessarily the default values as specified in
# rcsetup.py
rcdefaults() # Start with all defaults
rcParams['font.family'] = 'Bitstream Vera Sans'
rcParams['text.hinting'] = False
rcParams['text.hinting_factor'] = 8
rcParams['text.antialiased'] = False
rcParams['lines.antialiased'] = False
def test(self):
# compare_images
x = compare_images(self.baselineimage, self.actualimage, 1.0)
if x is not None:
print(x)
assert x is None
def __call__(self, f):
def tmp():
self.setup()
f()
plt.savefig(self.actualimage)
plt.close()
return self.test()
tmp.__name__ = f.__name__
return tmp
@image_comparison('draw_pdf.png')
def test_draw_pdf():
plt.figure()
f = gaussian
draw_pdf(f, {'mean':1., 'sigma':2.}, bound=(-10, 10))
@image_comparison('draw_pdf_linear.png')
def test_draw_pdf_linear():
plt.figure()
f = linear
draw_pdf(f, {'m':1., 'c':2.}, bound=(-10, 10))
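For reference, adding another comparison test follows the same pattern; the baseline file name below is hypothetical and would need a matching image under baseline/.
@image_comparison('draw_pdf_gaussian_narrow.png')
def test_draw_pdf_gaussian_narrow():
    plt.figure()
    draw_pdf(gaussian, {'mean': 0., 'sigma': 0.5}, bound=(-5, 5))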
|
piti118/probfit
|
probfit/test/testplotting.py
|
Python
|
mit
| 2,342
|
[
"Gaussian"
] |
1fbf1b7615c77818c524ded473f27eaf823d87011cc8b645b0400f62311a9dd0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Cannon with Chemistry """
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
import logging
import multiprocessing as mp
import numpy as np
from collections import OrderedDict
import scipy.optimize as op
from . import (atomic, cannon, model, plot, utils)
logger = logging.getLogger("fireworks")
# TODO
from time import time
class FireworksModel(cannon.CannonModel):
_trained_attributes \
= ("_coefficients", "_scatter", "_offsets", "_label_vector_description",
"_atomic_lines", "_stellar_parameter_labels")
_data_attributes \
= ("_labels", "_wavelengths", "_fluxes", "_flux_uncertainties")
def __init__(self, labels, wavelengths, fluxes, flux_uncertainties,
verify=True):
"""
Initialise a Cannon-with-Chemistry model.
:param labels:
A table with columns as labels, and stars as rows.
:type labels:
:class:`~astropy.table.Table`
:param wavelengths:
The wavelengths of the given pixels.
:type wavelengths:
:class:`np.array`
:param fluxes:
An array of fluxes for each star as shape (num_stars, num_pixels).
The num_stars should match the rows in `labels`.
:type fluxes:
:class:`np.ndarray`
:param flux_uncertainties:
An array of 1-sigma flux uncertainties for each star as shape
(num_stars, num_pixels). The shape of the `flux_uncertainties` array
should match the `fluxes` array.
:type flux_uncertainties:
:class:`np.ndarray`
"""
super(FireworksModel, self).__init__(labels, fluxes, flux_uncertainties,
wavelengths=wavelengths, verify=verify)
@property
@model.requires_training_wheels
def lv_labels(self):
"""
Return a list of the labels involved in this model. This includes any
labels in the description of the label vector, as well as any individual
abundance labels.
"""
labels = super(self.__class__, self).lv_labels
# We check for _atomic_lines instead of _trained because we might train
# without any atomic lines.
if self._atomic_lines is not None:
labels += self._atomic_lines.keys()
return labels
def train(self, label_vector_description, N=None, limits=None, pivot=True,
atomic_lines=None, X_H=True, stellar_parameter_labels=None, **kwargs):
"""
Train a Cannon model based on the label vector description provided.
:params label_vector_description:
The human-readable form of the label vector description.
:type label_vector_description:
str or list of str
:param N: [optional]
Limit the number of stars used in the training set. If left to None,
all stars will be used.
:type N:
None or int
:param limits: [optional]
A dictionary containing labels (keys) and upper/lower limits (as a
two-length tuple).
:type limits:
dict
:param pivot: [optional]
Pivot the data about the labels.
:type pivot:
bool
:params atomic_lines: [optional]
Atomic absorption lines that should be modelled as part of the first
entry in the label vector. If given, this should be a dictionary
containing the label names that correspond to $\log_\epsilon(X)$
abundances in `CannonModel._labels` as keys, and the values should
be an `~astropy.table.Table` containing the atomic transitions for
that element.
:type atomic_lines:
dict
:param X_H: [optional]
Use abundances in X_H format. If set to `False`, then log(X)
abundance formats are assumed.
:type X_H:
bool
:returns:
A three-length tuple containing the model coefficients, the scatter
in each pixel, and the label offsets.
"""
if stellar_parameter_labels is None:
stellar_parameter_labels = ["TEFF", "LOGG", "PARAM_M_H"]
# Since building the atomic line models takes longer than building the
# label vector array, we build the vector array first so that any errors
# will appear first.
self._label_vector_description = label_vector_description
lv = self._parse_label_vector_description(label_vector_description)
lva, use, offsets = cannon._build_label_vector_array(self._labels, lv,
N, limits, pivot)
# Initialise the requisite arrays.
N_stars, N_pixels = self._fluxes.shape[:2]
scatter = np.nan * np.ones(N_pixels)
coefficients = np.nan * np.ones((N_pixels, lva.shape[0]))
weak_line_fluxes = np.ones((N_stars, N_pixels))
# Any atomic lines to model?
atomic_lines = _validate_atomic_lines(self._labels, atomic_lines)
if atomic_lines:
N_species = len(atomic_lines)
N_transitions = sum(map(len, atomic_lines.values()))
msg = []
for k, v in atomic_lines.items():
msg.append("{0} (species {1}; {2} lines)".format(
k, ", ".join(map(str, set(v["species"]))), len(v)))
logger.info("Including {0} weak lines from {1} elements: {2}".format(
N_transitions, N_species, ", ".join(msg)))
# Build the log(X)->EW models (or vice-versa, sigh)
# Estimate the FWHM kernel for each star, or estimate from all stars
# (We need the FWHM to link the EW to an actual flux value.)
# [TODO]
p_sigma = 0.35
# We should calculate the expected EWs (and therefore fluxes) for
# each atomic line for each star in the training set, because this
# will form the first element of our label vector array.
all_stellar_parameters = np.vstack(
[self._labels[label] for label in stellar_parameter_labels]).T
# [TODO] This part is unnecessarily slow. Speed it up.
# [TODO] It's also probably categorically wrong.
atomic_line_model = {}
for label, transitions in atomic_lines.items():
ew_coefficients = atomic.approximate_atomic_transitions(
all_stellar_parameters, transitions, X_H=X_H, **kwargs)
atomic_line_model[label] = (transitions, ew_coefficients)
# Generate the weak line fluxes for each star.
for i, stellar_parameters in enumerate(all_stellar_parameters):
for label, (transitions, ew_coefficients) in atomic_line_model.items():
abundance = self._labels[label][i]
for j, mu in enumerate(transitions["wavelength"]):
# The 1e-3 factor is to turn the EW from milliAngstroms
# into Angstroms.
expected_ew = atomic._solve_equivalent_width(abundance,
ew_coefficients[j], mu, stellar_parameters) * 1e-3
# Translate this into a weak profile.
# EW = sqrt(2*pi) * amplitude * sigma
# we know the central wavelength, we know the sigma
# (EW is in mA, and we want A)
amplitude = expected_ew/(np.sqrt(2*np.pi) * p_sigma)
weak_line_fluxes[i] *= 1. \
- amplitude * np.exp(-(self._wavelengths - mu)**2 \
/ (2. * p_sigma**2))
# Update the offsets to be zero for atomic lines.
offsets.update({ k: 0 for k in atomic_lines })
elif kwargs.get("_atomic_line_model", None):
atomic_line_model = kwargs.pop("_atomic_line_model")
offsets.update({ k: 0 for k in atomic_line_model })
else:
atomic_line_model = None
assert N is None, "whoops?"
N_threads = int(max([1, kwargs.pop("threads", 1)]))
pb_size = 100 if kwargs.pop("__progressbar", True) else 0
if N_threads == 1:
pb_message = "Training {0} model from {1} stars with {2} pixels:\n"\
.format(self.__class__.__name__[:-5], N_stars, N_pixels)
for i in utils.progressbar(range(N_pixels), pb_message, pb_size):
if np.isfinite(self._fluxes[use, i] \
* self._flux_uncertainties[use, i]).sum() == 0:
continue
# Train the Cannon on the residuals of the data.
# I *think* this is OK to do
# (e.g., Hogg may be wrong??? -- famous last words?)
coefficients[i, :], scatter[i] = cannon._fit_pixel(
self._fluxes[use, i] / weak_line_fluxes[use, i],
self._flux_uncertainties[use, i], lva, **kwargs)
if not np.any(np.isfinite(scatter[i] * coefficients[i, :])):
logger.warn("No finite coefficients at pixel {}!".format(i))
else:
pb_mg = "Training Cannon model in {0} parallel threads from {1} s"\
"tars with {2} pixels each".format(N_threads, N_stars, N_pixels)
# Summertime!
processes = []
pool = mp.Pool(N_threads)
for i in range(N_pixels):
if not np.any(np.isfinite(
self._fluxes[use, i] * self._flux_uncertainties[use, i])):
continue
p = pool.apply_async(_fit_pixel, args=(self._fluxes[use, i],
self._flux_uncertainties[use, i], lva), kwds=kwargs)
processes.append((i, p))
# Collate the results.
for i, p in utils.progressbar(processes, message=pb_mg,
size=N_pixels if pb_size else -1):
coefficients[i, :], scatter[i] = p.get()
if not np.any(np.isfinite(scatter[i] * coefficients[i, :])):
logger.warn("No finite coefficients at pixel {}".format(i))
# Winter is coming.
pool.close()
pool.join()
# Save all of these to the model.
self._trained = True
self._coefficients, self._scatter, self._offsets \
= coefficients, scatter, offsets
self._atomic_lines, self._stellar_parameter_labels \
= atomic_line_model, stellar_parameter_labels
return (coefficients, scatter, offsets, atomic_line_model,
weak_line_fluxes)
@model.requires_training_wheels
def predict(self, labels=None, **labels_as_kwargs):
"""
Predict spectra from the trained model, given the labels.
:param labels:
The labels required for the trained model. This should be a N-length
list matching the number of unique terms in the model, including any
atomic (weak) line abundances in the order given by `self.lv_labels`
property. Alternatively, labels can be explicitly given as keyword
arguments.
:type labels:
list
:returns:
Model spectra for the given labels.
:raises TypeError:
If the model is not trained.
"""
try:
labels[0]
except (TypeError, IndexError):
labels = [labels]
names = self.lv_labels
if labels is None:
labels = [labels_as_kwargs[name] for name in names]
elif len(labels) != len(names):
raise ValueError("expected number of labels is {0}, and {1} were "\
"given: {2}".format(len(names), len(labels),
", ".join(names)))
t_i = time()
# Generate the Cannon-ical flux.
label_vector_indices = self._parse_label_vector_description(
self._label_vector_description, return_indices=True,
__columns=names)
offsets = np.array([self._offsets[name] for name in names])
fluxes = np.dot(self._coefficients, cannon._build_label_vector_rows(
label_vector_indices, labels - offsets).T).flatten()
t_a = time()
tc = 0
# Include treatment of any atomic lines.
if self._atomic_lines is not None:
N = len(self._atomic_lines)
stellar_parameters = [labels[names.index(_)] \
for _ in self._stellar_parameter_labels]
#weak_line_fluxes = np.ones(fluxes.size)
for i, (label, abundance) \
in enumerate(zip(self._atomic_lines.keys(), labels[-N:])):
transitions, ew_coefficients = self._atomic_lines[label]
for j, mu in enumerate(transitions["wavelength"]):
# The 1e-3 factor is to turn the EW from milliAngstroms to A
t_cc = time()
expected_ew = atomic._solve_equivalent_width(abundance,
ew_coefficients[j], mu, stellar_parameters) * 1e-3
tc += time() - t_cc
p_sigma = 0.35
assert expected_ew >= 0
# Translate this into a Gaussian profile.
# EW = sqrt(2*pi) * amplitude * sigma
# we know the central wavelength, we know the sigma
amplitude = expected_ew/(np.sqrt(2*np.pi) * p_sigma)
fluxes *= 1. \
- amplitude * np.exp(-(self._wavelengths - mu)**2 \
/ (2. * p_sigma**2))
#fluxes *= weak_line_fluxes
t_b = time()
#print("A B", t_a - t_i, t_b - t_a, 100.*tc/(t_b-t_a))
return fluxes
@model.requires_training_wheels
def solve_labels(self, flux, flux_uncertainties, **kwargs):
"""
Solve the labels for given fluxes (and uncertainties) using the trained
model.
:param fluxes:
The normalised fluxes. These should be on the same wavelength scale
as the trained data.
:type fluxes:
:class:`~np.array`
:param flux_uncertainties:
The 1-sigma uncertainties in the fluxes. This should have the same
shape as `fluxes`.
:type flux_uncertainties:
:class:`~np.array`
:returns:
The labels for the given fluxes as a dictionary.
:raises TypeError:
If the model is not trained.
"""
# Get an initial estimate of those parameters from a simple inversion.
# (This is very much incorrect for non-linear terms).
finite = \
np.isfinite(self._coefficients[:, 0] * flux * flux_uncertainties)
Cinv = 1.0 / (self._scatter[finite]**2 + flux_uncertainties[finite]**2)
A = np.dot(self._coefficients[finite, :].T,
Cinv[:, None] * self._coefficients[finite, :])
B = np.dot(self._coefficients[finite, :].T,
Cinv * flux[finite])
initial_vector_p0 = np.linalg.solve(A, B)
# p0 contains all coefficients, but we only want the linear terms to
# make an initial estimate.
indices, names = self._get_linear_indices(self._label_vector_description,
full_output=True)
if len(indices) == 0:
raise NotImplementedError("no linear terms in Cannon model -- TODO")
# Get the initial guess of just the linear parameters.
# (Here we make a + 1 adjustment for the first '1' term)
p0 = initial_vector_p0[indices + 1]
logger.debug("Initial guess: {0}".format(dict(zip(names, p0))))
# Now we need to build up label vector rows by indexing relative to the
# labels that we will actually be solving for (in this case it's the
# variable 'names'), and not the labels as they are currently referenced
# in self._labels
label_vector_indices = self._parse_label_vector_description(
self._label_vector_description, return_indices=True,
__columns=names)
# Do we have individual line abundances to consider?
if self._atomic_lines is not None:
# Need to extend p0 to include individual abundances.
names.extend(self._atomic_lines.keys())
# Just take the median of the abundances, that should be OK for an
# initial guess (at any stellar parameters).
# [TODO] In the future we may just guess this at M_H, but need to
# be sure we are dealing with M_H and not log_X, or shift by solar.
p0 = np.hstack([p0, [np.nanmedian(self._labels[p]) \
for p in self._atomic_lines.keys()]])
# Optimise the curve to solve for the parameters and covariance.
full_output = kwargs.pop("full_output", False)
kwds = kwargs.copy()
kwds.setdefault("maxfev", 10000)
f = lambda _, *labels: self.predict(labels)[finite]
td = time()
p_opt, p_covariance = op.curve_fit(f, self._coefficients[finite],
flux[finite], p0=p0, sigma=1.0/np.sqrt(Cinv), absolute_sigma=True,
**kwds)
te = time()
print("OK:",1000 * (te - td))
# We might have solved for any number of parameters, so we return a dict
p_opt = { k: p_opt[i] + self._offsets[k] for i, k in enumerate(names) }
logger.debug("Final solution: {}".format(p_opt))
if full_output:
return (p_opt, p_covariance)
return p_opt
@model.requires_training_wheels
def cross_validate(self, label_vector_description=None, **kwargs):
"""
Perform leave-one-out cross-validation on the trained model.
:params label_vector_description: [optional]
The human-readable form of the label vector description. If None is
given, the currently trained label vector description is used.
:type label_vector_description:
str
:returns:
A two-length tuple containing an array of the expected train labels
for each star, and the inferred labels.
"""
# Initialise arrays.
if label_vector_description is None:
label_vector_description = self._label_vector_description
label_names = self.lv_labels
N_realisations, N_labels = self._fluxes.shape[0], len(label_names)
inferred_test_labels = np.nan * np.ones((N_realisations, N_labels))
expected_test_labels = np.ones((N_realisations, N_labels))
debug = kwargs.get("debug", False)
for i in range(N_realisations):
logger.info("Doing cross-validation realisation {0}/{1} on a test "\
"set containing 1 star".format(i + 1, N_realisations))
mask = np.ones(N_realisations, dtype=bool)
mask[i] = False
# Create a model to use so we don't overwrite self.
model = self.__class__(self._labels[mask], self._wavelengths,
self._fluxes[mask, :], self._flux_uncertainties[mask, :])
# Directly provide the atomic line model, since this won't change
# (almost at all) when one star is removed.
model.train(label_vector_description,
stellar_parameter_labels=self._stellar_parameter_labels,
_atomic_line_model=self._atomic_lines, **kwargs)
# Solve for the one left out.
try:
inferred_labels = model.solve_labels(
self._fluxes[~mask, :].flatten(),
self._flux_uncertainties[~mask, :].flatten())
except:
logger.exception("Exception in solving star with index {0} in "\
"cross-validation".format(i))
if debug: raise
else:
# Save inferred test labels.
for j, name in enumerate(label_names):
inferred_test_labels[i, j] = inferred_labels[name]
finally:
# Save expected test labels.
for j, name in enumerate(label_names):
expected_test_labels[i, j] = self._labels[~mask][name]
return (label_names, expected_test_labels, inferred_test_labels)
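# --- Worked sketch of the weak-line profile used in train()/predict() above:
# an equivalent width (in Angstroms) maps to a Gaussian absorption profile via
# EW = sqrt(2*pi) * amplitude * sigma. The grid, EW and sigma values here are
# illustrative only.
def _ew_to_profile_example():
    wavelengths = np.linspace(4999.0, 5001.0, 201)    # Angstroms (hypothetical)
    mu, p_sigma = 5000.0, 0.35                        # line centre and profile sigma
    expected_ew = 0.080                               # 80 mA expressed in Angstroms
    amplitude = expected_ew / (np.sqrt(2 * np.pi) * p_sigma)
    flux = 1.0 - amplitude * np.exp(-(wavelengths - mu)**2 / (2.0 * p_sigma**2))
    # Summing the absorbed depth recovers the equivalent width (to grid precision).
    recovered_ew = np.sum(1.0 - flux) * (wavelengths[1] - wavelengths[0])
    assert abs(recovered_ew - expected_ew) < 0.01
    return flux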
def _validate_atomic_lines(labels, atomic_lines):
"""
Check the atomic line data provided.
"""
if atomic_lines is None:
return False
if not isinstance(atomic_lines, dict):
raise TypeError("atomic lines should be a dictionary with log(X) "\
" abundance labels (as keys) and transition tables as values")
atomic_lines = OrderedDict(sorted(atomic_lines.items(),
key=lambda _: min(_[1]["species"])))
valid_atomic_lines = {}
# Check that the keys actually exist in the _labels table.
for label, transitions in atomic_lines.items():
if label not in labels.dtype.names:
raise IndexError("cannot find atomic line abundance label {0} "\
"in the labels table".format(label))
required_columns = ("wavelength", "species", "excitation_potential",
"loggf")
for column in required_columns:
if column not in transitions.dtype.names:
raise TypeError("could not find '{0}' column in table of "
"transitions for corresponding label '{1}'".format(
column, label))
# Check that the transitions in a given value set are all of the
# same element.
species = set(map(int, transitions["species"]))
if len(species) > 1:
raise ValueError("the '{0}' abundance label contains mixed "
"species in the transitions table: {1}".format(label,
", ".join(species)))
valid_atomic_lines[label] = transitions
return valid_atomic_lines
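A minimal check of the validation contract above, using fabricated label and transition tables; the element, wavelengths, and values are invented for illustration.
from astropy.table import Table

labels = Table({"TEFF": [5777.], "LOGG": [4.4], "PARAM_M_H": [0.0], "FE_H": [0.0]})
transitions = Table({
    "wavelength": [15194.5, 15207.5],
    "species": [26.0, 26.0],                # a single element per abundance label
    "excitation_potential": [2.22, 5.39],
    "loggf": [-4.75, 0.08],
})
validated = _validate_atomic_lines(labels, {"FE_H": transitions})
assert list(validated) == ["FE_H"]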
|
andycasey/fireworks
|
fireworks/fireworks.py
|
Python
|
mit
| 22,183
|
[
"Gaussian"
] |
a764581bd6d1f5767dcff4dd9210c16d2c2d6e681916558e32e08e888cac25ad
|
"""
Nonlinear filters.
"""
import numpy as np
import scipy.signal as sig
def neighbours(img_dims, coord):
"""
Get all neighbours of a pixel.
Parameters :
img_dims : image dimensions
coord : coordinates of pixel
"""
rows, cols = img_dims
i, j = coord
# Check if coords are within image
if (i < 0 or j < 0 or i >= rows or j >= cols):
raise ValueError("Coordinates outside of image")
return ([(k, l) for k in range(i-1, i+2) for l in range(j-1, j+2)
if (k != i or l != j) and (0 <= k < rows) and (0 <= l < cols)])
def median(img, kernel_size):
"""
Median filter.
Parameters :
img : input 2D image
kernel_size : (length, width) of kernel
"""
# Convert to numpy array if necessary
if not isinstance(img, np.ndarray): img = np.array(img)
return sig.medfilt2d(img, kernel_size)
def bilateral_weight(img, pixel_coord, neigh_coord, sigma_r, sigma_d):
"""
Bilateral weight function.
Parameters :
img : image
pixel_coord : coordinates of pixel
neigh_coord : coordinates of neighbouring pixel
sigma_d : spatial parameter (larger features smoothened)
sigma_r : range parameter (approaches Gaussian convolution)
"""
i, j = pixel_coord
k, l = neigh_coord
# Check that coordinates are within image
# (N.B. Not necessary if calling from inside bilateral())
#if (i < 0 or j < 0 or i >= img.shape[0] or j >= img.shape[1] or
# k < 0 or l < 0 or k >= img.shape[0] or l >= img.shape[1]):
# raise ValueError("Coordinates outside of image")
# Get domain kernel
d = np.exp(- ((i - k)**2 + (j - l)**2) / (2 * sigma_d**2))
# Get range kernel
r = np.exp(- (img[i, j] - img[k, l])**2 / (2 * sigma_r**2))
# If multiple colour bands, use this:
#r = np.exp(- np.linalg.norm(img[i, j] - img[k, l]) / (2 * sigma_r**2))
# Multiply to get weight function
return d * r
# FIXME: not working properly yet
def bilateral(img, sigma_d, sigma_r):
"""
Bilateral filter.
Parameters :
img : image
sigma_d : spatial parameter (larger features smoothened)
sigma_r : range parameter (approaches Gaussian convolution)
"""
# Convert to numpy array if necessary
if not isinstance(img, np.ndarray): img = np.array(img)
sigma_d = float(sigma_d)
sigma_r = float(sigma_r)
img_filt = np.empty(img.shape)
# Get all pixels
pixels = [(i, j) for i in range(img.shape[0]) for j in range(img.shape[1])]
# Loop over all pixels
for pixel in pixels:
# Loop over neighbours
numer = 0.0
denom = 0.0
for neigh in neighbours(img.shape, pixel):
# Compute weight function
f = img[neigh]
w = bilateral_weight(img, pixel, neigh, sigma_r=sigma_r, sigma_d=sigma_d)
numer += f * w
denom += w
print(numer, denom)
img_filt[pixel] = numer / denom
print(pixel, img[pixel], img_filt[pixel])
return img_filt
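A small self-contained example of the two filters above on synthetic data; the array size, noise level, and parameter values are arbitrary, and it assumes the raise/argument-order fixes noted in the code.
import numpy as np

rng = np.random.default_rng(0)
img = np.zeros((32, 32))
img[8:24, 8:24] = 1.0                      # bright square on a dark background
noisy = img.copy()
noisy[rng.random(img.shape) < 0.05] = 1.0  # sprinkle salt noise

smoothed_median = median(noisy, 3)
smoothed_bilateral = bilateral(noisy, sigma_d=2.0, sigma_r=0.1)
print(np.abs(smoothed_median - img).mean(), np.abs(smoothed_bilateral - img).mean())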
|
pauljxtan/pystuff
|
pycompvis/compvis/imgproc/filters/nonlinear.py
|
Python
|
mit
| 3,097
|
[
"Gaussian"
] |
ae73edcd3bcb78f4fc62a881d12e6ccafcdd4f436381ea77744fc323bf43ee37
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the LaunchService class."""
import asyncio
import collections.abc
import contextlib
import logging
import platform
import signal
import threading
import traceback
from typing import Coroutine
from typing import Iterable
from typing import List # noqa: F401
from typing import Optional
from typing import Set # noqa: F401
from typing import Text
from typing import Tuple # noqa: F401
import launch.logging
import osrf_pycommon.process_utils
from .event import Event
from .event_handlers import OnIncludeLaunchDescription
from .event_handlers import OnShutdown
from .events import IncludeLaunchDescription
from .events import Shutdown
from .launch_context import LaunchContext
from .launch_description import LaunchDescription
from .launch_description_entity import LaunchDescriptionEntity
from .some_actions_type import SomeActionsType
from .utilities import AsyncSafeSignalManager
from .utilities import visit_all_entities_and_collect_futures
class LaunchService:
"""Service that manages the event loop and runtime for a launched system."""
def __init__(
self,
*,
argv: Optional[Iterable[Text]] = None,
noninteractive: bool = False,
debug: bool = False
) -> None:
"""
Create a LaunchService.
:param: argv stored in the context for access by the entities, None results in []
:param: noninteractive if True (not default), this service will assume it has
no terminal associated e.g. it is being executed from a non interactive script
:param: debug if True (not default), asyncio the logger are seutp for debug
"""
# Setup logging and debugging.
launch.logging.launch_config.level = logging.DEBUG if debug else logging.INFO
self.__debug = debug
self.__argv = argv if argv is not None else []
# Setup logging
self.__logger = launch.logging.get_logger('launch')
# Setup context and register a built-in event handler for bootstrapping.
self.__context = LaunchContext(argv=self.__argv, noninteractive=noninteractive)
self.__context.register_event_handler(OnIncludeLaunchDescription())
self.__context.register_event_handler(OnShutdown(on_shutdown=self.__on_shutdown))
# Setup storage for state.
self._entity_future_pairs = \
[] # type: List[Tuple[LaunchDescriptionEntity, asyncio.Future]]
# Used to allow asynchronous use of self.__loop_from_run_thread without
# it being set to None by run() as it exits.
self.__loop_from_run_thread_lock = threading.RLock()
self.__loop_from_run_thread = None
self.__this_task = None
# Used to indicate when shutdown() has been called.
self.__shutting_down = False
self.__shutdown_when_idle = False
# Used to keep track of whether or not there were unexpected exceptions.
self.__return_code = 0
def emit_event(self, event: Event) -> None:
"""
Emit an event synchronously and thread-safely.
If the LaunchService is not running, the event is queued until it is.
"""
future = None
with self.__loop_from_run_thread_lock:
if self.__loop_from_run_thread is not None:
# loop is in use, asynchronously emit the event
future = asyncio.run_coroutine_threadsafe(
self.__context.emit_event(event),
self.__loop_from_run_thread
)
else:
# loop is not in use, synchronously emit the event, and it will be processed later
self.__context.emit_event_sync(event)
if future is not None:
# Block until asynchronously emitted event is emitted by loop
future.result()
def include_launch_description(self, launch_description: LaunchDescription) -> None:
"""
Evaluate a given LaunchDescription and visits all of its entities.
This method is thread-safe.
"""
self.emit_event(IncludeLaunchDescription(launch_description))
def _prune_and_count_entity_future_pairs(self):
needs_prune = False
for pair in self._entity_future_pairs:
if pair[1].done():
needs_prune = True
if needs_prune:
self._entity_future_pairs = \
[pair for pair in self._entity_future_pairs if not pair[1].done()]
return len(self._entity_future_pairs)
def _prune_and_count_context_completion_futures(self):
needs_prune = False
for future in self.__context._completion_futures:
if future.done():
needs_prune = True
if needs_prune:
self.__context._completion_futures = \
[f for f in self.__context._completion_futures if not f.done()]
return len(self.__context._completion_futures)
def _is_idle(self):
number_of_entity_future_pairs = self._prune_and_count_entity_future_pairs()
number_of_entity_future_pairs += self._prune_and_count_context_completion_futures()
return number_of_entity_future_pairs == 0 and self.__context._event_queue.empty()
@contextlib.contextmanager
def _prepare_run_loop(self):
try:
# Acquire the lock and initialize the loop.
with self.__loop_from_run_thread_lock:
if self.__loop_from_run_thread is not None:
raise RuntimeError(
'LaunchService cannot be run multiple times concurrently.'
)
this_loop = asyncio.get_event_loop()
if self.__debug:
this_loop.set_debug(True)
# Set the asyncio loop for the context.
self.__context._set_asyncio_loop(this_loop)
# Recreate the event queue to ensure the same event loop is being used.
new_queue = asyncio.Queue()
while True:
try:
new_queue.put_nowait(self.__context._event_queue.get_nowait())
except asyncio.QueueEmpty:
break
self.__context._event_queue = new_queue
self.__loop_from_run_thread = this_loop
# Get current task.
try:
# Python 3.7+
this_task = asyncio.current_task(this_loop)
except AttributeError:
this_task = asyncio.Task.current_task(this_loop)
self.__this_task = this_task
# Setup custom signal handlers for SIGINT, SIGTERM and maybe SIGQUIT.
sigint_received = False
def _on_sigint(signum):
nonlocal sigint_received
base_msg = 'user interrupted with ctrl-c (SIGINT)'
if not sigint_received:
self.__logger.warning(base_msg)
ret = self._shutdown(
reason='ctrl-c (SIGINT)', due_to_sigint=True, force_sync=True
)
assert ret is None, ret
sigint_received = True
else:
self.__logger.warning('{} again, ignoring...'.format(base_msg))
def _on_sigterm(signum):
signame = signal.Signals(signum).name
self.__logger.error(
'user interrupted with ctrl-\\ ({}), terminating...'.format(signame))
# TODO(wjwwood): try to terminate running subprocesses before exiting.
self.__logger.error('using {} can result in orphaned processes'.format(signame))
self.__logger.error('make sure no processes launched are still running')
this_loop.call_soon(this_task.cancel)
with AsyncSafeSignalManager(this_loop) as manager:
# Setup signal handlers
manager.handle(signal.SIGINT, _on_sigint)
manager.handle(signal.SIGTERM, _on_sigterm)
if platform.system() != 'Windows':
manager.handle(signal.SIGQUIT, _on_sigterm)
# Yield asyncio loop and current task.
yield this_loop, this_task
finally:
# No matter what happens, unset the loop.
with self.__loop_from_run_thread_lock:
self.__context._set_asyncio_loop(None)
self.__loop_from_run_thread = None
self.__shutting_down = False
async def _process_one_event(self) -> None:
next_event = await self.__context._event_queue.get()
await self.__process_event(next_event)
async def __process_event(self, event: Event) -> None:
self.__logger.debug("processing event: '{}'".format(event))
for event_handler in tuple(self.__context._event_handlers):
if event_handler.matches(event):
self.__logger.debug(
"processing event: '{}' ✓ '{}'".format(event, event_handler))
self.__context._push_locals()
entities = event_handler.handle(event, self.__context)
entities = \
entities if isinstance(entities, collections.abc.Iterable) else (entities,)
for entity in [e for e in entities if e is not None]:
from .utilities import is_a_subclass
if not is_a_subclass(entity, LaunchDescriptionEntity):
raise RuntimeError(
"expected a LaunchDescriptionEntity from event_handler, got '{}'"
.format(entity)
)
self._entity_future_pairs.extend(
visit_all_entities_and_collect_futures(entity, self.__context))
self.__context._pop_locals()
else:
pass
# Keep this commented for now, since it's very chatty.
# self.__logger.debug(
# 'launch.LaunchService',
# "processing event: '{}' x '{}'".format(event, event_handler))
async def run_async(self, *, shutdown_when_idle=True) -> int:
"""
Visit all entities of all included LaunchDescription instances asynchronously.
This should only ever be run from the main thread and not concurrently with other
asynchronous runs.
:param: shutdown_when_idle if True (default), the service will shutdown when idle.
"""
# Make sure this has not been called from any thread but the main thread.
if threading.current_thread() is not threading.main_thread():
raise RuntimeError(
'LaunchService can only be run in the main thread.'
)
return_code = 0
with self._prepare_run_loop() as (this_loop, this_task):
# Log logging configuration details.
launch.logging.log_launch_config(logger=self.__logger)
# Setup the exception handler to make sure we return non-0 when there are errors.
def _on_exception(loop, context):
nonlocal return_code
return_code = 1
return loop.default_exception_handler(context)
this_loop.set_exception_handler(_on_exception)
process_one_event_task = None
while True:
try:
# Check if we're idle, i.e. no on-going entities (actions) or events in
# the queue
is_idle = self._is_idle() # self._entity_future_pairs is pruned here
if not self.__shutting_down and shutdown_when_idle and is_idle:
ret = await self._shutdown(reason='idle', due_to_sigint=False)
assert ret is None, ret
continue
# Stop running if we're shutting down and there's no more work
if self.__shutting_down and is_idle:
if (
process_one_event_task is not None and
not process_one_event_task.done()
):
process_one_event_task.cancel()
break
# Collect futures to wait on
# We only need to wait on futures if there are no events to wait on
entity_futures = []
if self.__context._event_queue.empty():
entity_futures = [pair[1] for pair in self._entity_future_pairs]
entity_futures.extend(self.__context._completion_futures)
# If the current task is done, create a new task to process any events
# in the queue
if process_one_event_task is None or process_one_event_task.done():
process_one_event_task = this_loop.create_task(self._process_one_event())
# Add the process event task to the list of awaitables
entity_futures.append(process_one_event_task)
# Wait on events and futures
completed_tasks, _ = await asyncio.wait(
entity_futures,
return_when=asyncio.FIRST_COMPLETED
)
# Propagate exception from completed tasks
completed_tasks_exceptions = [task.exception() for task in completed_tasks]
completed_tasks_exceptions = list(filter(None, completed_tasks_exceptions))
if completed_tasks_exceptions:
self.__logger.debug('An exception was raised in an async action/event')
# in case there is more than one completed_task, log other exceptions
for completed_tasks_exception in completed_tasks_exceptions[1:]:
self.__logger.error(completed_tasks_exception)
raise completed_tasks_exceptions[0]
except KeyboardInterrupt:
continue
except asyncio.CancelledError:
self.__logger.error('run task was canceled')
return_code = 1
break
except Exception as exc:
msg = 'Caught exception in launch (see debug for traceback): {}'.format(exc)
self.__logger.debug(traceback.format_exc())
self.__logger.error(msg)
ret = await self._shutdown(reason=msg, due_to_sigint=False)
assert ret is None, ret
return_code = 1
# keep running to let things shutdown properly
continue
return return_code
def run(self, *, shutdown_when_idle=True) -> int:
"""
Run an event loop and visit all entities of all included LaunchDescription instances.
This should only ever be run from the main thread and not concurrently with
asynchronous runs (see `run_async()` documentation).
Note that KeyboardInterrupt is caught and ignored, as signals are handled separately.
After the run ends, this behavior is undone.
:param: shutdown_when_idle if True (default), the service will shutdown when idle
"""
loop = osrf_pycommon.process_utils.get_loop()
run_async_task = loop.create_task(self.run_async(
shutdown_when_idle=shutdown_when_idle
))
while True:
try:
return loop.run_until_complete(run_async_task)
except KeyboardInterrupt:
continue
def __on_shutdown(self, event: Event, context: LaunchContext) -> Optional[SomeActionsType]:
self.__shutting_down = True
self.__context._set_is_shutdown(True)
return None
def _shutdown(self, *, reason, due_to_sigint, force_sync=False) -> Optional[Coroutine]:
# Assumption is that this method is only called when running.
retval = None
if not self.__shutting_down:
shutdown_event = Shutdown(reason=reason, due_to_sigint=due_to_sigint)
asyncio_event_loop = None
try:
asyncio_event_loop = asyncio.get_event_loop()
except (RuntimeError, AssertionError):
# If no event loop is set for this thread, asyncio will raise an exception.
# The exception type depends on the version of Python, so just catch both.
pass
if force_sync:
self.__context.emit_event_sync(shutdown_event)
elif self.__loop_from_run_thread == asyncio_event_loop:
# If in the thread of the loop.
retval = self.__context.emit_event(shutdown_event)
else:
# Otherwise in a different thread, so use the thread-safe method.
self.emit_event(shutdown_event)
self.__shutting_down = True
self.__context._set_is_shutdown(True)
return retval
def shutdown(self, force_sync=False) -> Optional[Coroutine]:
"""
Shutdown all on-going activities and then stop the asyncio run loop.
This will cause the running LaunchService to eventually exit.
Does nothing if the LaunchService is not running.
This will return an awaitable coroutine if called from within the loop.
This method is thread-safe.
"""
with self.__loop_from_run_thread_lock:
if self.__loop_from_run_thread is not None:
return self._shutdown(
reason='LaunchService.shutdown() called',
due_to_sigint=False, force_sync=force_sync
)
@property
def context(self):
"""Getter for context."""
return self.__context
@property
def event_loop(self):
"""Getter for the event loop being used in the thread running the launch service."""
return self.__loop_from_run_thread
@property
def task(self):
"""Return asyncio task associated with this launch service."""
return self.__this_task
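# A minimal usage sketch (not part of the original module), assuming a
# LaunchDescription `ld` has been constructed elsewhere:
#
#     ls = LaunchService(argv=[])
#     ls.include_launch_description(ld)
#     return_code = ls.run(shutdown_when_idle=True)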
|
ros2/launch
|
launch/launch/launch_service.py
|
Python
|
apache-2.0
| 18,972
|
[
"VisIt"
] |
7876ddd57e6d7bc57fc6cea96b9b6867cd23fc187e2f1ca7d6eadb399e8fd380
|
# -*- coding: utf-8 -*-
"""Word cloud is ungraded xblock used by students to
generate and view word cloud.
On the client side we show:
If student does not yet answered - `num_inputs` numbers of text inputs.
If student have answered - words he entered and cloud.
"""
import json
import logging
import datetime
import csv
import StringIO
from pkg_resources import resource_string
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.x_module import XModule
from django.contrib.auth.models import User
from django.utils.timezone import UTC
from xblock.fields import Scope, Dict, Boolean, List, Integer, String
from xmodule.modulestore import Location
log = logging.getLogger(__name__)
from django.utils.translation import ugettext as _
from django.conf import settings
def pretty_bool(value):
"""Check value for possible `True` value.
Using this function we can manage different type of Boolean value
in xml files.
"""
bool_dict = [True, "True", "true", "T", "t", "1"]
return value in bool_dict
class MasterClassFields(object):
"""XFields for word cloud."""
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default=_("Master Class")
)
total_places = Integer(
display_name=_("Max places"),
help=_("Number of places available for students to register for masterclass."),
scope=Scope.settings,
default=30,
values={"min": 1}
)
autopass_score = Integer(
display_name=_("Autopass score"),
help=_("Autopass score to automaticly pass registration for masterclass."),
scope=Scope.settings,
default=250,
values={"min": 1}
)
problem_id = String(
display_name=_("Masterclass problem id"),
help=_("Full id of the problem which is to be acomplished to pass registration for masterclass."),
scope=Scope.settings,
#default=_("Master Class") # no default
)
auto_register_if_passed = Boolean(
display_name=_("Auto registration"),
help=_("Auto registration for masterclass if a user passed the test"),
scope=Scope.settings,
default=False,
)
# Fields for descriptor.
submitted = Boolean(
help=_("Whether this student has been register for this master class."),
scope=Scope.user_state,
default=False
)
all_registrations = List(
help=_("All registrations from all students."),
scope=Scope.user_state_summary
)
passed_registrations = List(
help=_("Passed registrations."),
scope=Scope.user_state_summary
)
passed_masterclass_test = Boolean(
help=_("Whether this student has passed the task to register for the masterclass."),
scope=Scope.user_state,
default=False
)
class MasterClassModule(MasterClassFields, XModule):
"""MasterClass Xmodule"""
js = {
'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
'js': [resource_string(__name__, 'js/src/word_cloud/d3.min.js'),
resource_string(__name__, 'js/src/word_cloud/d3.layout.cloud.js'),
resource_string(__name__, 'js/src/master_class/master_class.js'),
resource_string(__name__, 'js/src/master_class/master_class_main.js')]
}
css = {'scss': [resource_string(__name__, 'css/master_class/display.scss')]}
js_module_name = "MasterClass"
def get_state(self):
"""Return success json answer for client."""
total_register = len(self.passed_registrations)
message = ""
message2 = ""
if self.runtime.user.email in self.passed_registrations:
message = _("You have been registered for this master class. We will provide addition information soon.")
elif self.runtime.user.email in self.all_registrations:
message = _("You are pending for registration for this master class. Please visit this page later for result.")
else:
message2 = _("You have not been registered for this master class. Probably you have to pass a test first or there is not enough places.")
if (total_register is None):
total_register = 0
additional_data = {}
allreg = []
passreg = []
for email in self.all_registrations:
try:
user = User.objects.get(email=email)
allreg += [{'email': email, 'name': user.profile.lastname + ' ' + user.profile.firstname + ' ' + user.profile.middlename}]
except:
pass
for email in self.passed_registrations:
try:
user = User.objects.get(email=email)
passreg += [{'email': email, 'name': user.profile.lastname + ' ' + user.profile.firstname + ' ' + user.profile.middlename}]
except:
pass
if self.runtime.user_is_staff:
additional_data['all_registrations'] = allreg
additional_data['passed_registrations'] = passreg
additional_data['is_staff'] = self.runtime.user_is_staff
additional_data['csv_name'] = self.runtime.course_id + " " + self.display_name
if self.submitted and self.runtime.user.email not in self.all_registrations and self.runtime.user.email not in self.passed_registrations:
self.submitted = False
if self.submitted:
data = {
'status': 'success',
'submitted': True,
'is_closed': self.is_past_due(),
'total_places': self.total_places,
'total_register': total_register,
'message': message,
'problem_id': self.problem_id,
'auto_register_if_passed': self.auto_register_if_passed,
}
data.update(additional_data)
return json.dumps(data)
else:
data = {
'status': 'success',
'submitted': False,
'is_closed': self.is_past_due(),
'total_places': self.total_places,
'total_register': total_register,
'problem_id': self.problem_id,
'message': message2,
'auto_register_if_passed': self.auto_register_if_passed,
}
data.update(additional_data)
return json.dumps(data)
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request get parameters
Returns:
json string
"""
if dispatch == 'submit':
if self.is_past_due():
return json.dumps({
'status': 'fail',
'error': 'Registration is closed due to date.'
})
if self.submitted:
return json.dumps({
'status': 'fail',
'error': 'You have already posted your data.'
})
# Student words from client.
# FIXME: we must use raw JSON, not a post data (multipart/form-data)
master_class = data.getall('master_class[]')
if self.problem_id is None:
self.all_registrations.append(self.runtime.user.email)
self.submitted = True
return self.get_state()
problem_location = Location(self.problem_id)
problem_descriptor = self.runtime.descriptor_runtime.modulestore.get_item(problem_location)
problem_score = self.runtime.get_score(self.runtime.course_id, self.runtime.user, problem_descriptor, self.runtime.get_module)
self.passed_masterclass_test = problem_score is not None and len(problem_score) >= 2 and problem_score[0] >= self.autopass_score
if self.passed_masterclass_test:
if self.auto_register_if_passed:
if len(self.passed_registrations) < self.total_places:
self.passed_registrations.append(self.runtime.user.email)
self.submitted = True
else:
self.all_registrations.append(self.runtime.user.email)
self.submitted = True
return self.get_state()
elif dispatch == 'get_state':
return self.get_state()
elif dispatch == 'register':
logging.error(data)
if self.runtime.user_is_staff:
for email in data.getall('emails[]'):
if (len(self.passed_registrations) < self.total_places):
if (self.all_registrations.count(email) > 0):
self.passed_registrations.append(email)
self.all_registrations.remove(email)
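# Rough English translation of the Russian notification below (comment added
# for reference): subject "Registration confirmation for {masterclass}";
# body "Dear {fullname}! Your application for {masterclass} has been approved.
# Details are available at: {url}. Regards, the GBOU CPM team."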
subject = u"Подтверждение регистрации на {masterclass}".format(masterclass=self.display_name)
body = u"Уважаемый(ая) {fullname}!\nВаша заявка на {masterclass} была одобрена. Подробности Вы можете узнать по ссылке: {url}.\nС уважением, Команда ГБОУ ЦПМ.".format(
fullname=User.objects.get(email=email).profile.name,
masterclass=self.display_name,
url='https://' + settings.SITE_NAME + '/courses/' + self.course_id + '/jump_to/{}'.format(Location(self.location))
)
mail = self.runtime.bulkmail.create(self.course_id,
self.runtime.user,
'list',
subject,
body,
location=self.id,
to_list=[email]
)
try:
mail.send()
return self.get_state()
except:
return json.dumps({
'status': 'fail',
'msg': _('Your email can not be sent.')
})
else:
return json.dumps({
'status': 'fail',
'error': _("Not enough places for this master class.")
})
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'unregister':
logging.error(data)
if self.runtime.user_is_staff:
for email in data.getall('emails[]'):
if (self.passed_registrations.count(email) > 0):
self.passed_registrations.remove(email)
self.all_registrations.append(email)
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'remove':
logging.error(data)
if self.runtime.user_is_staff:
for email in data.getall('emails[]'):
if (self.passed_registrations.count(email) > 0):
self.passed_registrations.remove(email)
if (self.all_registrations.count(email) > 0):
self.all_registrations.remove(email)
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'csv':
if self.runtime.user_is_staff:
header = [u'Email', u'Фамилия', u'Имя', u'Отчество',]
datatable = {'header': header, 'students': []}
data = []
for email in self.passed_registrations:
datarow = []
user = User.objects.get(email=email)
datarow += [user.email, user.profile.lastname, user.profile.firstname, user.profile.middlename]
data += [datarow]
datatable['data'] = data
return self.return_csv(" ", datatable, encoding="cp1251", dialect="excel-tab")
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
elif dispatch == 'email':
subject = data.get('subject')
body = data.get('body')
mail = self.runtime.bulkmail.create(self.course_id, self.runtime.user, 'list', subject, body, location=self.id, to_list=self.passed_registrations)
mail.send()
return json.dumps({
'status': 'success',
'msg': _('Your email was successfully queued for sending.')
})
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
def is_past_due(self):
"""
Is it now past this problem's due date, including grace period?
"""
return (self.due is not None and
datetime.datetime.now(UTC()) > self.due)
def get_html(self):
"""Template rendering."""
logging.info(type(self.location))
logging.info(self.get_progress())
logging.info(self.runtime.seed)
logging.info(self.runtime.anonymous_student_id)
logging.info(self.runtime)
context = {
'display_name': self.display_name,
'due': self.due,
'element_id': self.location.html_id(),
'element_class': self.location.category,
'ajax_url': self.system.ajax_url,
'submitted': self.submitted,
'is_staff': self.runtime.user_is_staff,
'all_registrations': self.all_registrations,
'passed_registrations': self.passed_registrations
}
self.content = self.system.render_template('master_class.html', context)
return self.content
def return_csv(self, func, datatable, file_pointer=None, encoding="utf-8", dialect="excel"):
"""Outputs a CSV file from the contents of a datatable."""
if file_pointer is None:
response = StringIO.StringIO()
else:
response = file_pointer
writer = csv.writer(response, dialect=dialect, quotechar='"', quoting=csv.QUOTE_ALL)
encoded_row = [unicode(s).encode(encoding) for s in datatable['header']]
writer.writerow(encoded_row)
for datarow in datatable['data']:
encoded_row = [unicode(s).encode(encoding) for s in datarow]
writer.writerow(encoded_row)
if file_pointer is None:
return response.getvalue()
else:
return response
class MasterClassDescriptor(MasterClassFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
"""Descriptor for MasterClass Xmodule."""
module_class = MasterClassModule
template_dir_name = 'master_class'
|
torchingloom/edx-platform
|
common/lib/xmodule/xmodule/master_class_module.py
|
Python
|
agpl-3.0
| 15,766
|
[
"VisIt"
] |
f853cf1d3ed49651ba55e4448fc29d517c05b41b8be865ef941518f1706e1706
|
#
# Copyright 2014 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
import numpy as np
from astropy.modeling import Fittable1DModel, Parameter
class EnclosedGaussian(Fittable1DModel):
'''Enclosed Gaussian model'''
amplitude = Parameter()
stddev = Parameter()
@staticmethod
def evaluate(x, amplitude, stddev):
return amplitude * (1 - np.exp(-0.5 * (x / stddev)**2))
@staticmethod
def fit_deriv(x, amplitude, stddev):
z = (x / stddev)**2
t = np.exp(-0.5 * z)
d_amplitude = -t + 1.0
d_stddev = -amplitude * t * z / stddev
return [d_amplitude, d_stddev]
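# A minimal fitting sketch (illustrative only, not part of the original
# module): fits EnclosedGaussian to synthetic enclosed-flux data with
# astropy's least-squares fitter.
if __name__ == "__main__":
    from astropy.modeling.fitting import LevMarLSQFitter
    radii = np.linspace(0.0, 5.0, 50)
    flux = EnclosedGaussian(amplitude=10.0, stddev=1.5)(radii)
    fitter = LevMarLSQFitter()
    fitted = fitter(EnclosedGaussian(amplitude=5.0, stddev=1.0), radii, flux)
    # fitted.amplitude and fitted.stddev should recover roughly 10.0 and 1.5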
|
nicocardiel/numina
|
numina/modeling/enclosed.py
|
Python
|
gpl-3.0
| 722
|
[
"Gaussian"
] |
96e68f88995906e9fcc57c49a1ff675cf2e5aaa13ddbdd9a1aa33b0730bc0be2
|
#!/usr/bin/env python
"""Tests pycits wrapper for bowtie2-build.
Bowtie does not have native support for gzipped files, so we
work directly with FASTA/FASTQ
"""
import hashlib
import os
import shutil
from pycits import bowtie2_build
from pycits.tools import NotExecutableError
from nose.tools import nottest, assert_equal
import subprocess
# INPUT DATA
OUTDIR = os.path.join("tests", "test_out", "bowtie2-build")
TARGETDIR = os.path.join("tests", "test_targets", "bowtie2-build")
DATABASE = os.path.join("tests", "test_data", "bowtie2-build",
"database_bowtie_test.fasta")
FA_INDEX = os.path.join(OUTDIR, "fasta_index")
THREADS = "1"
# Create output directory tree
def setup():
"""Set up test fixtures"""
try:
shutil.rmtree(OUTDIR)
except FileNotFoundError:
pass
os.makedirs(OUTDIR, exist_ok=True)
# test for indexing first
def test_bowtie2_build_path():
"""bowtie2-build executable is in $PATH"""
bowtie2_build.Bowtie2_Build("bowtie2-build")
def test_bowtie2_build_exec_notexist():
"""Error thrown if bowtie_build executable does not exist"""
try:
obj = bowtie2_build.Bowtie2_Build(os.path.join(".", "bowtie2-build"))
except NotExecutableError:
return True
else:
return False
def test_bowtie2_build_cmd():
"""bowtie2-build returns correct form of cmd-line"""
bowtie2_idx = bowtie2_build.Bowtie2_Build("bowtie2-build")
target = ' '.join(["bowtie2-build",
"--quiet",
"-f",
DATABASE,
FA_INDEX])
result = bowtie2_idx.run(DATABASE, FA_INDEX, dry_run=True)
assert_equal(result.command, target)
def test_bowtie2_build_notexec():
"""Error thrown if bowtie2-build not executable"""
try:
obj = bowtie2_build.Bowtie2_Build("LICENSE")
except NotExecutableError:
return True
else:
return False
def test_bowtie2_build_exec():
"""bowtie2-build indexes the file correctly"""
bowtie2_idx = bowtie2_build.Bowtie2_Build("bowtie2-build")
result = bowtie2_idx.run(DATABASE, FA_INDEX)
# Test for equality of output and target MD5 hashes
for fname in os.listdir(OUTDIR):
with open(os.path.join(OUTDIR, fname), "rb") as outfh:
outhash = hashlib.md5()
outhash.update(outfh.read())
with open(os.path.join(TARGETDIR, fname), "rb") as tgtfh:
tgthash = hashlib.md5()
tgthash.update(tgtfh.read())
assert_equal(tgthash.digest(), outhash.digest())
|
widdowquinn/THAPBI-pycits
|
tests/test_wrapper_bowtie2_build.py
|
Python
|
mit
| 2,585
|
[
"Bowtie"
] |
dc8f565b24533a584845cb3a7141ef3440ebd9eb50d89a2ea38d01f1f162c83f
|
#!/usr/bin/env python
#author: Peter Thorpe September 2016. The James Hutton Institute, Dundee, UK.
#Title:
#script to generate a GFF for ITS region BLAST hits
# The BLAST should already have been performed:
# blastn -query ITS.fasta -db genome.fasta -outfmt 6 -out ITS_vs_geome.out
#imports
import os
import sys
from sys import stdin, argv
import datetime
from optparse import OptionParser
###########################################################################
def parse_blast_tab_outfile(blast):
"""read in the blast tab file. Reads whole file into memeroy.
returns a list, one list item per blast hit.
"""
with open(blast) as file:
return file.read().split("\n")
def get_unique_hits(temp_blast_hits):
"""function to remove duplicate hits"""
identified_data_set = set([])
blast_hits = []
for result in temp_blast_hits:
blast_line = result.split("\t")
if len(blast_line) < 8:
continue
scaffold = blast_line[1]
start = blast_line[8]
stop = blast_line[9]
hit = "%s\t%s\t%s" %(scaffold, start, stop)
hit_rev = "%s\t%s\t%s" %(scaffold, stop, start)
#print hit
# check to see if this start/stop/scaffold location
# has already been found.
if hit not in identified_data_set:
identified_data_set.add(hit)
identified_data_set.add(hit_rev)
blast_hits.append(result)
#print "temp blast hits", blast_hits
# FIXME: get_representative_blast_hit() is not defined in this script, so the
# call is disabled to avoid a NameError; the raw unique hits are returned below.
#best_blast_hits = get_representative_blast_hit(blast_hits)
#print "best_blast_hits :", best_blast_hits
return blast_hits
def spit_blast_data(i, blast_count):
"""function to split up the blast hits
and return \t formatted data. Checks that start < stop
and swaps them if need be."""
# split the blast line and assign the fields respectively
queryId, subjectId, percIdentity, alnLength,mismatchCount,\
gapOpenCount, queryStart, queryEnd, subjectStart, \
subjectEnd, eVal, bitScore = i.split("\t")
#reverse negative blast hits (breaks bamtools if not fixed)
if int(subjectStart) > int(subjectEnd):
temp_subjectStart = subjectEnd
temp_subjectEnd = subjectStart
out_format="%s\t%s\tITS_blast_hit_%d\t%s\t%s\t.\t+\t.\tITS_blast_hits_region\n" %(subjectId,\
prefix, blast_count,\
temp_subjectStart,temp_subjectEnd)
else:
#direction find. ready for writing out.
out_format= "%s\t%s\tITS_blast_hit_%d\t%s\t%s\t.\t+\t.\tITS_blast_hits_region\n" %(subjectId,\
prefix, blast_count, subjectStart,\
subjectEnd)
#print out_format
return out_format
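# Worked example (hypothetical values, comment only): a single outfmt 6 line
#   ITS1  scaffold_12  98.5  600  5  2  1  600  15000  14401  1e-100  1000
# has subjectStart > subjectEnd, so spit_blast_data() swaps them and emits
#   scaffold_12  <prefix>  ITS_blast_hit_1  14401  15000  .  +  .  ITS_blast_hits_region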
def write_out_ITS_GFF(blast, prefix, out):
"""function to write out the ITS blast hits in a GFF3
like manner. """
# call function to get list of blast hits.
try:
blast_hits = parse_blast_tab_outfile(blast)
except:
raise ValueError("something wrong with blast out file")
GFF_out = open(out, "w")
# counter to index the blast hits in the GFF file
blast_count = 0
# sanity check to remove duplicate events
already_seen_set = set([])
for i in blast_hits:
if i.startswith("#"):
#allows the outfile to have comment lines.
continue
if not i.strip():
continue #if the last line is blank
blast_count = blast_count +1
# check this is a unique blast hit. Remove duplicates!
if i not in already_seen_set:
#add this to seen set.
already_seen_set.add(i)
if len(i.split("\t")) > 12:
#remove tax id and extra coloumns - not needed.
i = i[:12]
if len(i.split("\t")) >12:
raise ValueError("""custom BLAST output?
not enough coloumns in blast file.""")
out_format = spit_blast_data(i, blast_count)
#write to file
GFF_out.write(out_format)
#close the write file
GFF_out.close()
###########################################################################
if "-v" in sys.argv or "--version" in sys.argv:
print ("v0.0.1")
sys.exit(0)
usage = """Use as follows:
Title:
script to generate a GFF for ITS region BLAST hits
The BLAST should already have been performed:
blastn -query ITS.fasta -db genome.fasta -outfmt 6 -out ITS_vs_geome.out
$ generate_ITS_GFF.py -b blast.out --prefix p.infestans -o gff.out
Note:
columns 6, 7 and 8 are 'made up' for the purpose of this task.
BLAST hits on the negative strand will be inverted so the
start is always less than the end coordinate.
"""
parser = OptionParser(usage=usage)
parser.add_option("-b", "--blast", dest="blast", default="outfmt6.out",
help="the tab out file from the BLAST search",
metavar="FILE")
parser.add_option("--prefix", dest="prefix",
default="temp_name",
help="name for column 2 in GFF. Best to "
" use the origin of the data")
parser.add_option("-o", "--out_file", dest="out_file",
default="ITS_GFF.out",
help="outfile for the ITS regions in GFF format")
(options, args) = parser.parse_args()
blast = options.blast
prefix = options.prefix
out_file = options.out_file
#run the program
if not os.path.isfile(blast):
print("sorry, couldn't open the file: " + ex.strerror + "\n")
print ("current working directory is :", os.getcwd() + "\n")
print ("files are :", [f for f in os.listdir('.')])
sys_exit("\n\nInput blast file not found: %s" % blast)
# call the top function
write_out_ITS_GFF(blast, prefix, out_file)
|
widdowquinn/THAPBI
|
ITS_region_genomic_coverage/generate_ITS_GFF.py
|
Python
|
mit
| 5,803
|
[
"BLAST"
] |
c0e4c6e0ea79ff75f4ff5ccc474a216359d87f7c97bd2c10bf3b94f23d190057
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, os
from VtkRenderer import *
import numpy as np
from RadarTransforms import *
from LidarTransforms import *
from Q50_config import *
class ImageGrabberCallback:
def __init__(self, map_file):
self.map_file = map_file
self.radar_params = LoadParameters('q50_4_3_14_params')['radar']
self.lidar_actor = None
self.radar_actors = []
self.clouds = loadLDRCamMap(map_file)
self.rdr_pts = loadRDRCamMap(map_file)
self.count = 0
def execute(self, iren, event):
fren = iren.GetRenderWindow().GetRenderers().GetFirstRenderer()
radar_data = loadRDR(self.rdr_pts[self.count])[0]
radar_data[:, :3] = calibrateRadarPts(radar_data[:, :3], self.radar_params)
if radar_data.shape[0] > 0:
mask = (radar_data[:, 5] > 5)
mask &= (radar_data[:, 6] > -20)
radar_data = radar_data[mask]
if radar_data.shape[0] > 0:
for i in xrange(len(self.radar_actors)):
fren.RemoveActor(self.radar_actors[i])
self.radar_actors = []
self.radar_clouds = []
for i in xrange(radar_data.shape[0]):
self.radar_clouds.append(VtkBoundingBox(radar_data[i, :]))
self.radar_actors.append(self.radar_clouds[i].get_vtk_box())
fren.AddActor(self.radar_actors[i])
lidar_data = loadLDR(self.clouds[self.count])
self.lidar_cloud = VtkPointCloud(lidar_data[:, :3], lidar_data[:,3])
fren.RemoveActor(self.lidar_actor)
self.lidar_actor = self.lidar_cloud.get_vtk_cloud(zMin=0, zMax=255)
fren.AddActor(self.lidar_actor)
if self.count == 0:
fren.ResetCamera()
fren.GetActiveCamera().Zoom(1.6)
self.count += 1
iren.GetRenderWindow().Render()
if __name__ == '__main__':
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(1280/2, 960/2)
renderer = vtk.vtkRenderer()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
mouseInteractor = vtk.vtkInteractorStyleTrackballCamera()
renderWindowInteractor.SetInteractorStyle(mouseInteractor)
renderWindow.Render()
cb = ImageGrabberCallback(sys.argv[1])
renderWindowInteractor.AddObserver('TimerEvent', cb.execute)
timerId = renderWindowInteractor.CreateRepeatingTimer(1)
renderWindowInteractor.Start()
|
sameeptandon/sail-car-log
|
process/tests/testDrawRadar.py
|
Python
|
bsd-2-clause
| 2,580
|
[
"VTK"
] |
0f090b4d8f140da3b58dadde69b95d530ec44a966ff6fdaa67f2db328e72b7c7
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
try:
from StringIO import StringIO
except Exception:
from io import StringIO
import numpy as np
from ..sile import add_sile
from sisl._internal import set_module
from sisl.physics import DensityMatrix
from sisl.utils import *
from sisl.unit.siesta import unit_convert
from .tbt import tbtncSileTBtrans
__all__ = ['tbtprojncSileTBtrans']
Bohr2Ang = unit_convert('Bohr', 'Ang')
Ry2eV = unit_convert('Ry', 'eV')
Ry2K = unit_convert('Ry', 'K')
eV2Ry = unit_convert('eV', 'Ry')
@set_module("sisl.io.tbtrans")
class tbtprojncSileTBtrans(tbtncSileTBtrans):
""" TBtrans projection file object """
_trans_type = 'TBT.Proj'
@classmethod
def _mol_proj_elec(self, elec_mol_proj):
""" Parse the electrode-molecule-projection str/tuple into the molecule-projected-electrode
Parameters
----------
elec_mol_proj : str or tuple
electrode-molecule-projection
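Examples
--------
>>> tbtprojncSileTBtrans._mol_proj_elec('Left.C60.HOMO')
['C60', 'HOMO', 'Left']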
"""
if isinstance(elec_mol_proj, str):
elec_mol_proj = elec_mol_proj.split('.')
if len(elec_mol_proj) == 1:
return elec_mol_proj
elif len(elec_mol_proj) != 3:
raise ValueError(f"Projection specification does not contain 3 fields: <electrode>.<molecule>.<projection> is required.")
return [elec_mol_proj[i] for i in [1, 2, 0]]
@property
def elecs(self):
""" List of electrodes """
elecs = []
# in cases of not calculating all
# electrode transmissions we must ensure that
# we add the last one
for group in self.groups.keys():
if group in elecs:
continue
if 'mu' in self.groups[group].variables.keys():
elecs.append(group)
return elecs
@property
def molecules(self):
""" List of regions where state projections may happen """
mols = []
for mol in self.groups.keys():
if len(self.groups[mol].groups) > 0:
# this is a group with groups!
mols.append(mol)
return mols
def projections(self, molecule):
""" List of projections on `molecule`
Parameters
----------
molecule : str
name of molecule to retrieve projections on
"""
mol = self.groups[molecule]
return list(mol.groups.keys())
def ADOS(self, elec_mol_proj, E=None, kavg=True, atoms=None, orbitals=None, sum=True, norm='none'):
r""" Projected spectral density of states (DOS) (1/eV)
Extract the projected spectral DOS from electrode `elec` on a selected subset of atoms/orbitals in the device region
.. math::
\mathrm{ADOS}_\mathfrak{el}(E) = \frac{1}{2\pi N} \sum_{\nu\in \mathrm{atom}/\mathrm{orbital}} [\mathbf{G}(E)|i\rangle\langle i|\Gamma_\mathfrak{el}|i\rangle\langle i|\mathbf{G}^\dagger]_{\nu\nu}(E)
where :math:`|i\rangle` may be a sum of states.
The normalization constant (:math:`N`) is defined in the routine `norm` and depends on the
arguments.
Parameters
----------
elec_mol_proj: str or tuple
originating projected spectral function (<electrode>.<molecule>.<projection>)
E : float or int, optional
optionally only return the DOS of atoms at a given energy point
kavg: bool, int or array_like, optional
whether the returned DOS is k-averaged, an explicit k-point
or a selection of k-points
atoms : array_like of int or bool, optional
only return for a given set of atoms (default to all).
*NOT* allowed with `orbital` keyword
orbitals : array_like of int or bool, optional
only return for a given set of orbitals (default to all)
*NOT* allowed with `atoms` keyword
sum : bool, optional
whether the returned quantities are summed or returned *as is*, i.e. resolved per atom/orbital.
norm : {'none', 'atom', 'orbital', 'all'}
how the normalization of the summed DOS is performed (see `norm` routine).
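Examples
--------
Hypothetical projection label; actual names depend on the projections in the calculation:
>>> ados = tbt.ADOS('Left.C60.HOMO')                        # summed over all device orbitals
>>> ados_atom = tbt.ADOS('Left.C60.HOMO', atoms=[1, 2], sum=False)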
"""
mol_proj_elec = self._mol_proj_elec(elec_mol_proj)
return self._DOS(self._value_E('ADOS', mol_proj_elec, kavg=kavg, E=E), atoms, orbitals, sum, norm) * eV2Ry
def transmission(self, elec_mol_proj_from, elec_mol_proj_to, kavg=True):
""" Transmission from `mol_proj_elec_from` to `mol_proj_elec_to`
Parameters
----------
elec_mol_proj_from: str or tuple
the originating scattering projection (<electrode>.<molecule>.<projection>)
elec_mol_proj_to: str or tuple
the absorbing scattering projection (<electrode>.<molecule>.<projection>)
kavg: bool, int or array_like, optional
whether the returned transmission is k-averaged, an explicit k-point
or a selection of k-points
See Also
--------
transmission_eig : projected transmission decomposed in eigenchannels
"""
mol_proj_elec = self._mol_proj_elec(elec_mol_proj_from)
if not isinstance(elec_mol_proj_to, str):
elec_mol_proj_to = '.'.join(elec_mol_proj_to)
return self._value_avg(elec_mol_proj_to + '.T', mol_proj_elec, kavg=kavg)
def transmission_eig(self, elec_mol_proj_from, elec_mol_proj_to, kavg=True):
""" Transmission eigenvalues from `elec_mol_proj_from` to `elec_mol_proj_to`
Parameters
----------
elec_mol_proj_from: str or tuple
the originating scattering projection (<electrode>.<molecule>.<projection>)
elec_mol_proj_to: str or tuple
the absorbing scattering projection (<electrode>.<molecule>.<projection>)
kavg: bool, int or array_like, optional
whether the returned transmission is k-averaged, an explicit k-point
or a selection of k-points
See Also
--------
transmission : projected transmission
"""
mol_proj_elec = self._mol_proj_elec(elec_mol_proj_from)
if not isinstance(elec_mol_proj_to, str):
elec_mol_proj_to = '.'.join(elec_mol_proj_to)
return self._value_avg(elec_mol_proj_to + '.T.Eig', mol_proj_elec, kavg=kavg)
def Adensity_matrix(self, elec_mol_proj, E, kavg=True, isc=None, orbitals=None, geometry=None):
r""" Projected spectral function density matrix at energy `E` (1/eV)
The projected density matrix can be used to calculate the LDOS in real-space.
The :math:`\mathrm{LDOS}(E, \mathbf r)` may be calculated using the `~sisl.physics.DensityMatrix.density`
routine. Basically the LDOS in real-space may be calculated as
.. math::
\rho_{\mathbf A_{\mathfrak{el}}}(E, \mathbf r) = \frac{1}{2\pi}\sum_{\nu\mu}\phi_\nu(\mathbf r)\phi_\mu(\mathbf r) \Re[\mathbf A_{\mathfrak{el}, \nu\mu}(E)]
where :math:`\phi` are the orbitals. Note that the broadening used in the TBtrans calculations
ensures the broadening of the density, i.e. it should not be necessary to perform energy
averages over the density matrices.
Parameters
----------
elec_mol_proj: str or tuple
the projected electrode of originating electrons
E : float or int
the energy or the energy index of density matrix. If an integer
is passed it is the index, otherwise the index corresponding to
``Eindex(E)`` is used.
kavg: bool, int or array_like, optional
whether the returned density matrix is k-averaged, an explicit k-point
or a selection of k-points
isc: array_like, optional
the returned density matrix from unit-cell (``[None, None, None]``) to
the given supercell, the default is all density matrix elements for the supercell.
To only get unit cell orbital currents, pass ``[0, 0, 0]``.
orbitals : array-like or dict, optional
only retain density matrix elements for a subset of orbitals, all
other are set to 0.
geometry: Geometry, optional
geometry that will be associated with the density matrix. By default the
geometry contained in this file will be used. However, then the
atomic species are probably incorrect, nor will the orbitals contain
the basis-set information required to generate the required density
in real-space.
Returns
-------
DensityMatrix: the object containing the Geometry and the density matrix elements
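Examples
--------
>>> DM = tbt.Adensity_matrix('Left.C60.HOMO', -1.0)   # hypothetical projection label
>>> rho = DM.density(grid)                            # real-space density on a pre-made sisl Grid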
"""
mol_proj_elec = self._mol_proj_elec(elec_mol_proj)
dm = self._sparse_data('DM', mol_proj_elec, E, kavg, isc, orbitals) * eV2Ry
# Now create the density matrix object
geom = self.read_geometry()
if geometry is None:
DM = DensityMatrix.fromsp(geom, dm)
else:
if geom.no != geometry.no:
raise ValueError(self.__class__.__name__ + '.Adensity_matrix requires input geometry to contain the correct number of orbitals. Please correct input!')
DM = DensityMatrix.fromsp(geometry, dm)
return DM
def orbital_ACOOP(self, elec_mol_proj, E, kavg=True, isc=None, orbitals=None):
r""" Orbital COOP analysis of the projected spectral function
This will return a sparse matrix, see `~scipy.sparse.csr_matrix` for details.
Each matrix element of the sparse matrix corresponds to the COOP of the
underlying geometry.
The COOP analysis can be written as:
.. math::
\mathrm{COOP}^{\mathbf A}_{\nu\mu} = \frac{1}{2\pi} \Re\big[\mathbf A_{\nu\mu} \mathbf S_{\mu\nu} \big]
The sum of the COOP DOS is equal to the DOS:
.. math::
\mathrm{ADOS}_{\nu} = \sum_\mu \mathrm{COOP}^{\mathbf A}_{\nu\mu}
One can calculate the (diagonal) balanced COOP analysis, see JPCM 15 (2003),
7751-7761 for details. The DBCOOP is given by:
.. math::
D &= \sum_\nu \mathrm{COOP}^{\mathbf A}_{\nu\nu}
\\
\mathrm{DBCOOP}^{\mathbf A}_{\nu\mu} &= \mathrm{COOP}^{\mathbf A}_{\nu\mu} / D
The BCOOP can be looked up in the reference above.
Parameters
----------
elec_mol_proj: str or tuple
the electrode of the spectral function
E: float or int
the energy or the energy index of COOP. If an integer
is passed it is the index, otherwise the index corresponding to
``Eindex(E)`` is used.
kavg: bool, int or array_like, optional
whether the returned COOP is k-averaged, an explicit k-point
or a selection of k-points
isc: array_like, optional
the returned COOP from unit-cell (``[None, None, None]``) to
the given supercell, the default is all COOP for the supercell.
To only get unit cell orbital currents, pass ``[0, 0, 0]``.
orbitals : array-like or dict, optional
only retain COOP matrix elements for a subset of orbitals, all
other are set to 0.
Examples
--------
>>> ACOOP = tbt.orbital_ACOOP('Left.C60.HOMO', -1.0) # COOP @ E = -1 eV from ``Left.C60.HOMO`` spectral function
>>> ACOOP[10, 11] # COOP value between the 11th and 12th orbital
>>> ACOOP.sum(1).A[tbt.o_dev, 0] == tbt.ADOS(0, sum=False)[tbt.Eindex(-1.0)]
>>> D = ACOOP.diagonal().sum()
>>> ADBCOOP = ACOOP / D
See Also
--------
atom_COOP_from_orbital : transfer an orbital COOP to atomic COOP
atom_ACOOP : atomic COOP analysis of the projected spectral function
atom_COHP_from_orbital : atomic COHP analysis from an orbital COHP
orbital_ACOHP : orbital resolved COHP analysis of the projected spectral function
atom_ACOHP : atomic COHP analysis of the projected spectral function
"""
mol_proj_elec = self._mol_proj_elec(elec_mol_proj)
COOP = self._sparse_data('COOP', mol_proj_elec, E, kavg, isc, orbitals) * eV2Ry
return COOP
def orbital_ACOHP(self, elec_mol_proj, E, kavg=True, isc=None, orbitals=None):
r""" Orbital COHP analysis of the projected spectral function
This will return a sparse matrix, see ``scipy.sparse.csr_matrix`` for details.
Each matrix element of the sparse matrix corresponds to the COHP of the
underlying geometry.
The COHP analysis can be written as:
.. math::
\mathrm{COHP}^{\mathbf A}_{\nu\mu} = \frac{1}{2\pi} \Re\big[\mathbf A_{\nu\mu}
\mathbf H_{\nu\mu} \big]
Parameters
----------
elec_mol_proj: str or tuple
the electrode of the projected spectral function
E: float or int
the energy or the energy index of COHP. If an integer
is passed it is the index, otherwise the index corresponding to
``Eindex(E)`` is used.
kavg: bool, int or array_like, optional
whether the returned COHP is k-averaged, an explicit k-point
or a selection of k-points
isc: array_like, optional
the returned COHP from unit-cell (``[None, None, None]``) to
the given supercell, the default is all COHP for the supercell.
To only get unit cell orbital currents, pass ``[0, 0, 0]``.
orbitals : array-like or dict, optional
only retain COHP matrix elements for a subset of orbitals, all
other are set to 0.
See Also
--------
atom_COHP_from_orbital : atomic COHP analysis from an orbital COHP
atom_ACOHP : atomic COHP analysis of the projected spectral function
atom_COOP_from_orbital : transfer an orbital COOP to atomic COOP
orbital_ACOOP : orbital resolved COOP analysis of the projected spectral function
atom_ACOOP : atomic COOP analysis of the projected spectral function
"""
mol_proj_elec = self._mol_proj_elec(elec_mol_proj)
COHP = self._sparse_data('COHP', mol_proj_elec, E, kavg, isc, orbitals)
return COHP
@default_ArgumentParser(description="Extract data from a TBT.Proj.nc file")
def ArgumentParser(self, p=None, *args, **kwargs):
""" Returns the arguments that is available for this Sile """
p, namespace = super().ArgumentParser(p, *args, **kwargs)
# We limit the import to occur here
import argparse
def ensure_E(func):
""" This decorater ensures that E is the first element in the _data container """
def assign_E(self, *args, **kwargs):
ns = args[1]
if len(ns._data) == 0:
# We immediately extract the energies
ns._data.append(ns._tbt.E[ns._Erng].flatten())
ns._data_header.append('Energy[eV]')
return func(self, *args, **kwargs)
return assign_E
class InfoMols(argparse.Action):
def __call__(self, parser, ns, value, option_string=None):
print(' '.join(ns._tbt.molecules))
p.add_argument('--molecules', '-M', nargs=0,
action=InfoMols,
help="""Show molecules in the projection file""")
class InfoProjs(argparse.Action):
def __call__(self, parser, ns, value, option_string=None):
print(' '.join(ns._tbt.projections(value[0])))
p.add_argument('--projections', '-P', nargs=1, metavar='MOL',
action=InfoProjs,
help="""Show projections on molecule.""")
class DataDOS(argparse.Action):
@collect_action
@ensure_E
def __call__(self, parser, ns, value, option_string=None):
data = ns._tbt.ADOS(value, kavg=ns._krng, orbitals=ns._Orng, norm=ns._norm)
ns._data_header.append(f'ADOS[1/eV]:{value}')
NORM = int(ns._tbt.norm(orbitals=ns._Orng, norm=ns._norm))
# The flatten is because when ns._Erng is None, then a new
# dimension (of size 1) is created
ns._data.append(data[ns._Erng].flatten())
if ns._Orng is None:
ns._data_description.append('Column {} is sum of all device atoms+orbitals with normalization 1/{}'.format(len(ns._data), NORM))
else:
ns._data_description.append('Column {} is atoms[orbs] {} with normalization 1/{}'.format(len(ns._data), ns._Ovalue, NORM))
p.add_argument('--ados', '-AD', metavar='E.M.P',
action=DataDOS, default=None,
help="""Store projected spectral DOS""")
class DataT(argparse.Action):
@collect_action
@ensure_E
def __call__(self, parser, ns, values, option_string=None):
elec_mol_proj1 = values[0]
elec_mol_proj2 = values[1]
# Grab the information
data = ns._tbt.transmission(elec_mol_proj1, elec_mol_proj2, kavg=ns._krng)[ns._Erng]
data.shape = (-1,)
ns._data.append(data)
ns._data_header.append(f'T:{elec_mol_proj1}-{elec_mol_proj2}')
ns._data_description.append('Column {} is transmission from {} to {}'.format(len(ns._data), elec_mol_proj1, elec_mol_proj2))
p.add_argument('-T', '--transmission', nargs=2, metavar=('E.M.P1', 'E.M.P2'),
action=DataT,
help='Store transmission between two projections.')
class DataTEig(argparse.Action):
@collect_action
@ensure_E
def __call__(self, parser, ns, values, option_string=None):
elec_mol_proj1 = values[0]
elec_mol_proj2 = values[1]
# Grab the information
data = ns._tbt.transmission_eig(elec_mol_proj1, elec_mol_proj2, kavg=ns._krng)[ns._Erng]
neig = data.shape[-1]
for eig in range(neig):
ns._data.append(data[ns._Erng, ..., eig].flatten())
ns._data_header.append('Teig({}):{}-{}'.format(eig+1, elec_mol_proj1, elec_mol_proj2))
ns._data_description.append('Column {} is transmission eigenvalues from electrode {} to {}'.format(len(ns._data), elec_mol_proj1, elec_mol_proj2))
p.add_argument('-Teig', '--transmission-eig', nargs=2, metavar=('E.M.P1', 'E.M.P2'),
action=DataTEig,
help='Store transmission eigenvalues between two projections.')
return p, namespace
def info(self, molecule=None):
""" Information about the calculated quantities available for extracting in this file
Parameters
----------
molecule : str or int
the molecule to request information from
"""
# Create a StringIO object to retain the information
out = StringIO()
# Create wrapper function
def prnt(*args, **kwargs):
option = kwargs.pop('option', None)
if option is None:
print(*args, file=out)
else:
print('{:70s}[{}]'.format(' '.join(args), ', '.join(option)), file=out)
def truefalse(bol, string, fdf=None, suf=2):
if bol:
true(string, fdf, suf)
else:
prnt("{}- {}: false".format(' ' * suf, string), option=fdf)
def true(string, fdf=None, suf=2):
prnt("{}+ {}: true".format(' ' * suf, string), option=fdf)
# Retrieve the device atoms
prnt("Device information:")
if self._k_avg:
prnt(" - all data is k-averaged")
else:
# Print out some more information related to the
# k-point sampling.
# However, we still do not know whether TRS is
# applied.
kpt = self.k
nA = len(np.unique(kpt[:, 0]))
nB = len(np.unique(kpt[:, 1]))
nC = len(np.unique(kpt[:, 2]))
prnt((" - number of kpoints: {} <- "
"[ A = {} , B = {} , C = {} ] (time-reversal unknown)").format(self.nk, nA, nB, nC))
prnt(" - energy range:")
E = self.E
Em, EM = np.amin(E), np.amax(E)
dE = np.diff(E)
dEm, dEM = np.amin(dE) * 1000, np.amax(dE) * 1000 # convert to meV
if (dEM - dEm) < 1e-3: # 0.001 meV
prnt(f" {Em:.5f} -- {EM:.5f} eV [{dEm:.3f} meV]")
else:
prnt(f" {Em:.5f} -- {EM:.5f} eV [{dEm:.3f} -- {dEM:.3f} meV]")
prnt(" - imaginary part (eta): {:.4f} meV".format(self.eta() * 1e3))
prnt(" - atoms with DOS (1-based):")
prnt(" " + list2str(self.a_dev + 1))
prnt(" - number of BTD blocks: {}".format(self.n_btd()))
if molecule is None:
mols = self.molecules
else:
mols = [molecule]
def _get_all(opt, vars):
out = []
indices = []
for i, var in enumerate(vars):
if var.endswith(opt):
out.append(var[:-len(opt)])
indices.append(i)
indices.sort(reverse=True)
for i in indices:
vars.pop(i)
return out
def _print_to(ns, var):
elec_mol_proj = var.split('.')
if len(elec_mol_proj) == 1:
prnt(" " * ns + "-> {elec}".format(elec=elec_mol_proj[0]))
elif len(elec_mol_proj) == 3:
elec2, mol2, proj2 = elec_mol_proj
prnt(" " * ns + f"-> {elec2}.{mol2}.{proj2}")
def _print_to_full(s, vars):
if len(vars) == 0:
return
ns = len(s)
prnt(s)
for var in vars:
_print_to(ns, var)
eig_kwargs = {'precision': 4, 'threshold': 1e6, 'suffix': '', 'prefix': ''}
# Print out information for each electrode
for mol in mols:
opt = {'mol1': mol}
gmol = self.groups[mol]
prnt()
prnt(f"Molecule: {mol}")
prnt(" - molecule atoms (1-based):")
prnt(" " + list2str(gmol.variables['atom'][:]))
# molecule states and eigenvalues stored
lvls = gmol.variables['lvl'][:]
lvls = np.where(lvls < 0, lvls + 1, lvls) + gmol.HOMO_index
eigs = gmol.variables['eig'][:] * Ry2eV
prnt(f" - state indices (1-based) (total={lvls.size}):")
prnt(" " + list2str(lvls))
prnt(" - state eigenvalues (eV):")
prnt(" " + np.array2string(eigs[lvls-1], **eig_kwargs)[1:-1])
projs = self.projections(mol)
prnt(" - number of projections: {}".format(len(projs)))
for proj in projs:
opt['proj1'] = proj
gproj = gmol.groups[proj]
prnt(" > Projection: {mol1}.{proj1}".format(**opt))
# Also pretty print the eigenvalues associated with these
lvls = gproj.variables['lvl'][:]
lvls = np.where(lvls < 0, lvls + 1, lvls) + gmol.HOMO_index
prnt(f" - state indices (1-based) (total={lvls.size}):")
prnt(" " + list2str(lvls))
prnt(" - state eigenvalues:")
prnt(" " + np.array2string(eigs[lvls-1], **eig_kwargs)[1:-1])
# Figure out the electrode projections
elecs = gproj.groups.keys()
for elec in elecs:
opt['elec1'] = elec
gelec = gproj.groups[elec]
vars = list(gelec.variables.keys()) # ensure a copy
prnt(" > Electrode: {elec1}.{mol1}.{proj1}".format(**opt))
# Loop and figure out what is in it.
if 'ADOS' in vars:
vars.pop(vars.index('ADOS'))
true("DOS spectral", ['TBT.Projs.DOS.A'], suf=8)
if 'J' in vars:
vars.pop(vars.index('J'))
true("orbital-current", ['TBT.Projs.Current.Orb'], suf=8)
if 'DM' in vars:
vars.pop(vars.index('DM'))
true("Density matrix spectral", ['TBT.Projs.DM.A'], suf=8)
if 'COOP' in vars:
vars.pop(vars.index('COOP'))
true("COOP spectral", ['TBT.Projs.COOP.A'], suf=8)
if 'COHP' in vars:
vars.pop(vars.index('COHP'))
true("COHP spectral", ['TBT.Projs.COHP.A'], suf=8)
# Retrieve all vars with transmissions
vars_T = _get_all('.T', vars)
vars_Teig = _get_all('.T.Eig', vars)
vars_C = _get_all('.C', vars)
vars_Ceig = _get_all('.C.Eig', vars)
_print_to_full(" + transmission:", vars_T)
_print_to_full(" + transmission (eigen):", vars_Teig)
_print_to_full(" + transmission out corr.:", vars_C)
_print_to_full(" + transmission out corr. (eigen):", vars_Ceig)
# Finally there may be only RHS projections in which case the remaining groups are for
# *pristine* electrodes
for elec in self.elecs:
gelec = self.groups[elec]
vars = list(gelec.variables.keys()) # ensure a copy
try:
bloch = self.bloch(elec)
except:
bloch = [1] * 3
try:
n_btd = self.n_btd(elec)
except:
n_btd = 'unknown'
prnt()
prnt(f"Electrode: {elec}")
prnt(f" - number of BTD blocks: {n_btd}")
prnt(" - Bloch: [{}, {}, {}]".format(*bloch))
gelec = self.groups[elec]
if 'TBT' in self._trans_type:
prnt(" - chemical potential: {:.4f} eV".format(self.chemical_potential(elec)))
prnt(" - electron temperature: {:.2f} K".format(self.electron_temperature(elec)))
else:
prnt(" - phonon temperature: {:.4f} K".format(self.phonon_temperature(elec)))
prnt(" - imaginary part (eta): {:.4f} meV".format(self.eta(elec) * 1e3))
# Retrieve all vars with transmissions
vars_T = _get_all('.T', vars)
vars_Teig = _get_all('.T.Eig', vars)
vars_C = _get_all('.C', vars)
vars_Ceig = _get_all('.C.Eig', vars)
_print_to_full(" + transmission:", vars_T)
_print_to_full(" + transmission (eigen):", vars_Teig)
_print_to_full(" + transmission out corr.:", vars_C)
_print_to_full(" + transmission out corr. (eigen):", vars_Ceig)
s = out.getvalue()
out.close()
return s
def eigenstate(self, molecule, k=None, all=True):
r""" Return the eigenstate on the projected `molecule`
The eigenstate object will contain the geometry as the parent object.
The eigenstate will be in the Lowdin basis:
.. math::
|\psi'_i\rangle = \mathbf S^{1/2} |\psi_i\rangle
Parameters
----------
molecule : str
name of the molecule to retrieve the eigenstate from
k : optional
k-index for retrieving a specific k-point (default to all)
all : bool, optional
whether all states should be returned
Returns
-------
EigenstateElectron or EigenmodePhonon
"""
if 'PHT' in self._trans_type:
from sisl.physics import EigenmodePhonon as cls
else:
from sisl.physics import EigenstateElectron as cls
mol = self.groups[molecule]
if all and ('states' in mol.variables or 'Restates' in mol.variables):
suf = 'states'
else:
all = False
suf = 'state'
is_gamma = suf in mol.variables
if is_gamma:
state = mol.variables[suf][:]
else:
state = mol.variables['Re' + suf][:] + 1j * mol.variables['Im' + suf][:]
eig = mol.variables['eig'][:]
if eig.ndim > 1:
raise NotImplementedError(self.__class__.__name__ + ".eigenstate currently does not implement "
"the k-point version.")
geom = self.read_geometry()
if all:
return cls(state, eig, parent=geom)
lvl = mol.variables['lvl'][:]
lvl = np.where(lvl > 0, lvl - 1, lvl) + mol.HOMO_index
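# Illustrative: here the same level convention is converted to 0-based indices, so a level
# of -1 selects eig[HOMO_index - 1] and +1 selects eig[HOMO_index], matching NumPy indexing.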
return cls(state, eig[lvl], parent=geom)
for _name in ['current', 'current_parameter',
'shot_noise', 'noise_power', 'fano',
'density_matrix',
'orbital_COOP', 'atom_COOP',
'orbital_COHP', 'atom_COHP']:
setattr(tbtprojncSileTBtrans, _name, None)
add_sile('TBT.Proj.nc', tbtprojncSileTBtrans)
# Add spin-dependent files
add_sile('TBT_DN.Proj.nc', tbtprojncSileTBtrans)
add_sile('TBT_UP.Proj.nc', tbtprojncSileTBtrans)
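# Illustrative use of the registration above (the file name is hypothetical):
#   import sisl
#   proj = sisl.get_sile('siesta.TBT.Proj.nc')  # resolves to tbtprojncSileTBtrans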
|
zerothi/sisl
|
sisl/io/tbtrans/tbtproj.py
|
Python
|
mpl-2.0
| 29,596
|
[
"SIESTA"
] |
a50169b04a3f095be80ec452695f2032a8b7ebe4d3adbf710d11b78a73d2c847
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.db.models import Q
import mock
from actstream.models import Action, Follow
from nose.tools import eq_, ok_, raises
from taggit.models import Tag
import kitsune.sumo.models
from kitsune.flagit.models import FlaggedObject
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.questions.cron import auto_archive_old_questions
from kitsune.questions.events import QuestionReplyEvent
from kitsune.questions import models
from kitsune.questions.models import (
Answer, Question, QuestionMetaData, QuestionVisits,
_tenths_version, _has_beta, VoteMetadata, InvalidUserException,
AlreadyTakenException)
from kitsune.questions.tasks import update_answer_pages
from kitsune.questions.tests import (
TestCaseBase, tags_eq, question, answer, questionvote)
from kitsune.questions import config
from kitsune.sumo import googleanalytics
from kitsune.sumo.tests import TestCase
from kitsune.tags.tests import tag
from kitsune.tags.utils import add_existing_tag
from kitsune.users.tests import user
from kitsune.wiki.tests import translated_revision
class TestAnswer(TestCaseBase):
"""Test the Answer model"""
def test_new_answer_updates_question(self):
"""Test saving a new answer updates the corresponding question.
Specifically, last_answer and num_answers should update."""
q = question(title='Test Question', content='Lorem Ipsum Dolor',
save=True)
updated = q.updated
eq_(0, q.num_answers)
eq_(None, q.last_answer)
a = answer(question=q, content='Test Answer', save=True)
a.save()
q = Question.objects.get(pk=q.id)
eq_(1, q.num_answers)
eq_(a, q.last_answer)
self.assertNotEqual(updated, q.updated)
def test_delete_question_removes_flag(self):
"""Deleting a question also removes the flags on that question."""
q = question(title='Test Question', content='Lorem Ipsum Dolor',
save=True)
u = user(save=True)
FlaggedObject.objects.create(
status=0, content_object=q, reason='language', creator_id=u.id)
eq_(1, FlaggedObject.objects.count())
q.delete()
eq_(0, FlaggedObject.objects.count())
def test_delete_answer_removes_flag(self):
"""Deleting an answer also removes the flags on that answer."""
q = question(title='Test Question', content='Lorem Ipsum Dolor',
save=True)
a = answer(question=q, content='Test Answer', save=True)
u = user(save=True)
FlaggedObject.objects.create(
status=0, content_object=a, reason='language', creator_id=u.id)
eq_(1, FlaggedObject.objects.count())
a.delete()
eq_(0, FlaggedObject.objects.count())
def test_delete_last_answer_of_question(self):
"""Deleting the last_answer of a Question should update the question.
"""
yesterday = datetime.now() - timedelta(days=1)
q = answer(created=yesterday, save=True).question
last_answer = q.last_answer
# add a new answer and verify last_answer updated
a = answer(question=q, content='Test Answer', save=True)
q = Question.objects.get(pk=q.id)
eq_(q.last_answer.id, a.id)
# delete the answer and last_answer should go back to previous value
a.delete()
q = Question.objects.get(pk=q.id)
eq_(q.last_answer.id, last_answer.id)
eq_(Answer.objects.filter(pk=a.id).count(), 0)
def test_delete_solution_of_question(self):
"""Deleting the solution of a Question should update the question.
"""
# set a solution to the question
q = answer(save=True).question
solution = q.last_answer
q.solution = solution
q.save()
# delete the solution and question.solution should go back to None
solution.delete()
q = Question.objects.get(pk=q.id)
eq_(q.solution, None)
def test_update_page_task(self):
a = answer(save=True)
a.page = 4
a.save()
a = Answer.objects.get(pk=a.id)
assert a.page == 4
update_answer_pages(a.question)
a = Answer.objects.get(pk=a.id)
assert a.page == 1
def test_delete_updates_pages(self):
a1 = answer(save=True)
a2 = answer(question=a1.question, save=True)
answer(question=a1.question, save=True)
a1.page = 7
a1.save()
a2.delete()
a3 = Answer.objects.filter(question=a1.question)[0]
assert a3.page == 1, "Page was %s" % a3.page
def test_creator_num_answers(self):
a = answer(save=True)
eq_(a.creator_num_answers, 1)
answer(creator=a.creator, save=True)
eq_(a.creator_num_answers, 2)
def test_creator_num_solutions(self):
a = answer(save=True)
q = a.question
q.solution = a
q.save()
eq_(a.creator_num_solutions, 1)
def test_content_parsed_with_locale(self):
"""Make sure links to localized articles work."""
rev = translated_revision(locale='es', is_approved=True, save=True)
doc = rev.document
doc.title = u'Un mejor título'
doc.save()
q = question(locale='es', save=True)
a = answer(question=q, content='[[%s]]' % doc.title, save=True)
assert 'es/kb/%s' % doc.slug in a.content_parsed
def test_creator_follows(self):
a = answer(save=True)
follows = Follow.objects.filter(user=a.creator)
# It's a pain to filter this from the DB, since follow_object is a
# ContentType field, so instead, do it in Python.
eq_(len(follows), 2)
answer_follow = [f for f in follows if f.follow_object == a][0]
question_follow = [f for f in follows if f.follow_object == a.question][0]
eq_(question_follow.actor_only, False)
eq_(answer_follow.actor_only, False)
class TestQuestionMetadata(TestCaseBase):
"""Tests handling question metadata"""
def setUp(self):
super(TestQuestionMetadata, self).setUp()
# add a new Question to test with
self.question = question(
title='Test Question', content='Lorem Ipsum Dolor', save=True)
def test_add_metadata(self):
"""Test the saving of metadata."""
metadata = {'version': u'3.6.3', 'os': u'Windows 7'}
self.question.add_metadata(**metadata)
saved = QuestionMetaData.objects.filter(question=self.question)
eq_(dict((x.name, x.value) for x in saved), metadata)
def test_metadata_property(self):
"""Test the metadata property on Question model."""
self.question.add_metadata(crash_id='1234567890')
eq_('1234567890', self.question.metadata['crash_id'])
def test_product_property(self):
"""Test question.product property."""
self.question.add_metadata(product='desktop')
eq_(config.products['desktop'], self.question.product_config)
def test_category_property(self):
"""Test question.category property."""
self.question.add_metadata(product='desktop')
self.question.add_metadata(category='fix-problems')
eq_(config.products['desktop']['categories']['fix-problems'],
self.question.category_config)
def test_clear_mutable_metadata(self):
"""Make sure it works and clears the internal cache.
crash_id should get cleared, while product, category, and useragent
should remain.
"""
q = self.question
q.add_metadata(product='desktop', category='fix-problems',
useragent='Fyerfocks', crash_id='7')
q.metadata
q.clear_mutable_metadata()
md = q.metadata
assert 'crash_id' not in md, \
"clear_mutable_metadata() didn't clear the cached metadata."
eq_(dict(product='desktop', category='fix-problems',
useragent='Fyerfocks'),
md)
def test_auto_tagging(self):
"""Make sure tags get applied based on metadata on first save."""
Tag.objects.create(slug='green', name='green')
Tag.objects.create(slug='Fix problems', name='fix-problems')
q = self.question
q.add_metadata(product='desktop', category='fix-problems',
ff_version='3.6.8', os='GREen')
q.save()
q.auto_tag()
tags_eq(q, ['desktop', 'fix-problems', 'Firefox 3.6.8', 'Firefox 3.6',
'green'])
def test_auto_tagging_aurora(self):
"""Make sure versions with prerelease suffix are tagged properly."""
q = self.question
q.add_metadata(ff_version='18.0a2')
q.save()
q.auto_tag()
tags_eq(q, ['Firefox 18.0'])
def test_auto_tagging_restraint(self):
"""Auto-tagging shouldn't tag unknown Firefox versions or OSes."""
q = self.question
q.add_metadata(ff_version='allyourbase', os='toaster 1.0')
q.save()
q.auto_tag()
tags_eq(q, [])
def test_tenths_version(self):
"""Test the filter that turns 1.2.3 into 1.2."""
eq_(_tenths_version('1.2.3beta3'), '1.2')
eq_(_tenths_version('1.2rc'), '1.2')
eq_(_tenths_version('1.w'), '')
def test_has_beta(self):
"""Test the _has_beta helper."""
assert _has_beta('5.0', {'5.0b3': '2011-06-01'})
assert not _has_beta('6.0', {'5.0b3': '2011-06-01'})
assert not _has_beta('5.5', {'5.0b3': '2011-06-01'})
assert _has_beta('5.7', {'5.7b1': '2011-06-01'})
assert _has_beta('11.0', {'11.0b7': '2011-06-01'})
assert not _has_beta('10.0', {'11.0b7': '2011-06-01'})
class QuestionTests(TestCaseBase):
"""Tests for Question model"""
def test_save_updated(self):
"""Saving with the `update` option should update `updated`."""
q = question(save=True)
updated = q.updated
q.save(update=True)
self.assertNotEqual(updated, q.updated)
def test_save_no_update(self):
"""Saving without the `update` option shouldn't update `updated`."""
q = question(save=True)
updated = q.updated
q.save()
eq_(updated, q.updated)
def test_default_manager(self):
"""Assert Question's default manager is SUMO's ManagerBase.
This is easy to get wrong when mixing in taggability.
"""
eq_(Question._default_manager.__class__,
kitsune.questions.managers.QuestionManager)
def test_notification_created(self):
"""Creating a new question auto-watches it for answers."""
u = user(save=True)
q = question(creator=u, title='foo', content='bar', save=True)
assert QuestionReplyEvent.is_notifying(u, q)
def test_no_notification_on_update(self):
"""Saving an existing question does not watch it."""
q = question(save=True)
QuestionReplyEvent.stop_notifying(q.creator, q)
assert not QuestionReplyEvent.is_notifying(q.creator, q)
q.save()
assert not QuestionReplyEvent.is_notifying(q.creator, q)
def test_is_solved_property(self):
a = answer(save=True)
q = a.question
assert not q.is_solved
q.solution = a
q.save()
assert q.is_solved
def test_recent_counts(self):
"""Verify recent_asked_count and recent unanswered count."""
# create a question for each of past 4 days
now = datetime.now()
question(created=now, save=True)
question(created=now - timedelta(hours=12), save=True, is_locked=True)
q = question(created=now - timedelta(hours=23), save=True)
answer(question=q, save=True)
# 25 hours instead of 24 to avoid random test fails.
question(created=now - timedelta(hours=25), save=True)
# Only 3 are recent from last 72 hours, 1 has an answer.
eq_(3, Question.recent_asked_count())
eq_(1, Question.recent_unanswered_count())
def test_recent_counts_with_filter(self):
"""Verify that recent_asked_count and recent_unanswered_count
respect filters passed."""
now = datetime.now()
question(created=now, locale='en-US', save=True)
q = question(created=now, locale='en-US', save=True)
answer(question=q, save=True)
question(created=now, locale='pt-BR', save=True)
question(created=now, locale='pt-BR', save=True)
q = question(created=now, locale='pt-BR', save=True)
answer(question=q, save=True)
# 5 asked recently, 3 are unanswered
eq_(5, Question.recent_asked_count())
eq_(3, Question.recent_unanswered_count())
# check english (2 asked, 1 unanswered)
locale_filter = Q(locale='en-US')
eq_(2, Question.recent_asked_count(locale_filter))
eq_(1, Question.recent_unanswered_count(locale_filter))
# check pt-BR (3 asked, 2 unanswered)
locale_filter = Q(locale='pt-BR')
eq_(3, Question.recent_asked_count(locale_filter))
eq_(2, Question.recent_unanswered_count(locale_filter))
def test_from_url(self):
"""Verify question returned from valid URL."""
q = question(save=True)
eq_(q, Question.from_url('/en-US/questions/%s' % q.id))
eq_(q, Question.from_url('/es/questions/%s' % q.id))
eq_(q, Question.from_url('/questions/%s' % q.id))
def test_from_url_id_only(self):
"""Verify question returned from valid URL."""
# When requesting the id, the existence of the question isn't checked.
eq_(123, Question.from_url('/en-US/questions/123', id_only=True))
eq_(234, Question.from_url('/es/questions/234', id_only=True))
eq_(345, Question.from_url('/questions/345', id_only=True))
def test_from_invalid_url(self):
"""Verify question returned from valid URL."""
q = question(save=True)
eq_(None, Question.from_url('/en-US/questions/%s/edit' % q.id))
eq_(None, Question.from_url('/en-US/kb/%s' % q.id))
eq_(None, Question.from_url('/random/url'))
eq_(None, Question.from_url('/en-US/questions/dashboard/metrics'))
def test_editable(self):
q = question(save=True)
assert q.editable # unlocked/unarchived
q.is_archived = True
assert not q.editable # unlocked/archived
q.is_locked = True
assert not q.editable # locked/archived
q.is_archived = False
assert not q.editable # locked/unarchived
q.is_locked = False
assert q.editable # unlocked/unarchived
def test_age(self):
now = datetime.now()
ten_days_ago = now - timedelta(days=10)
thirty_seconds_ago = now - timedelta(seconds=30)
q1 = question(created=ten_days_ago, save=True)
q2 = question(created=thirty_seconds_ago, save=True)
# This test relies on datetime.now() being called in the age
# property, so this delta check makes it less likely to fail
# randomly.
assert abs(q1.age - 10 * 24 * 60 * 60) < 2, ('q1.age (%s) != 10 days'
% q1.age)
assert abs(q2.age - 30) < 2, 'q2.age (%s) != 30 seconds' % q2.age
def test_is_taken(self):
q = question(save=True)
u = user(save=True)
eq_(q.is_taken, False)
q.taken_by = u
q.taken_until = datetime.now() + timedelta(seconds=600)
q.save()
eq_(q.is_taken, True)
q.taken_by = None
q.taken_until = None
q.save()
eq_(q.is_taken, False)
def test_take(self):
u = user(save=True)
q = question(save=True)
q.take(u)
eq_(q.taken_by, u)
ok_(q.taken_until is not None)
@raises(InvalidUserException)
def test_take_creator(self):
q = question(save=True)
q.take(q.creator)
@raises(AlreadyTakenException)
def test_take_twice_fails(self):
u1 = user(save=True)
u2 = user(save=True)
q = question(save=True)
q.take(u1)
q.take(u2)
def test_take_twice_same_user_refreshes_time(self):
u = user(save=True)
first_taken_until = datetime.now() - timedelta(minutes=5)
q = question(taken_by=u, taken_until=first_taken_until, save=True)
q.take(u)
ok_(q.taken_until > first_taken_until)
def test_take_twice_forced(self):
u1 = user(save=True)
u2 = user(save=True)
q = question(save=True)
q.take(u1)
q.take(u2, force=True)
eq_(q.taken_by, u2)
def test_taken_until_is_set(self):
u = user(save=True)
q = question(save=True)
q.take(u)
assert q.taken_until > datetime.now()
def test_is_taken_clears(self):
u = user(save=True)
taken_until = datetime.now() - timedelta(seconds=30)
q = question(taken_by=u, taken_until=taken_until, save=True)
# Testing q.is_taken should clear out ``taken_by`` and ``taken_until``,
# since taken_until is in the past.
eq_(q.is_taken, False)
eq_(q.taken_by, None)
eq_(q.taken_until, None)
def test_creator_follows(self):
q = question(save=True)
f = Follow.objects.get(user=q.creator)
eq_(f.follow_object, q)
eq_(f.actor_only, False)
class AddExistingTagTests(TestCaseBase):
"""Tests for the add_existing_tag helper function."""
def setUp(self):
super(AddExistingTagTests, self).setUp()
self.untagged_question = question(save=True)
def test_tags_manager(self):
"""Make sure the TaggableManager exists.
Full testing of functionality is a matter for taggit's tests.
"""
tags_eq(self.untagged_question, [])
def test_add_existing_case_insensitive(self):
"""Assert add_existing_tag works case-insensitively."""
tag(name='lemon', slug='lemon', save=True)
add_existing_tag('LEMON', self.untagged_question.tags)
tags_eq(self.untagged_question, [u'lemon'])
@raises(Tag.DoesNotExist)
def test_add_existing_no_such_tag(self):
"""Assert add_existing_tag doesn't work when the tag doesn't exist."""
add_existing_tag('nonexistent tag', self.untagged_question.tags)
class OldQuestionsArchiveTest(ElasticTestCase):
def test_archive_old_questions(self):
last_updated = datetime.now() - timedelta(days=100)
# created just now
q1 = question(save=True)
# created 200 days ago
q2 = question(created=(datetime.now() - timedelta(days=200)),
updated=last_updated,
save=True)
# created 200 days ago, already archived
q3 = question(created=(datetime.now() - timedelta(days=200)),
is_archived=True,
updated=last_updated,
save=True)
self.refresh()
auto_archive_old_questions()
# There are three questions.
eq_(len(list(Question.objects.all())), 3)
# q2 and q3 are now archived and updated times are the same
archived_questions = list(Question.objects.filter(is_archived=True))
eq_(sorted([(q.id, q.updated.date()) for q in archived_questions]),
[(q.id, q.updated.date()) for q in [q2, q3]])
# q1 is still unarchived.
archived_questions = list(Question.objects.filter(is_archived=False))
eq_(sorted([q.id for q in archived_questions]),
[q1.id])
class QuestionVisitsTests(TestCase):
"""Tests for the pageview statistics gathering."""
# Need to monkeypatch close_old_connections out because it
# does something screwy with the testing infra around transactions.
@mock.patch.object(models, 'close_old_connections')
@mock.patch.object(googleanalytics, 'pageviews_by_question')
def test_visit_count_from_analytics(self, pageviews_by_question,
close_old_connections):
"""Verify stored visit counts from mocked data."""
q1 = question(save=True)
q2 = question(save=True)
q3 = question(save=True)
pageviews_by_question.return_value = {
q1.id: 42,
q2.id: 27,
q3.id: 1337,
123459: 3,
}
QuestionVisits.reload_from_analytics()
eq_(3, QuestionVisits.objects.count())
eq_(42, QuestionVisits.objects.get(question_id=q1.id).visits)
eq_(27, QuestionVisits.objects.get(question_id=q2.id).visits)
eq_(1337, QuestionVisits.objects.get(question_id=q3.id).visits)
# Change the data and run again to cover the update case.
pageviews_by_question.return_value = {
q1.id: 100,
q2.id: 200,
q3.id: 300,
}
QuestionVisits.reload_from_analytics()
eq_(3, QuestionVisits.objects.count())
eq_(100, QuestionVisits.objects.get(question_id=q1.id).visits)
eq_(200, QuestionVisits.objects.get(question_id=q2.id).visits)
eq_(300, QuestionVisits.objects.get(question_id=q3.id).visits)
class QuestionVoteTests(TestCase):
def test_add_metadata_over_1000_chars(self):
qv = questionvote(save=True)
qv.add_metadata('test1', 'a'*1001)
metadata = VoteMetadata.objects.all()[0]
eq_('a'*1000, metadata.value)
class TestActions(TestCase):
def test_question_create_action(self):
"""When a question is created, an Action is created too."""
q = question(save=True)
a = Action.objects.action_object(q).get()
eq_(a.actor, q.creator)
eq_(a.verb, 'asked')
eq_(a.target, None)
def test_answer_create_action(self):
"""When an answer is created, an Action is created too."""
q = question(save=True)
ans = answer(question=q, save=True)
act = Action.objects.action_object(ans).get()
eq_(act.actor, ans.creator)
eq_(act.verb, 'answered')
eq_(act.target, q)
def test_question_change_no_action(self):
"""When a question is changed, no Action should be created."""
q = question(save=True)
Action.objects.all().delete()
q.save() # trigger another post_save hook
eq_(Action.objects.count(), 0)
def test_answer_change_no_action(self):
"""When an answer is changed, no Action should be created."""
q = question(save=True)
Action.objects.all().delete()
q.save() # trigger another post_save hook
eq_(Action.objects.count(), 0)
def test_question_solved_makes_action(self):
"""When an answer is marked as the solution to a question, an Action should be created."""
ans = answer(save=True)
Action.objects.all().delete()
ans.question.set_solution(ans, ans.question.creator)
act = Action.objects.action_object(ans).get()
eq_(act.actor, ans.question.creator)
eq_(act.verb, 'marked as a solution')
eq_(act.target, ans.question)
|
orvi2014/kitsune
|
kitsune/questions/tests/test_models.py
|
Python
|
bsd-3-clause
| 23,222
|
[
"VisIt"
] |
79e5d018a135e3ebcb209d87b2c7fc5333be2dde43ac63a81602fd7462a85a39
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Set up an electrokinetics (LB) fluid confined between charged walls.
"""
import espressomd
required_features = ["ELECTROKINETICS", "EK_BOUNDARIES", "EXTERNAL_FORCES"]
espressomd.assert_features(required_features)
from espressomd import System, shapes, electrokinetics, ekboundaries
import os
system = System(box_l=[10, 10, 10])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.cell_system.skin = 0.4
system.time_step = 0.1
ek = electrokinetics.Electrokinetics(
lb_density=1, friction=1, agrid=1, viscosity=1, T=1, prefactor=1)
pos = electrokinetics.Species(
density=0.05, D=0.1, valency=1, ext_force_density=[0, 0, 1.])
neg = electrokinetics.Species(
density=0.05, D=0.1, valency=-1, ext_force_density=[0, 0, -1.])
ek.add_species(pos)
ek.add_species(neg)
system.actors.add(ek)
print(ek.get_params())
print(pos.get_params())
print(neg.get_params())
print(pos[5, 5, 5].density)
ek_wall_left = ekboundaries.EKBoundary(
shape=shapes.Wall(dist=1, normal=[1, 0, 0]), charge_density=-0.01)
ek_wall_right = ekboundaries.EKBoundary(
shape=shapes.Wall(dist=-9, normal=[-1, 0, 0]), charge_density=0.01)
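# Reading the wall shapes (illustrative): dist=1 with normal [1, 0, 0] is the plane x = 1 and
# dist=-9 with normal [-1, 0, 0] is the plane x = 9, so the two charged walls face each other
# across the 10x10x10 box with opposite surface charge densities.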
system.ekboundaries.add(ek_wall_left)
system.ekboundaries.add(ek_wall_right)
if not os.path.isdir("ek"):
os.makedirs("ek")
n_int_cycles = 1000
for i in range(n_int_cycles):
system.integrator.run(100)
print("\rIntegrating: %03i" % i, end='', flush=True)
pos.print_vtk_density("ek/pos_dens_%i.vtk" % i)
neg.print_vtk_density("ek/neg_dens_%i.vtk" % i)
pos.print_vtk_flux("ek/pos_flux_%i.vtk" % i)
neg.print_vtk_flux("ek/neg_flux_%i.vtk" % i)
ek.print_vtk_velocity("ek/ekv_%i.vtk" % i)
ek.print_vtk_boundary("ek/ekb_%i.vtk" % i)
|
psci2195/espresso-ffans
|
samples/ekboundaries.py
|
Python
|
gpl-3.0
| 2,458
|
[
"ESPResSo",
"VTK"
] |
4a48096d9985aaf688491f1d22f278d69097d7d10d6a35ee6be54651417b2853
|
''' file name : canny.py
Description : This sample shows how to find edges using canny edge detection
This is the Python version of this tutorial : http://opencv.itseez.com/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.html
Level : Beginner
Benefits : Learn to apply canny edge detection to images.
Usage : python canny.py
Written by : Abid K. (abidrahman2@gmail.com) , Visit opencvpython.blogspot.com for more tutorials '''
import cv2
import numpy as np
def CannyThreshold(lowThreshold):
detected_edges = cv2.GaussianBlur(gray,(3,3),0)
detected_edges = cv2.Canny(detected_edges,lowThreshold,lowThreshold*ratio,apertureSize = kernel_size)
dst = cv2.bitwise_and(img,img,mask = detected_edges) # just add some colours to edges from original image.
cv2.imshow('canny demo',dst)
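# Illustrative: with ratio = 3, dragging the trackbar to 40 runs
# cv2.Canny(detected_edges, 40, 120) -- the high threshold always tracks lowThreshold*ratio.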
lowThreshold = 0
max_lowThreshold = 100
ratio = 3
kernel_size = 3
img = cv2.imread('sampel.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.namedWindow('canny demo')
cv2.createTrackbar('Min threshold','canny demo',lowThreshold, max_lowThreshold, CannyThreshold)
CannyThreshold(0) # initialization
if cv2.waitKey(0) == 27:
cv2.destroyAllWindows()
# visit for output results : http://opencvpython.blogspot.com/2012/06/image-derivatives-sobel-and-scharr.html
|
vickydasta/musicboxandhumanbrain
|
edge.py
|
Python
|
mit
| 1,285
|
[
"VisIt"
] |
40292c71c224ee0edc093f384de940aa7499ac5cade80e2fef2a12b96caee5e6
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from . import alert
from . import appsyntax
from . import autolink
from . import bibtex
from . import command
from . import core
from . import devel
from . import floats
from . import common
from . import include
from . import media
from . import package
from . import sqa
from . import table
from . import materialicon
from . import layout
from . import config
from . import style
|
nuclear-wizard/moose
|
python/MooseDocs/extensions/__init__.py
|
Python
|
lgpl-2.1
| 691
|
[
"MOOSE"
] |
916b666a160690ebc03b8a2db1f994a08c84fabd193fd91061f3fa98d7d6e0c4
|
from distutils.core import setup
setup(
name='dbb',
version='1.0.6',
author='Donovan Parks',
author_email='donovan.parks@gmail.com',
packages=['dbb', 'dbb.plots'],
scripts=['bin/dbb'],
package_data={'dbb': ['data/*.txt']},
url='http://pypi.python.org/pypi/dbb/',
license='GPL3',
description='Bin scaffolds into population genomes.',
long_description=open('README.md').read(),
install_requires=[
"numpy >= 1.6.1",
"scipy >= 0.10.1",
"matplotlib >= 1.3.0",
"pysam >= 0.7.4, <0.8.0"],
)
|
dparks1134/DBB
|
setup.py
|
Python
|
gpl-3.0
| 565
|
[
"pysam"
] |
2d7e3925d628cb24537523bc3da8a622785bebf62c5594b43a864bce4dce371b
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
useme2psivar = {
# <<< DFT >>>
'DFT.usemeraw': 'DFT FUNCTIONAL TOTAL ENERGY', # for herding. plays well with other uses?
#'-nobas.DFTdX.usemedash': 'DISPERSION CORRECTION ENERGY', # for herding. plays well with other uses?
#'DHDFT.usemeraw': 'DOUBLE-HYBRID CORRECTION ENERGY', # for herding. plays well with other uses? # violation of conventions to get plain dhdft E!
'blyp.usemeraw': 'BLYP FUNCTIONAL TOTAL ENERGY',
'blypd2.usemedash': 'BLYP-D2 DISPERSION CORRECTION ENERGY',
'blypd3.usemedash': 'BLYP-D3 DISPERSION CORRECTION ENERGY',
'blypd3bj.usemedash': 'BLYP-D3(BJ) DISPERSION CORRECTION ENERGY',
'blypd3m.usemedash': 'BLYP-D3M DISPERSION CORRECTION ENERGY',
'blypd3mbj.usemedash': 'BLYP-D3M(BJ) DISPERSION CORRECTION ENERGY',
'b3lyp.usemeraw': 'B3LYP FUNCTIONAL TOTAL ENERGY',
'b3lypd2.usemedash': 'B3LYP-D2 DISPERSION CORRECTION ENERGY',
'b3lypd3.usemedash': 'B3LYP-D3 DISPERSION CORRECTION ENERGY',
'b3lypd3bj.usemedash': 'B3LYP-D3(BJ) DISPERSION CORRECTION ENERGY',
'b3lypxdm.usemedash': 'B3LYP-XDM DISPERSION CORRECTION ENERGY',
'b3lypd3m.usemedash': 'B3LYP-D3M DISPERSION CORRECTION ENERGY',
'b3lypd3mbj.usemedash': 'B3LYP-D3M(BJ) DISPERSION CORRECTION ENERGY',
'b2plyp.usemeraw': 'B2PLYP TOTAL ENERGY', # no psivar for fctl + dh, which would be the more restrictive def
'b2plypd2.usemedash': 'B2PLYP-D2 DISPERSION CORRECTION ENERGY',
'b2plypd3.usemedash': 'B2PLYP-D3 DISPERSION CORRECTION ENERGY',
'b2plypd3bj.usemedash': 'B2PLYP-D3(BJ) DISPERSION CORRECTION ENERGY',
'b2plypd3m.usemedash': 'B2PLYP-D3M DISPERSION CORRECTION ENERGY',
'b2plypd3mbj.usemedash': 'B2PLYP-D3M(BJ) DISPERSION CORRECTION ENERGY',
'b970.usemeraw': 'B970 FUNCTIONAL TOTAL ENERGY',
'b970d2.usemedash': 'B970-D2 DISPERSION CORRECTION ENERGY',
'b97.usemeraw': 'B97 FUNCTIONAL TOTAL ENERGY',
'b97d2.usemedash': 'B97-D2 DISPERSION CORRECTION ENERGY',
'b97d3.usemedash': 'B97-D3 DISPERSION CORRECTION ENERGY',
'b97d3bj.usemedash': 'B97-D3(BJ) DISPERSION CORRECTION ENERGY',
'b97d3m.usemedash': 'B97-D3M DISPERSION CORRECTION ENERGY',
'b97d3mbj.usemedash': 'B97-D3M(BJ) DISPERSION CORRECTION ENERGY',
'bp86.usemeraw': 'BP86 FUNCTIONAL TOTAL ENERGY',
'bp86d2.usemedash': 'BP86-D2 DISPERSION CORRECTION ENERGY',
'bp86d3.usemedash': 'BP86-D3 DISPERSION CORRECTION ENERGY',
'bp86d3bj.usemedash': 'BP86-D3(BJ) DISPERSION CORRECTION ENERGY',
'bp86d3m.usemedash': 'BP86-D3M DISPERSION CORRECTION ENERGY',
'bp86d3mbj.usemedash': 'BP86-D3M(BJ) DISPERSION CORRECTION ENERGY',
'wb97x.usemeraw': 'WB97X FUNCTIONAL TOTAL ENERGY',
'wb97xd.usemeraw': 'WB97X-D TOTAL ENERGY',
'wb97xd.usemedash': 'WB97X-D DISPERSION CORRECTION ENERGY',
'wb97x2.usemeraw': 'WB97X-2 TOTAL ENERGY', # no psivar for fctl + dh, which would be the more restrictive def
'wb97xv.usemeraw': 'WB97X-V TOTAL ENERGY',
'm052x.usemeraw': 'M05-2X FUNCTIONAL TOTAL ENERGY',
'm052xd3.usemedash': 'M05-2X-D3 DISPERSION CORRECTION ENERGY',
'm062x.usemeraw': 'M06-2X FUNCTIONAL TOTAL ENERGY',
'm062xd3.usemedash': 'M06-2X-D3 DISPERSION CORRECTION ENERGY',
'pbe.usemeraw': 'PBE FUNCTIONAL TOTAL ENERGY',
'pbed2.usemedash': 'PBE-D2 DISPERSION CORRECTION ENERGY',
'pbed3.usemedash': 'PBE-D3 DISPERSION CORRECTION ENERGY',
'pbed3bj.usemedash': 'PBE-D3(BJ) DISPERSION CORRECTION ENERGY',
'pbed3m.usemedash': 'PBE-D3M DISPERSION CORRECTION ENERGY',
'pbed3mbj.usemedash': 'PBE-D3M(BJ) DISPERSION CORRECTION ENERGY',
'pbe0.usemeraw': 'PBE0 FUNCTIONAL TOTAL ENERGY',
'pbe0d2.usemedash': 'PBE0-D2 DISPERSION CORRECTION ENERGY',
'pbe0d3.usemedash': 'PBE0-D3 DISPERSION CORRECTION ENERGY',
'pbe0d3bj.usemedash': 'PBE0-D3(BJ) DISPERSION CORRECTION ENERGY',
'pbe0d3m.usemedash': 'PBE0-D3M DISPERSION CORRECTION ENERGY',
'pbe0d3mbj.usemedash': 'PBE0-D3M(BJ) DISPERSION CORRECTION ENERGY',
'wpbe.usemeraw': 'WPBE FUNCTIONAL TOTAL ENERGY',
'wpbed3.usemedash': 'WPBE-D3 DISPERSION CORRECTION ENERGY',
'wpbed3bj.usemedash': 'WPBE-D3(BJ) DISPERSION CORRECTION ENERGY',
'wpbed3m.usemedash': 'WPBE-D3M DISPERSION CORRECTION ENERGY',
'wpbed3mbj.usemedash': 'WPBE-D3M(BJ) DISPERSION CORRECTION ENERGY',
'xyg3.usemeraw': 'XYG3 TOTAL ENERGY', # no psivar for fctl + dh, which would be the more restrictive def
'vv10.usemeraw': 'VV10 FUNCTIONAL TOTAL ENERGY',
'lcvv10.usemeraw': 'LC-VV10 FUNCTIONAL TOTAL ENERGY',
'dsdpbep86.usemeraw': 'DSD-PBEP86 TOTAL ENERGY', # no psivar for fctl + dh, which would be the more restrictive def # also DSD technically implies -D
'dsdpbep86d2.usemedash': 'DSD-PBEP86-D2 DISPERSION CORRECTION ENERGY',
'dsdpbep86d3.usemedash': 'DSD-PBEP86-D3 DISPERSION CORRECTION ENERGY',
'dsdpbep86d3bj.usemedash': 'DSD-PBEP86-D3(BJ) DISPERSION CORRECTION ENERGY',
'm08hx.usemeraw': 'M08-HX FUNCTIONAL TOTAL ENERGY',
'm08so.usemeraw': 'M08-SO FUNCTIONAL TOTAL ENERGY',
'm11.usemeraw': 'M11 FUNCTIONAL TOTAL ENERGY',
'm11l.usemeraw': 'M11L FUNCTIONAL TOTAL ENERGY',
'pbe02.usemeraw': 'PBE0-2 TOTAL ENERGY', # no psivar for fctl + dh, which would be the more restrictive def
'dldf.usemeraw': 'DLDF FUNCTIONAL TOTAL ENERGY',
'dldfd.usemedash': 'DLDF+D DISPERSION CORRECTION ENERGY',
# <<< WFN >>>
#'usemeraw': 'HF TOTAL ENERGY',
'usemeraw': 'SCF TOTAL ENERGY',
'mp2.usemecorl': 'MP2 CORRELATION ENERGY',
'mp3.usemecorl': 'MP3 CORRELATION ENERGY',
'mp4.usemecorl': 'MP4 CORRELATION ENERGY',
'ccsd.usemecorl': 'CCSD CORRELATION ENERGY',
'ccsdt.usemecorl': 'CCSD(T) CORRELATION ENERGY',
'ccsdfullt.usemecorl': 'CCSDT CORRELATION ENERGY',
'ccsdtq.usemecorl': 'CCSDT(Q) CORRELATION ENERGY',
'fno.usemecrct': 'FNO CORRECTION ENERGY',
'fnomp3.usemecorl': 'MP3 FNO CORRELATION ENERGY',
'fnoccsd.usemecorl': 'CCSD FNO CORRELATION ENERGY',
'fnoccsdt.usemecorl': 'CCSD(T) FNO CORRELATION ENERGY',
'ccsdt.usemecrct': '(T) CORRECTION ENERGY',
'ccsdtq.usemecrct': '(Q) CORRECTION ENERGY',
'mp2.usemetrip': 'MP2 SAME-SPIN CORRELATION ENERGY',
'mp3.usemetrip': 'MP3 SAME-SPIN CORRELATION ENERGY',
'ccsd.usemetrip': 'CCSD SAME-SPIN CORRELATION ENERGY',
# <<< F12 >>>
'f12.usemeraw': 'HF-CABS TOTAL ENERGY',
'mp2f12.usemecorl': 'MP2-F12 CORRELATION ENERGY',
'ccsdaf12.usemecorl': 'CCSD-F12A CORRELATION ENERGY',
'ccsdbf12.usemecorl': 'CCSD-F12B CORRELATION ENERGY',
'ccsdcf12.usemecorl': 'CCSD-F12C CORRELATION ENERGY',
'ccsdnstaf12.usemecorl': 'CCSD(T)-F12A CORRELATION ENERGY',
'ccsdstaf12.usemecorl': 'CCSD(T*)-F12A CORRELATION ENERGY',
'ccsdtaf12.usemecorl': 'CCSD(T**)-F12A CORRELATION ENERGY',
'ccsdnstbf12.usemecorl': 'CCSD(T)-F12B CORRELATION ENERGY',
'ccsdstbf12.usemecorl': 'CCSD(T*)-F12B CORRELATION ENERGY',
'ccsdtbf12.usemecorl': 'CCSD(T**)-F12B CORRELATION ENERGY',
'ccsdnstcf12.usemecorl': 'CCSD(T)-F12C CORRELATION ENERGY',
'ccsdstcf12.usemecorl': 'CCSD(T*)-F12C CORRELATION ENERGY',
'ccsdtcf12.usemecorl': 'CCSD(T**)-F12C CORRELATION ENERGY',
'ccsdnstabf12.usemecrct': '(T)-F12AB CORRECTION ENERGY',
'ccsdstabf12.usemecrct': '(T*)-F12AB CORRECTION ENERGY',
'ccsdtabf12.usemecrct': '(T**)-F12AB CORRECTION ENERGY',
'ccsdnstcf12.usemecrct': '(T)-F12C CORRECTION ENERGY',
'ccsdstcf12.usemecrct': '(T*)-F12C CORRECTION ENERGY',
'ccsdtcf12.usemecrct': '(T**)-F12C CORRECTION ENERGY',
'mp2f12.usemetrip': 'MP2-F12 SAME-SPIN CORRELATION ENERGY',
'ccsdaf12.usemetrip': 'CCSD-F12A SAME-SPIN CORRELATION ENERGY',
'ccsdbf12.usemetrip': 'CCSD-F12B SAME-SPIN CORRELATION ENERGY',
'ccsdcf12.usemetrip': 'CCSD-F12C SAME-SPIN CORRELATION ENERGY',
# <<< SAPT >>>
'usemesapt': None,
'usemedftsapt': None,
'usemempsapt': None,
#'usemempsapt': 'MP2C DISP20 ENERGY',
'mp2cDisp20': 'MP2C DISP20 ENERGY',
'E1pol': 'DFT-SAPT ELST10,R ENERGY',
'E1exch': 'DFT-SAPT EXCH10 ENERGY',
'E1exch(S2)': 'DFT-SAPT EXCH10(S^2) ENERGY', # ne'er used
'E2ind': 'DFT-SAPT IND20,R ENERGY',
'E2ind-exch': 'DFT-SAPT EXCH-IND20,R ENERGY',
'E2disp': 'DFT-SAPT DISP20 ENERGY',
'E2disp-exch': 'DFT-SAPT EXCH-DISP20 ENERGY',
'Elst10,r': 'SAPT ELST10,R ENERGY',
'Elst12,r': 'SAPT ELST12,R ENERGY',
'Elst13,r': 'SAPT ELST13,R ENERGY',
'Exch10': 'SAPT EXCH10 ENERGY',
'Exch10(S^2)': 'SAPT EXCH10(S^2) ENERGY',
'Exch11(S^2)': 'SAPT EXCH11(S^2) ENERGY',
'Exch12(S^2)': 'SAPT EXCH12(S^2) ENERGY',
'Ind20,r': 'SAPT IND20,R ENERGY',
'Exch-Ind20,r': 'SAPT EXCH-IND20,R ENERGY',
'Ind22': 'SAPT IND22 ENERGY',
'Exch-Ind22': 'SAPT EXCH-IND22 ENERGY',
'Ind30,r': 'SAPT IND30,R ENERGY',
'Exch-Ind30,r': 'SAPT EXCH-IND30,R ENERGY',
'Ind-Disp30': 'SAPT IND-DISP30 ENERGY',
'Exch-Ind-Disp30': 'SAPT EXCH-IND-DISP30 ENERGY',
'Disp20': 'SAPT DISP20 ENERGY',
'Exch-Disp20': 'SAPT EXCH-DISP20 ENERGY',
#'Disp20(OS)': 'SAPT DISP20(OS) ENERGY',
#'Exch-Disp20(OS)': 'SAPT EXCH-DISP20(OS) ENERGY',
'Disp20(SS)': 'SAPT SAME-SPIN DISP20 ENERGY',
'Exch-Disp20(SS)': 'SAPT SAME-SPIN EXCH-DISP20 ENERGY',
'Disp21': 'SAPT DISP21 ENERGY',
'Disp22(SDQ)': 'SAPT DISP22(SDQ) ENERGY', # added for modern parsing, may confuse old usemesapt parsing
#'Disp22(T)': 'SAPT DISP22(T) ENERGY', # ditto # ne'er used
'Disp22(SDQ).1': 'SAPT DISP22(SDQ) ENERGY',
#'Disp22(T).1': 'SAPT DISP22(T) ENERGY', # ne'er used # edited to remove est
'Est.Disp22(T)': 'SAPT EST.DISP22(T) ENERGY',
'Disp2(CCD)': 'SAPT DISP2(CCD) ENERGY',
'Disp22(S)(CCD)': 'SAPT DISP22(S)(CCD) ENERGY',
#'Disp22(T)(CCD)': 'SAPT DISP22(T)(CCD) ENERGY', # ne'er used
'Est.Disp22(T)(CCD)': 'SAPT EST.DISP22(T)(CCD) ENERGY',
'Disp30': 'SAPT DISP30 ENERGY',
'Exch-Disp30': 'SAPT EXCH-DISP30 ENERGY',
'TotalHF': 'SAPT HF TOTAL ENERGY',
#'deltaHF,r(2)': None, # ne'er used
#'deltaHF,r(3)': None, # ne'er used
}
psivar2useme = dict((v, k) for k, v in useme2psivar.items())
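# Illustrative lookups (directly from the table above):
#   useme2psivar['mp2.usemecorl']           -> 'MP2 CORRELATION ENERGY'
#   psivar2useme['MP2 CORRELATION ENERGY']  -> 'mp2.usemecorl'
# Note that the several entries mapped to None collapse onto a single None key in the inverted dict.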
optclue2psivar = {
'full': ['CCSD CORRELATION ENERGY', 'CCSD TOTAL ENERGY',
'CCSD(T) TOTAL ENERGY', 'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
'CCSDT TOTAL ENERGY', 'CCSDT CORRELATION ENERGY',
'CCSDT(Q) TOTAL ENERGY', 'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
'fno1e3': ['CCSD(T) FNO CORRELATION ENERGY', 'CCSD FNO CORRELATION ENERGY', 'MP3 FNO CORRELATION ENERGY', 'FNO CORRECTION ENERGY',
'CCSD CORRELATION ENERGY', 'CCSD TOTAL ENERGY',
'CCSD(T) TOTAL ENERGY', 'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
'CCSDT TOTAL ENERGY', 'CCSDT CORRELATION ENERGY',
'CCSDT(Q) TOTAL ENERGY', 'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
'fno1e4': ['CCSD(T) FNO CORRELATION ENERGY', 'CCSD FNO CORRELATION ENERGY', 'MP3 FNO CORRELATION ENERGY', 'FNO CORRECTION ENERGY',
'CCSD CORRELATION ENERGY', 'CCSD TOTAL ENERGY',
'CCSD(T) TOTAL ENERGY', 'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
'CCSDT TOTAL ENERGY', 'CCSDT CORRELATION ENERGY',
'CCSDT(Q) TOTAL ENERGY', 'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
'fno1e5': ['CCSD(T) FNO CORRELATION ENERGY', 'CCSD FNO CORRELATION ENERGY', 'MP3 FNO CORRELATION ENERGY', 'FNO CORRECTION ENERGY',
'CCSD CORRELATION ENERGY', 'CCSD TOTAL ENERGY',
'CCSD(T) TOTAL ENERGY', 'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
'CCSDT TOTAL ENERGY', 'CCSDT CORRELATION ENERGY',
'CCSDT(Q) TOTAL ENERGY', 'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
'fno5e5': ['CCSD(T) FNO CORRELATION ENERGY', 'CCSD FNO CORRELATION ENERGY', 'MP3 FNO CORRELATION ENERGY', 'FNO CORRECTION ENERGY',
'CCSD CORRELATION ENERGY', 'CCSD TOTAL ENERGY',
'CCSD(T) TOTAL ENERGY', 'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
'CCSDT TOTAL ENERGY', 'CCSDT CORRELATION ENERGY',
'CCSDT(Q) TOTAL ENERGY', 'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
'fno1e6': ['CCSD(T) FNO CORRELATION ENERGY', 'CCSD FNO CORRELATION ENERGY', 'MP3 FNO CORRELATION ENERGY', 'FNO CORRECTION ENERGY',
'CCSD CORRELATION ENERGY', 'CCSD TOTAL ENERGY',
'CCSD(T) TOTAL ENERGY', 'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
'CCSDT TOTAL ENERGY', 'CCSDT CORRELATION ENERGY',
'CCSDT(Q) TOTAL ENERGY', 'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
'dsrgs0p1':['MP2 CORRELATION ENERGY', 'MP2 TOTAL ENERGY', 'MP2 SAME-SPIN CORRELATION ENERGY'],
'dsrgs0p5':['MP2 CORRELATION ENERGY', 'MP2 TOTAL ENERGY', 'MP2 SAME-SPIN CORRELATION ENERGY'],
'dsrgs1p0':['MP2 CORRELATION ENERGY', 'MP2 TOTAL ENERGY', 'MP2 SAME-SPIN CORRELATION ENERGY'],
'mrcc': ['CCSD CORRELATION ENERGY',
'CCSD(T) CORRELATION ENERGY', '(T) CORRECTION ENERGY',
'CCSDT CORRELATION ENERGY',
'CCSDT(Q) CORRELATION ENERGY', '(Q) CORRECTION ENERGY'],
'nfc': ['B2PLYP TOTAL ENERGY', 'B2PLYP-D2 TOTAL ENERGY', 'B2PLYP-D3 TOTAL ENERGY', 'B2PLYP-D3(BJ) TOTAL ENERGY',
'B2PLYP-D3M TOTAL ENERGY', 'B2PLYP-D3M(BJ) TOTAL ENERGY',
'DSD-PBEP86 TOTAL ENERGY', 'DSD-PBEP86-D2 TOTAL ENERGY', 'DSD-PBEP86-D3 TOTAL ENERGY', 'DSD-PBEP86-D3(BJ) TOTAL ENERGY',
'WB97X-2 TOTAL ENERGY'],
'fc': ['B2PLYP TOTAL ENERGY', 'B2PLYP-D2 TOTAL ENERGY', 'B2PLYP-D3 TOTAL ENERGY', 'B2PLYP-D3(BJ) TOTAL ENERGY',
'B2PLYP-D3M TOTAL ENERGY', 'B2PLYP-D3M(BJ) TOTAL ENERGY',
'DSD-PBEP86 TOTAL ENERGY', 'DSD-PBEP86-D2 TOTAL ENERGY', 'DSD-PBEP86-D3 TOTAL ENERGY', 'DSD-PBEP86-D3(BJ) TOTAL ENERGY',
'WB97X-2 TOTAL ENERGY'],
'dfhf': ['HF-CABS TOTAL ENERGY', 'MP2-F12 TOTAL ENERGY', 'SCS-MP2-F12 TOTAL ENERGY', 'SCS(N)-MP2-F12 TOTAL ENERGY',
'SCS(MI)-MP2-F12 TOTAL ENERGY', 'DW-MP2-F12 TOTAL ENERGY', 'MP2C-F12 TOTAL ENERGY',
'SCF TOTAL ENERGY', 'HF TOTAL ENERGY', 'MP2 TOTAL ENERGY', 'SCS-MP2 TOTAL ENERGY', 'SCS(N)-MP2 TOTAL ENERGY',
'SCS(MI)-MP2 TOTAL ENERGY', 'DW-MP2 TOTAL ENERGY', 'MP2C TOTAL ENERGY',
'B3LYP FUNCTIONAL TOTAL ENERGY', 'B3LYP TOTAL ENERGY', 'B3LYP-D2 TOTAL ENERGY', 'B3LYP-D3 TOTAL ENERGY', 'B3LYP-D3(BJ) TOTAL ENERGY', 'B3LYP-XDM TOTAL ENERGY',
'BLYP FUNCTIONAL TOTAL ENERGY', 'BLYP TOTAL ENERGY', 'BLYP-D2 TOTAL ENERGY', 'BLYP-D3 TOTAL ENERGY', 'BLYP-D3(BJ) TOTAL ENERGY',
'BP86 FUNCTIONAL TOTAL ENERGY', 'BP86 TOTAL ENERGY', 'BP86-D2 TOTAL ENERGY', 'BP86-D3 TOTAL ENERGY', 'BP86-D3(BJ) TOTAL ENERGY',
'PBE FUNCTIONAL TOTAL ENERGY', 'PBE TOTAL ENERGY', 'PBE-D2 TOTAL ENERGY', 'PBE-D3 TOTAL ENERGY', 'PBE-D3(BJ) TOTAL ENERGY',
'PBE0 FUNCTIONAL TOTAL ENERGY', 'PBE0 TOTAL ENERGY', 'PBE0-D2 TOTAL ENERGY', 'PBE0-D3 TOTAL ENERGY', 'PBE0-D3(BJ) TOTAL ENERGY',
'B97 FUNCTIONAL TOTAL ENERGY', 'B97 TOTAL ENERGY', 'B97-D2 TOTAL ENERGY', 'B97-D3 TOTAL ENERGY', 'B97-D3(BJ) TOTAL ENERGY',
'B2PLYP TOTAL ENERGY', 'B2PLYP-D2 TOTAL ENERGY', 'B2PLYP-D3 TOTAL ENERGY', 'B2PLYP-D3(BJ) TOTAL ENERGY',
'WPBE FUNCTIONAL TOTAL ENERGY', 'WPBE TOTAL ENERGY', 'WPBE-D3 TOTAL ENERGY', 'WPBE-D3(BJ) TOTAL ENERGY',
'M05-2X FUNCTIONAL TOTAL ENERGY', 'M05-2X TOTAL ENERGY',
'WB97X FUNCTIONAL TOTAL ENERGY', 'WB97X-D TOTAL ENERGY',
'B3LYP-D3M TOTAL ENERGY', 'BLYP-D3M TOTAL ENERGY', 'BP86-D3M TOTAL ENERGY', 'PBE-D3M TOTAL ENERGY',
'PBE0-D3M TOTAL ENERGY', 'B97-D3M TOTAL ENERGY', 'B2PLYP-D3M TOTAL ENERGY', 'WPBE-D3M TOTAL ENERGY',
'B3LYP-D3M(BJ) TOTAL ENERGY', 'BLYP-D3M(BJ) TOTAL ENERGY', 'BP86-D3M(BJ) TOTAL ENERGY', 'PBE-D3M(BJ) TOTAL ENERGY',
'PBE0-D3M(BJ) TOTAL ENERGY', 'B97-D3M(BJ) TOTAL ENERGY', 'B2PLYP-D3M(BJ) TOTAL ENERGY', 'WPBE-D3M(BJ) TOTAL ENERGY',
],
'dfmp': ['MP2-F12 CORRELATION ENERGY', 'MP2-F12 TOTAL ENERGY', 'MP2-F12 SAME-SPIN CORRELATION ENERGY',
'SCS-MP2-F12 CORRELATION ENERGY', 'SCS-MP2-F12 TOTAL ENERGY',
'SCS(N)-MP2-F12 CORRELATION ENERGY', 'SCS(N)-MP2-F12 TOTAL ENERGY',
'SCS(MI)-MP2-F12 CORRELATION ENERGY', 'SCS(MI)-MP2-F12 TOTAL ENERGY',
'DW-MP2-F12 CORRELATION ENERGY', 'DW-MP2-F12 TOTAL ENERGY',
'MP2C-F12 CORRELATION ENERGY', 'MP2C-F12 TOTAL ENERGY',
'MP2 CORRELATION ENERGY', 'MP2 TOTAL ENERGY', 'MP2 SAME-SPIN CORRELATION ENERGY',
'SCS-MP2 CORRELATION ENERGY', 'SCS-MP2 TOTAL ENERGY',
'SCS(N)-MP2 CORRELATION ENERGY', 'SCS(N)-MP2 TOTAL ENERGY',
'SCS(MI)-MP2 CORRELATION ENERGY', 'SCS(MI)-MP2 TOTAL ENERGY',
'DW-MP2 CORRELATION ENERGY', 'DW-MP2 TOTAL ENERGY',
'MP2C CORRELATION ENERGY', 'MP2C TOTAL ENERGY',
'SAPT2+DMP2 TOTAL ENERGY', 'SAPT2+(CCD)DMP2 TOTAL ENERGY',
'SAPT2+(3)DMP2 TOTAL ENERGY', 'SAPT2+(3)(CCD)DMP2 TOTAL ENERGY',
'SAPT2+3DMP2 TOTAL ENERGY', 'SAPT2+3(CCD)DMP2 TOTAL ENERGY',
'B2PLYP TOTAL ENERGY', 'B2PLYP-D2 TOTAL ENERGY', 'B2PLYP-D3 TOTAL ENERGY', 'B2PLYP-D3(BJ) TOTAL ENERGY',
'B2PLYP-D3M TOTAL ENERGY', 'B2PLYP-D3M(BJ) TOTAL ENERGY',
],
}
|
ashutoshvt/psi4
|
psi4/driver/qcdb/psivarrosetta.py
|
Python
|
lgpl-3.0
| 18,416
|
[
"Psi4"
] |
c4c1999ccda591844d4b3288b75f53e265023cbf0e908bf624c8b863dcaca4e4
|
import libtcodpy as libtcod
import math
import string
import textwrap
import shelve
import pygame
import pygame.mixer
#actual size of the window
SCREEN_WIDTH = 60
SCREEN_HEIGHT = 60
PANEL_HEIGHT = 11
BAR_WIDTH = 7
PANEL_Y = 0
GUI_X_OFFSET = -1
GUI_Y_OFFSET = -12
MSG_X = 12
MSG_WIDTH = SCREEN_WIDTH - MSG_X - 2
MSG_HEIGHT = PANEL_HEIGHT - 4
#size of the display for the map
MAP_WIDTH = 58
MAP_HEIGHT = 58 - PANEL_HEIGHT
ROOM_MAX_SIZE = 10
ROOM_MIN_SIZE = 6
MAX_ROOMS = 30
FOV_ALGO = 0
FOV_LIGHT_WALLS = True
TORCH_RADIUS = 10
LEVEL_UP_BASE = 200
LEVEL_UP_FACTOR = 150
LEVEL_SCREEN_WIDTH = 40
USER_ShowStatPercentage = False
USER_RenderMode = 'modern'
USER_BGM_VOL = 1
USER_SFX_VOL = .5
#BGM_TITLEMUSIC = 'audio/bgm/titlemusic.mp3'
#BGM_CAVEMUSIC = 'audio/bgm/cavemusic.mp3'
SFX_LEVELUP = 'audio/sfx/SFXLevelUp.wav'
SFX_PLAYERPUNCH = None
SFX_BATHIT = 'audio/sfx/HITBat.wav'
SFX_ORCHIT = 'audio/sfx/HITOrc.wav'
SFX_TROLLHIT = 'audio/sfx/HITTroll.wav'
SFX_CONFUSEHIT = 'audio/sfx/HITConfuse.wav'
SFX_FIREBALLHIT = 'audio/sfx/HITFireball.wav'
SFX_LIGHTNINGHIT = 'audio/sfx/HITLightning.wav'
SFX_POTIONPICKUP = 'audio/sfx/PICKUPHealthPot.wav'
SFX_SCROLLPICKUP = None
SFX_POTIONUSE = 'audio/sfx/USEHealthPot.wav'
color_dark_wall = libtcod.black
color_light_wall = libtcod.silver
color_dark_ground = libtcod.darkest_gray
color_light_ground = libtcod.darker_sepia
color_dark_wall_bk = libtcod.black
color_light_wall_bk = libtcod.black
color_dark_ground_bk = libtcod.black
color_light_ground_bk = libtcod.black
INV_LIMIT = 26
index = 0 #for the inventory menu
selection = 0 #for the inventory menu
SmallHealthPot = 35
LIGHTNING_DAMAGE = 40
LIGHTNING_RANGE = 5
CONFUSE_NUM_TURNS = 10
CONFUSE_RANGE = 8
FIREBALL_RADIUS = 3
FIREBALL_DAMAGE = 25
def PlaySFX(path):
if path != None:
s = pygame.mixer.Sound(path)
s.set_volume(USER_SFX_VOL)
s.play()
class Tile:
#a tile of the map and its properties
def __init__(self, blocked, block_sight = None):
self.blocked = blocked
#all tiles start unexplored
self.explored = False
#by default, if a tile is blocked, it also blocks sight
if block_sight is None: block_sight = blocked
self.block_sight = block_sight
class Rect:
#a rectangle on the map. used to characterize a room.
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
def center(self):
center_x = (self.x1 + self.x2) / 2
center_y = (self.y1 + self.y2) / 2
return (center_x, center_y)
def intersect(self, other):
#returns true if this rectangle intersects with another one
return (self.x1 <= other.x2 and self.x2 >= other.x1 and
self.y1 <= other.y2 and self.y2 >= other.y1)
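# Illustrative check: Rect(0, 0, 5, 5) spans x, y in [0, 5] and Rect(4, 4, 5, 5) spans [4, 9],
# so intersect() returns True because the rectangles share the region around (4..5, 4..5).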
class Object:
#this is a generic object: the player, a monster, an item, the stairs...
#it's always represented by a character on screen.
def __init__(self, x, y, char, name, color, blocks=False, always_visible=False, fighter=None, ai=None, item=None, equipment=None):
self.x = x
self.y = y
self.name = name
self.blocks = blocks
self.always_visible = always_visible
self.char = char
self.color = color
self.fighter = fighter
if self.fighter:
self.fighter.owner = self
self.ai = ai
if self.ai:
self.ai.owner = self
self.item = item
if self.item:
self.item.owner = self
self.equipment = equipment
if self.equipment:
self.equipment.owner = self
#there must be an Item component for the Equipment component
self.item = Item()
self.item.owner = self
def send_to_back(self):
#make this object be drawn first,
#so that any others appear above it
global objects
objects.remove(self)
objects.insert(0, self)
def move(self, dx, dy):
#move by the given amount, if the destination is not blocked
if not is_blocked(self.x + dx, self.y + dy):
self.x += dx
self.y += dy
def move_astar(self, target):
#Create a FOV map that has the dimensions of the map
fov = libtcod.map_new(MAP_WIDTH, MAP_HEIGHT)
#Scan the current map each turn and set all the walls as unwalkable
for y1 in range(MAP_HEIGHT):
for x1 in range(MAP_WIDTH):
libtcod.map_set_properties(fov, x1, y1, not map[x1][y1].block_sight, not map[x1][y1].blocked)
#Scan all the objects to see if there are objects that must be navigated around
#Check also that the object isn't self or the target (so that the start and the end points are free)
#The AI class handles the situation if self is next to the target so it will not use this A* function anyway
for obj in objects:
if obj.blocks and obj != self and obj != target:
#Set the tile as a wall so it must be navigated around
libtcod.map_set_properties(fov, obj.x, obj.y, True, False)
#Allocate an A* path
#The 1.41 is the normal diagonal cost of moving; it can be set to 0.0 if diagonal moves are prohibited
my_path = libtcod.path_new_using_map(fov, 1.41)
#Compute the path between self's coordinates and the target's coordinates
libtcod.path_compute(my_path, self.x, self.y, target.x, target.y)
#Check if the path exists, and in this case, also the path is shorter than 25 tiles
#The path size matters if you want the monster to use alternative longer paths (for example through other rooms) if for example the player is in a corridor
#It makes sense to keep path size relatively low to keep the monsters from running around the map if there's an alternative path really far away
if not libtcod.path_is_empty(my_path) and libtcod.path_size(my_path) < 25:
#Find the next coordinates in the computed full path
x, y = libtcod.path_walk(my_path, True)
if x or y:
#Set self's coordinates to the next path tile
self.x = x
self.y = y
else:
#Keep the old move function as a backup so that if there are no paths (for example another monster blocks a corridor)
#it will still try to move towards the player (closer to the corridor opening)
self.move_towards(target.x, target.y)
#Delete the path to free memory
libtcod.path_delete(my_path)
def move_towards(self, target_x, target_y):
#vector from this object to the target, and distance
dx = target_x - self.x
dy = target_y - self.y
distance = math.sqrt(dx ** 2 + dy ** 2)
#normalize it to length 1 (preserving direction), then round it and
#convert to integer so the movement is restricted to the map grid
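# Worked example (illustrative): a target offset of dx=5, dy=3 gives distance = sqrt(34) ~ 5.83;
# dividing each component by the distance and rounding yields the unit step (1, 1).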
dx = int(round(dx / distance))
dy = int(round(dy / distance))
self.move(dx, dy)
def distance_to(self, other):
#return the distance to another object
dx = other.x - self.x
dy = other.y - self.y
return math.sqrt(dx ** 2 + dy ** 2)
def distance(self, x, y):
#returns the distance to given coordinates
return math.sqrt((x-self.x) ** 2 + (y - self.y) ** 2)
def draw(self):
#only show if it's visible to the player
if (libtcod.map_is_in_fov(fov_map, self.x, self.y) or
(self.always_visible and map[self.x][self.y].explored)):
#set the color and then draw the character that represents this object at its position
libtcod.console_set_default_foreground(con, self.color)
libtcod.console_put_char(con, self.x, self.y, self.char, libtcod.BKGND_NONE)
def clear(self):
#erase the character that represents this object
libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)
class Fighter:
global USER_SFX_VOL
#combat-related properties
def __init__(self, hp, defense, power, speed, dexterity, xp, sight_range, death_function=None, atksound=None, hitsound=None):
self.base_max_hp = hp
self.hp = hp
self.base_defense = defense
self.base_power = power
self.base_speed = speed
self.timer = 0
self.dexterity = dexterity
self.xp = xp
self.sight_range = sight_range
self.death_function = death_function
self.atksound = atksound
self.hitsound = hitsound
@property
def power(self): #return actual power, by summing up the bonuses from all equipped items
bonus = sum(equipment.power_bonus for equipment in get_all_equipped(self.owner))
return self.base_power + bonus
@property
def defense(self): #return actual defense, by summing up the bonuses from all equipped items
bonus = sum(equipment.defense_bonus for equipment in get_all_equipped(self.owner))
return self.base_defense + bonus
@property
def max_hp(self): #return actual max_hp, by summing up the bonuses from all equipped items
bonus = sum(equipment.max_hp_bonus for equipment in get_all_equipped(self.owner))
return self.base_max_hp + bonus
@property
def speed(self): #return actual speed
bonus = sum(equipment.speed_bonus for equipment in get_all_equipped(self.owner))
return self.base_speed + bonus
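# Illustrative (bonus values hypothetical): a fighter with base_power == 2 holding an equipped
# item whose power_bonus == 3 reports power == 5; unequipped inventory items contribute nothing,
# since get_all_equipped() only returns equipment that is actually equipped.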
def take_damage(self, damage):
if damage > 0:
self.hp -= damage
if self.hitsound != None:
s = pygame.mixer.Sound(self.hitsound)
s.set_volume(USER_SFX_VOL)
s.play()
if self.hp <= 0:
self.hp = 0
function = self.death_function
if function is not None:
function(self.owner)
if self.owner != player: #yield xp to the player for the kill
player.fighter.xp += self.xp
def heal(self, amount):
self.hp += amount
if self.hp > self.max_hp:
self.hp = self.max_hp
def attack(self, target):
dodge = False
dodgecheck = libtcod.random_get_float(0, 1, 100)
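# Note on the dodge check below (illustrative): with Python 2 integer division, (3*10)/8
# evaluates to 3, so a 3-dex fighter dodges roughly 3% of the time, while the bat's 40 dex
# gives (40*10)/8 = 50%.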
if dodgecheck < ((target.fighter.dexterity*10)/8): #chance to dodge = (dex*10)/8 (3 dex = 3.75% chance to dodge)
dodge = True
message(self.owner.name.capitalize() + ' attacks ' + target.name + ', but ' + target.name + ' evades!', libtcod.yellow)
if not dodge:
damage = ((int(self.power + (self.power*0.1))) - target.fighter.defense) # (power + (10% Power)) - defense
color = libtcod.amber
if self.owner == player:
color = libtcod.cyan
if damage > 0:
message(self.owner.name.capitalize() + ' attacks ' + target.name + ' for ' + str(damage) + ' damage!', color)
target.fighter.take_damage(damage)
else:
message(self.owner.name.capitalize() + ' attacks ' + target.name + ', but it has no effect.', color)
class BasicMonster:
#AI for a Basic Monster
def take_turn(self):
#a basic monster takes its turn
monster = self.owner
print monster
if monster.distance_to(player) <= monster.fighter.sight_range: #if this monster is within its 'sight range' of the player..
if monster.distance_to(player) >= 2:
monster.move_astar(player)
elif player.fighter.hp > 0:
monster.fighter.attack(player)
class BatAI:
#AI for a Bat monster
def take_turn(self):
#a bat monster takes its turn.
monster = self.owner
print monster
if monster.distance_to(player) <= monster.fighter.sight_range: #if this monster is within its 'sight range' of the player..
r = libtcod.random_get_int(0, 1, 3)
print r
if r == 1: #1/3 chance to make a random movement
dx = libtcod.random_get_int(0, -1, 1)
dy = libtcod.random_get_int(0, -1, 1)
monster.move(dx, dy)
else: #2/3 chance to move to attack
if monster.distance_to(player) >= 2:
monster.move_astar(player)
elif player.fighter.hp > 0:
monster.fighter.attack(player)
class ConfusedMonster:
#AI for a temporarily confused monster
def __init__(self, old_ai, num_turns=CONFUSE_NUM_TURNS):
self.old_ai = old_ai
self.num_turns = num_turns
def take_turn(self):
if self.num_turns > 0: #still confused..
self.owner.move(libtcod.random_get_int(0, -1, 1), libtcod.random_get_int(0, -1, 1))
self.num_turns -= 1
else: #spell has broken
self.owner.ai = self.old_ai
message('The ' + self.owner.name + ' is no longer confused.', libtcod.red)
class Item:
#an item that can be picked up and used.
def __init__(self, use_function=None, pickupsound=None, usesound=None):
self.use_function = use_function
self.pickupsound = pickupsound
self.usesound = usesound
def pick_up(self):
#add to the player's inventory, remove from the map
if len(inventory) >= INV_LIMIT:
message('Your inventory is full, you cannot pick up ' + self.owner.name, libtcod.orange)
else:
inventory.append(self.owner)
objects.remove(self.owner)
message('You picked up a ' + self.owner.name + '.', libtcod.green)
if self.pickupsound:
s = pygame.mixer.Sound(self.pickupsound)
s.set_volume(USER_SFX_VOL)
s.play()
#special case: automatically equip, if the corresponding slot is unused
equipment = self.owner.equipment
if equipment and get_equipped_in_slot(equipment.slot) is None:
equipment.equip()
def drop(self):
#special case: if the object is 'Equipment', unequip it before dropping
if self.owner.equipment:
self.owner.equipment.unequp()
#add to the map and remove from the player's inventory
objects.append(self.owner)
inventory.remove(self.owner)
self.owner.x = player.x
self.owner.y = player.y
message('You dropped a ' + self.owner.name + '.', libtcod.yellow)
if self.pickupsound:
PlaySFX(self.pickupsound)
def use(self):
#special case: if the object has the Equipment component, the "use" action is to equip/unequip
if self.owner.equipment:
self.owner.equipment.toggle_equip()
return
#just call the "use_function" if it is defined
if self.use_function is None:
message('The ' + self.owner.name + ' cannot be used.')
else:
if self.use_function() != 'cancelled':
inventory.remove(self.owner) #destroy after use, unless it was cancelled for some reason
if self.usesound != None:
PlaySFX(self.usesound)
fov_recompute = True
class Equipment:
#an object that can be equipped, yielding bonuses. Automatically adds the Item component
def __init__(self, slot, power_bonus=0, defense_bonus=0, max_hp_bonus=0, speed_bonus=0,):
self.slot = slot
self.power_bonus = power_bonus
self.defense_bonus = defense_bonus
self.max_hp_bonus = max_hp_bonus
self.speed_bonus = speed_bonus
self.is_equipped = False
def toggle_equip(self):
if self.is_equipped:
self.unequp()
else:
self.equip()
def equip(self):
old_equipment = get_equipped_in_slot(self.slot)
if old_equipment is not None:
old_equipment.unequp()
#equip object and show a message
self.is_equipped = True
message('Equipped ' + self.owner.name + ' on ' + self.slot + '.', libtcod.light_green)
def unequp(self):
#unequip the object and show a message
if not self.is_equipped: return #stop if it isn't equipped
self.is_equipped = False
message('Unequipped ' + self.owner.name + ' from ' + self.slot + '.', libtcod.light_yellow)
def get_equipped_in_slot(slot):
for obj in inventory:
if obj.equipment and obj.equipment.slot == slot and obj.equipment.is_equipped:
return obj.equipment
return None
def get_all_equipped(obj):
if obj == player:
equipped_list = []
for item in inventory:
if item.equipment and item.equipment.is_equipped:
equipped_list.append(item.equipment)
return equipped_list
else:
return [] #other objects have no equipment
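# Illustrative sketch, comments only (never executed): the pieces above make it possible to
# total an equipment bonus for the player, roughly
#   bonus = sum(eq.power_bonus for eq in get_all_equipped(player))
# which would then be added to whatever base stat the Fighter component stores; the exact
# property names used for that elsewhere in this file are assumed here, not quoted.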
def is_blocked(x, y):
#first test the map tile
if map[x][y].blocked:
return True
for obj in objects:
if obj.blocks and obj.x == x and obj.y == y:
return True
return False
def create_room(room):
global map
#go through the tiles in the rectangle and make them passable
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 + 1, room.y2):
map[x][y].blocked = False
map[x][y].block_sight = False
def create_h_tunnel(x1, x2, y):
global map
#horizontal tunnel. min() and max() are used in case x1>x2
for x in range(min(x1, x2), max(x1, x2) + 1):
map[x][y].blocked = False
map[x][y].block_sight = False
def create_v_tunnel(y1, y2, x):
global map
#vertical tunnel
for y in range(min(y1, y2), max(y1, y2) + 1):
map[x][y].blocked = False
map[x][y].block_sight = False
def from_dungeon_level(table):
    #returns a value that depends on the dungeon level
    #the table specifies what value occurs from each level onwards; the default is 0
for (value, level) in reversed(table):
if dungeon_level >= level:
return value
return 0
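# Worked example for the table lookup above: a table of [[2, 1], [3, 4], [5, 6]] reads as
# "2 from level 1, 3 from level 4, 5 from level 6", so from_dungeon_level() returns 2 on dungeon
# levels 1-3, 3 on levels 4-5 and 5 from level 6 onwards, because the pairs are scanned in
# reverse and the first entry whose level is <= dungeon_level wins.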
def random_choice(chances_dict):
chances = chances_dict.values()
strings = chances_dict.keys()
return strings[random_choice_index(chances)]
def random_choice_index(chances): #choose one option from list of chances, returning its index
#the dice will land on some number between 1 and the sum of the chances
dice = libtcod.random_get_int(0, 1, sum(chances))
#go through all the chances, keeping the sum so far
running_sum = 0
choice = 0
for w in chances:
running_sum += w
#see if the dice landed in the part that corresponds to the choice
if dice <= running_sum:
return choice
choice += 1
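# Worked example for the weighted roll above: chances = [90, 10] gives a die range of 1-100;
# rolls 1-90 fall within the first running sum and return index 0, rolls 91-100 return index 1.
# random_choice() then maps the index back to its dict key, so
# random_choice({'bat': 90, 'orc': 10}) yields 'bat' roughly nine times out of ten.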
def place_objects(room):
#maximum number of monsters per room
max_monsters = from_dungeon_level([[2,1], [3, 4], [5,6]])
#chances of each monster
monster_chances = {}
monster_chances['bat'] = from_dungeon_level([[90, 1], [55, 2], [40, 3]])
monster_chances['orc'] = from_dungeon_level([[10, 1], [30, 2], [30, 3]])
monster_chances['troll'] = from_dungeon_level([[15,3], [30, 5], [60,7]])
#maximum number of items per room
max_items = from_dungeon_level([[1,1], [2,4]])
    #chance of each item
item_chances = {}
item_chances['sword'] = from_dungeon_level([[10, 1], [10, 2]])
item_chances['shield'] = from_dungeon_level([[10, 1], [10, 2]])
item_chances['heal'] = from_dungeon_level([[80, 1], [70, 2]])
item_chances['lightning'] = from_dungeon_level([[25, 6]])
item_chances['fireball'] = from_dungeon_level([[10, 2]])
item_chances['confuse'] = from_dungeon_level([[10, 4]])
#choose a random number of monsters
num_monsters = libtcod.random_get_int(0, 0, max_monsters)
for i in range(num_monsters):
x = libtcod.random_get_int(0, room.x1, room.x2)
y = libtcod.random_get_int(0, room.y1, room.y2)
if not is_blocked(x, y):
choice = random_choice(monster_chances)
if choice == 'bat':
#create a bat
fighter_component = Fighter(hp=10, defense=0, power=3, speed=5, dexterity=40, xp=30, sight_range=15, death_function=monster_death, hitsound=SFX_BATHIT)
ai_component = BatAI()
monster = Object(x, y, 'b', 'Bat', libtcod.lighter_grey, blocks=True, fighter=fighter_component, ai=ai_component)
elif choice == 'orc':
#create an orc
fighter_component = Fighter(hp=20, defense=0, power=4, speed=3, dexterity=0, xp=35, sight_range=10, death_function=monster_death, hitsound=SFX_ORCHIT)
ai_component = BasicMonster()
monster = Object(x, y, 'o', 'Orc', libtcod.desaturated_green, blocks=True, fighter=fighter_component, ai=ai_component)
else:
#create a Troll
fighter_component = Fighter(hp=30, defense=2, power=8, speed=2, dexterity=2, xp=100, sight_range=5, death_function=monster_death, hitsound=SFX_TROLLHIT)
ai_component = BasicMonster()
monster = Object(x, y, 'T', 'Troll', libtcod.darker_green, blocks=True, fighter=fighter_component, ai=ai_component)
objects.append(monster)
#choose a random number of items
num_items = libtcod.random_get_int(0, 0, max_items)
for i in range (num_items):
#choose a random position
x = libtcod.random_get_int(0, room.x1+1, room.x2-1)
y = libtcod.random_get_int(0, room.y1+1, room.y2-1)
#only place if the tile is not blocked
if not is_blocked(x, y):
choice = random_choice(item_chances)
if choice == 'heal':
item_component = Item(use_function=use_smallhealthpot, pickupsound=SFX_POTIONPICKUP, usesound=SFX_POTIONUSE)
item = Object(x, y, '!', 'Healing Potion', libtcod.white, item=item_component)
elif choice == 'lightning':
item_component = Item(use_function=cast_lightning, pickupsound=SFX_SCROLLPICKUP)
item = Object(x, y, '#', 'Scroll of Lightning Bolt', libtcod.light_yellow, item=item_component)
elif choice == 'fireball':
item_component = Item(use_function=cast_fireball, pickupsound=SFX_SCROLLPICKUP)
item = Object(x, y, '&', 'Scroll of Fireball', libtcod.light_yellow, item=item_component)
elif choice == 'confuse':
item_component = Item(use_function=cast_confuse, pickupsound=SFX_SCROLLPICKUP)
item = Object(x, y, '#', 'Scroll of Confusion', libtcod.light_yellow, item=item_component)
elif choice == 'sword':
equipment_component = Equipment(slot='your main hand', power_bonus=3)
item = Object(x, y, '/', 'Rusty Shortsword', libtcod.sky, equipment=equipment_component)
elif choice == 'shield':
equipment_component = Equipment(slot='your off-hand', defense_bonus=1)
item = Object(x, y, '[', 'Wooden Shield', libtcod.darker_orange, equipment=equipment_component)
objects.append(item)
item.send_to_back() #items appear below other objects
def make_map():
global map, player, objects, stairs
objects = [player]
#fill map with "blocked" tiles
map = [[ Tile(True)
for y in range(MAP_HEIGHT) ]
for x in range(MAP_WIDTH) ]
rooms = []
num_rooms = 0
for r in range(MAX_ROOMS):
#random width and height
w = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
h = libtcod.random_get_int(0, ROOM_MIN_SIZE, ROOM_MAX_SIZE)
#random position without going out of the boundaries of the map
x = libtcod.random_get_int(0, 0, MAP_WIDTH - w - 1)
y = libtcod.random_get_int(0, 0, MAP_HEIGHT - h - 1)
#"Rect" class makes rectangles easier to work with
new_room = Rect(x, y, w, h)
#run through the other rooms and see if they intersect with this one
failed = False
for other_room in rooms:
if new_room.intersect(other_room):
failed = True
break
if not failed:
#this means there are no intersections, so this room is valid
#"paint" it to the map's tiles
create_room(new_room)
#center coordinates of new room, will be useful later
(new_x, new_y) = new_room.center()
if num_rooms == 0:
                #this is the first room, where the player starts
player.x = new_x
player.y = new_y
else:
#all rooms after the first:
#connect it to the previous room with a tunnel
#center coordinates of previous room
(prev_x, prev_y) = rooms[num_rooms-1].center()
#draw a coin (random number that is either 0 or 1)
if libtcod.random_get_int(0, 0, 1) == 1:
#first move horizontally, then vertically
create_h_tunnel(prev_x, new_x, prev_y)
create_v_tunnel(prev_y, new_y, new_x)
else:
#first move vertically, then horizontally
create_v_tunnel(prev_y, new_y, prev_x)
create_h_tunnel(prev_x, new_x, new_y)
            #add some contents to this room, such as monsters
place_objects(new_room)
#finally, append the new room to the list
rooms.append(new_room)
num_rooms += 1
#create stairs at the center of the last room
stairs = Object(new_x, new_y, '<', 'stairs', libtcod.white, always_visible=True)
objects.append(stairs)
stairs.send_to_back()
def next_level():
global dungeon_level
#advance to the next level
message('You take a moment to rest, and recover your strength.', libtcod.light_violet)
player.fighter.heal(player.fighter.max_hp/4) #heal the player by 25%
dungeon_level += 1
message('After a rare moment of peace, you descend deeper into the heart of the dungeon...', libtcod.red)
make_map()
initialize_fov()
def theme_update():
global USER_RenderMode
global color_dark_wall, color_dark_wall_bk, color_light_wall, color_light_wall_bk
global color_dark_ground, color_dark_ground_bk, color_light_ground, color_light_ground_bk
if USER_RenderMode == 'oldschool':
player.color = libtcod.yellow
player.char = '@'
color_dark_wall = libtcod.darkest_grey
color_dark_wall_bk = libtcod.black
color_light_wall = libtcod.lighter_grey
color_light_wall_bk = libtcod.black
color_dark_ground = libtcod.darkest_grey
color_dark_ground_bk = libtcod.black
color_light_ground = libtcod.white
color_light_ground_bk = libtcod.black
elif USER_RenderMode == 'modern':
player.color = libtcod.white
player.char = 2
color_dark_wall = libtcod.black
color_light_wall = libtcod.silver
color_dark_ground = libtcod.darkest_gray
color_light_ground = libtcod.darker_sepia
        color_dark_wall_bk = color_dark_wall
color_light_wall_bk = color_light_wall
color_dark_ground_bk = color_dark_ground
color_light_ground_bk = color_light_ground
def render_bar(x, y, total_width, name, value, maximum, bar_color, back_color):
#render a gui bar (hp, xp, stamina, etc)
#first calculate width of the bar
bar_width = int(float(value) / maximum * total_width)
#render background first
libtcod.console_set_default_background(panel, back_color)
libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SCREEN)
#now render the bar on top
libtcod.console_set_default_background(panel, bar_color)
if bar_width > 0:
libtcod.console_rect(panel, x, y, bar_width, 1, False, libtcod.BKGND_SET)
#finally, some centered text with the values
if USER_ShowStatPercentage:
libtcod.console_set_default_foreground(panel, libtcod.black)
libtcod.console_print_ex(panel, x + total_width / 2, y, libtcod.BKGND_NONE, libtcod.CENTER,
str(value) + "/" + str(maximum))
libtcod.console_set_default_foreground(panel, libtcod.white)
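# Worked example for the bar math above: with value=30, maximum=100 and total_width=20,
# bar_width = int(float(30) / 100 * 20) = 6, so 6 of the 20 background cells are drawn in
# bar_color, and the "30/100" label is centred over the bar only while USER_ShowStatPercentage
# is enabled.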
def render_all():
global fov_map, mouse
global USER_RenderMode
global fov_recompute, MENU_CHAR_DETAILS
global dungeon_level
global color_dark_wall, color_dark_wall_bk, color_light_wall, color_light_wall_bk
global color_dark_ground, color_dark_ground_bk, color_dark_wall, color_dark_wall_bk
theme_update()
libtcod.console_clear(0)
if fov_recompute:
#recompute FOV if needed (the player moved or something)
fov_recompute = False
libtcod.map_compute_fov(fov_map, player.x, player.y, TORCH_RADIUS, FOV_LIGHT_WALLS, FOV_ALGO)
#go through all tiles, and set their background color according to the FOV
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
visible = libtcod.map_is_in_fov(fov_map, x, y)
wall = map[x][y].block_sight
if not visible:
#if it's not visible right now, the player can only see it if it's explored
if map[x][y].explored:
if wall:
if USER_RenderMode == 'oldschool':
libtcod.console_put_char_ex(con, x, y, '#', color_dark_wall, color_dark_wall_bk)
else:
libtcod.console_put_char_ex(con, x, y, ' ', color_dark_wall, color_dark_wall)
else:
if USER_RenderMode == 'oldschool':
libtcod.console_put_char_ex(con, x, y, '.', color_dark_ground, color_dark_ground_bk)
else:
libtcod.console_put_char_ex(con, x, y, ' ', color_dark_ground, color_dark_ground)
else:
#it's visible
if wall:
if USER_RenderMode == 'oldschool':
libtcod.console_put_char_ex(con, x, y, '#', color_light_wall, color_light_wall_bk)
else:
libtcod.console_put_char_ex(con, x, y, ' ', color_light_wall, color_light_wall)
else:
if USER_RenderMode == 'oldschool':
libtcod.console_put_char_ex(con, x, y, '.', color_light_ground, color_light_ground_bk)
else:
libtcod.console_put_char_ex(con, x, y, ' ', color_light_ground, color_light_ground)
#since it's visible, explore it
map[x][y].explored = True
#draw all objects in the list
for object in objects:
if object != player:
object.draw()
player.draw()
#blit the contents of "con" to the root console
libtcod.console_blit(con, 0, 0, MAP_WIDTH, MAP_HEIGHT, 0, 1, PANEL_HEIGHT+1)
#prepare to render the GUI panel
libtcod.console_set_default_background(panel, libtcod.black)
libtcod.console_clear(panel)
#print the game messages, one line at a time
y = PANEL_Y + 2
for (line, color) in game_msgs:
libtcod.console_set_default_foreground(panel, color)
libtcod.console_print_ex(panel, MSG_X, y, libtcod.BKGND_NONE, libtcod.LEFT, line)
y += 1
#show the player's name
libtcod.console_set_default_foreground(panel, libtcod.white)
libtcod.console_print_ex(panel, 2, 2, libtcod.BKGND_NONE, libtcod.LEFT,
player.name)
#show the player's stats
#HP Bar
render_bar(2, 4, BAR_WIDTH, 'HP', player.fighter.hp, player.fighter.max_hp,
libtcod.light_green, libtcod.light_red)
#yellow bar
render_bar(2, 6, BAR_WIDTH, 'yeller', 8, 10,
libtcod.light_yellow, libtcod.gold)
    #XP-to-next-level bar
lv_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR)
render_bar(2, 8, BAR_WIDTH, 'bloo', player.fighter.xp, lv_up_xp,
libtcod.cyan, libtcod.darker_azure)
#print the panel borders
libtcod.console_set_default_background(panel, libtcod.white)
libtcod.console_print_frame(panel, 0, PANEL_Y, 11, PANEL_HEIGHT, False, libtcod.BKGND_SET)
libtcod.console_print_frame(panel, 0, PANEL_Y, SCREEN_WIDTH, PANEL_HEIGHT, False, libtcod.BKGND_SET)
libtcod.console_set_default_background(panel, libtcod.black)
libtcod.console_blit(panel, 0, 0, SCREEN_WIDTH, PANEL_HEIGHT, 0, 0, 0)
#check for name under mouse
get_tooltip()
#print root borders
libtcod.console_set_default_background(0, libtcod.white)
libtcod.console_set_default_foreground(0, libtcod.white)
libtcod.console_hline(0, 0, SCREEN_HEIGHT-1, SCREEN_WIDTH, libtcod.BKGND_SET)
libtcod.console_vline(0, 0, 0, SCREEN_HEIGHT, libtcod.BKGND_SET)
libtcod.console_vline(0, SCREEN_WIDTH-1, 0, SCREEN_HEIGHT, libtcod.BKGND_SET)
#print GUI arrows
libtcod.console_set_default_foreground(0, libtcod.black)
libtcod.console_put_char(0, 5, 10, 31, libtcod.BKGND_NONE)
libtcod.console_set_default_foreground(0, libtcod.dark_gray)
libtcod.console_put_char(0, 4, 10, 31, libtcod.BKGND_NONE)
libtcod.console_put_char(0, 6, 10, 31, libtcod.BKGND_NONE)
#print dungeon level
libtcod.console_set_default_foreground(0, libtcod.black)
libtcod.console_print_ex(0, 26, 10, libtcod.BKGND_NONE, libtcod.RIGHT, 'Dungeon Level ' + str(dungeon_level))
libtcod.console_set_default_background(0, libtcod.black)
if game_state == 'charmenu':
menu_chardetail()
elif game_state == 'inventorymenu':
menu_inventory()
def handle_keys():
global game_state
global key, fov_recompute
global USER_ShowStatPercentage, USER_RenderMode
global selection, index #for the inventory menu
global stairs
if key.vk == libtcod.KEY_ENTER and key.lalt:
#Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
elif key.vk == libtcod.KEY_ALT:
USER_ShowStatPercentage = not USER_ShowStatPercentage
if game_state == 'didnt-take-turn':
game_state = 'playing'
if game_state == 'dead':
if key.vk == libtcod.KEY_ESCAPE:
            return 'exit'
if game_state == 'playing':
if key.vk == libtcod.KEY_ESCAPE:
return 'exit' #exit game
elif key.vk == libtcod.KEY_UP or key.vk == libtcod.KEY_KP8:
player_move_or_attack(0, -1)
fov_recompute = True
elif key.vk == libtcod.KEY_KP0:
player.fighter.xp += 20
check_level_up()
elif key.vk == libtcod.KEY_KP9:
player_move_or_attack(1, -1)
fov_recompute = True
elif key.vk == libtcod.KEY_RIGHT or key.vk == libtcod.KEY_KP6:
player_move_or_attack(1, 0)
fov_recompute = True
elif key.vk == libtcod.KEY_KP3:
player_move_or_attack(1, 1)
fov_recompute = True
elif key.vk == libtcod.KEY_DOWN or key.vk == libtcod.KEY_KP2:
player_move_or_attack(0, 1)
fov_recompute = True
elif key.vk == libtcod.KEY_KP1:
player_move_or_attack(-1, 1)
fov_recompute = True
elif key.vk == libtcod.KEY_LEFT or key.vk == libtcod.KEY_KP4:
player_move_or_attack(-1, 0)
fov_recompute = True
elif key.vk == libtcod.KEY_KP7:
player_move_or_attack(-1, -1)
fov_recompute = True
elif key.vk == libtcod.KEY_KP5:
return 'wait'
else:
#test for other keys
key_char = chr(key.c)
if key_char == 'c':
game_state = 'charmenu'
elif key_char == 'f':
player.fighter.speed += 1
elif key_char == 'v':
player.fighter.speed -= 1
elif key_char == 'g':
#pick up an item
for object in objects:
if object.x == player.x and object.y == player.y:
if object.item:
object.item.pick_up()
break
else:
if object != player:
message('You can not pick up the ' + object.name + '.', libtcod.gray)
elif key_char == 'i':
menu_chardetail()
index = 0
selection = 0
game_state = 'inventorymenu'
elif key_char == 'r':
if USER_RenderMode == 'modern':
USER_RenderMode = 'oldschool'
theme_update()
fov_recompute = True
elif USER_RenderMode == 'oldschool':
USER_RenderMode = 'modern'
theme_update()
fov_recompute = True
elif key_char == ',':
                print 'stairs:' + str(stairs.x) + ',' + str(stairs.y)
print 'player:' + str(player.x) + ',' + str(player.y)
#go down the stairs if the player is on them
if stairs.x == player.x and stairs.y == player.y:
next_level()
return 'didnt-take-turn'
elif game_state == 'charmenu':
key_char = chr(key.c)
if key_char == 'c' or key.vk == libtcod.KEY_ESCAPE:
game_state = 'didnt-take-turn'
if key_char == 'i':
index = 0
selection = 0
game_state = 'inventorymenu'
elif game_state == 'inventorymenu':
key_char = chr(key.c)
if key.vk == libtcod.KEY_ESCAPE:
game_state = 'didnt-take-turn'
elif key.vk == libtcod.KEY_DOWN:
if selection < 21 and len(inventory) > selection+1:
selection += 1
elif key.vk == libtcod.KEY_UP:
if selection > 0 and len(inventory) > selection:
selection -= 1
elif key.vk == libtcod.KEY_ENTER:
if len(inventory) > 0:
itm = inventory[selection].item
if itm:
itm.use()
game_state = 'playing'
else:
if key_char == 'd':
if len(inventory) > 0:
itm = inventory[selection].item
if itm != None:
itm.drop()
if selection > len(inventory)-1:
selection -= 1
elif key_char == 'i':
game_state = 'charmenu'
def get_tooltip():
#TODO : Have this check for specific object components to determine what to return...
# if obj.fighter: #return fighter details
# if obj.equipment: #return stat bonus
global mouse
#return a string with the name of all objects under the mouse
(x, y) = (mouse.cx, mouse.cy)
x += GUI_X_OFFSET
y += GUI_Y_OFFSET
#create a list with the names of all objects at the mouse's coordinates and in FOV
objs = [obj for obj in objects
if obj.x == x and obj.y == y and libtcod.map_is_in_fov(fov_map, obj.x, obj.y)]
if len(objs) > 0:
names = ', '.join(obj.name for obj in objs)
else:
names = None
if names != None:
render_tooltip(names)
def render_tooltip(name):
global mouse
width = len(name) + 4
height = 5
#this ensures the tip does not go outside the screen
x = mouse.cx + 1
y = mouse.cy + 1
#check horizontal borders
if (x + width) > SCREEN_WIDTH -1:
x += (SCREEN_WIDTH - (x + width + 2))
#check vertical borders
if (y + height) > SCREEN_HEIGHT -2:
y += (SCREEN_HEIGHT - (y + height + 2))
libtcod.console_set_default_background(tooltip, libtcod.black)
libtcod.console_clear(tooltip)
libtcod.console_set_default_background(tooltip, libtcod.white)
libtcod.console_set_default_foreground(tooltip, libtcod.white)
libtcod.console_print_frame(tooltip, 0, 0, width, height, False, libtcod.BKGND_SET)
libtcod.console_print_ex(tooltip, 2, (height/2), libtcod.BKGND_NONE, libtcod.LEFT, name)
libtcod.console_blit(tooltip, 0, 0, width, height, 0, x, y)
def message(new_msg, color = libtcod.white):
#split the message if necessary, among multiple lines
new_msg_lines = textwrap.wrap(new_msg, MSG_WIDTH)
for line in new_msg_lines:
#if the buffer is full, remove the first line to make room for the new one
if len(game_msgs) == MSG_HEIGHT:
del game_msgs[0]
#add the new line as a tuple, with the text and the color
game_msgs.append( (line, color) )
def menu(header, options, width):
global key, mouse
if len(options) > 26: raise ValueError('Cannot have a menu with more than 26 options.')
#calculate total height for the header (after auto-wrap) and one line per option
header_height = libtcod.console_get_height_rect(con, 0, 0, width, SCREEN_HEIGHT, header)
if header == '':
header_height = 0
height = len(options) + header_height
#create an off-screen console that represents the menu's window
window = libtcod.console_new(width, height)
#print the header, with auto-wrap
libtcod.console_set_default_foreground(window, libtcod.white)
libtcod.console_print_rect_ex(window, 0, 0, width, height, libtcod.BKGND_NONE, libtcod.LEFT, header)
#print all the options
y = header_height
letter_index = ord('a')
for option_text in options:
text = '(' + chr(letter_index) + ') ' + option_text
libtcod.console_print_ex(window, 0, y, libtcod.BKGND_NONE, libtcod.LEFT, text)
y += 1
letter_index += 1
#blit the contents of "window" to the root console
x = SCREEN_WIDTH/2 - width/2
y = SCREEN_HEIGHT/2 - height/2
libtcod.console_blit(window, 0, 0, width, height, 0, x, y, 1.0, 0.7)
#compute x and y offsets to convert console position to menu position
x_offset = x #x is the left edge of the menu
    y_offset = y + header_height #the option rows start below the header, so shift the offset down by its height
while True:
#present the root console to the player and check for input
libtcod.console_flush()
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE,key,mouse)
if (mouse.lbutton_pressed):
(menu_x, menu_y) = (mouse.cx - x_offset, mouse.cy - y_offset)
#check if click is within the menu and on a choice
if menu_x >= 0 and menu_x < width and menu_y >= 0 and menu_y < height - header_height:
return menu_y
if mouse.rbutton_pressed or key.vk == libtcod.KEY_ESCAPE:
return None #cancel if the player right-clicked or pressed Escape
if key.vk == libtcod.KEY_ENTER and key.lalt:
#Alt+Enter: toggle fullscreen
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
#convert the ASCII code to an index; if it corresponds to an option, return it
index = key.c - ord('a')
if index >= 0 and index < len(options): return index
#if they pressed a letter that is not an option, return None
if index >= 0 and index <= 26: return None
def msgbox(text, width=50):
menu(text, [], width)
def menu_chardetail():
#this is called when the char detail tab is brought up (c)
#prepare menu
libtcod.console_set_default_background(menus, libtcod.dark_grey)
libtcod.console_clear(menus)
libtcod.console_set_default_background(menus, libtcod.white)
libtcod.console_set_default_foreground(menus, libtcod.white)
#draw frame
libtcod.console_print_frame(menus, 0, 10, 10, 16, False, libtcod.BKGND_SET)
libtcod.console_set_default_background(menus, libtcod.dark_grey)
    #print stat labels
libtcod.console_print_ex(menus, 2, 12, libtcod.BKGND_NONE, libtcod.LEFT, 'Str')
libtcod.console_print_ex(menus, 2, 13, libtcod.BKGND_NONE, libtcod.LEFT, 'Con')
libtcod.console_print_ex(menus, 2, 14, libtcod.BKGND_NONE, libtcod.LEFT, 'Dex')
libtcod.console_print_ex(menus, 2, 15, libtcod.BKGND_NONE, libtcod.LEFT, 'Int')
libtcod.console_print_ex(menus, 2, 17, libtcod.BKGND_NONE, libtcod.LEFT, 'Dmg')
libtcod.console_print_ex(menus, 2, 18, libtcod.BKGND_NONE, libtcod.LEFT, 'Def')
libtcod.console_print_ex(menus, 2, 19, libtcod.BKGND_NONE, libtcod.LEFT, 'Spd')
#print stats (right aligned to keep with layout)
libtcod.console_print_ex(menus, 7, 12, libtcod.BKGND_NONE, libtcod.RIGHT, str(player.fighter.power))
libtcod.console_print_ex(menus, 7, 13, libtcod.BKGND_NONE, libtcod.RIGHT, str(player.fighter.defense))
libtcod.console_print_ex(menus, 7, 14, libtcod.BKGND_NONE, libtcod.RIGHT, str(player.fighter.dexterity))
libtcod.console_print_ex(menus, 7, 15, libtcod.BKGND_NONE, libtcod.RIGHT, '*')
libtcod.console_print_ex(menus, 7, 17, libtcod.BKGND_NONE, libtcod.RIGHT, str(int(player.fighter.power + (player.fighter.power*.1))))
libtcod.console_print_ex(menus, 7, 18, libtcod.BKGND_NONE, libtcod.RIGHT, str(player.fighter.defense))
libtcod.console_print_ex(menus, 7, 19, libtcod.BKGND_NONE, libtcod.RIGHT, str(player.fighter.speed))
libtcod.console_set_default_background(menus, libtcod.light_grey)
libtcod.console_set_default_foreground(menus, libtcod.black)
libtcod.console_print_ex(menus, 2, 21, libtcod.BKGND_SET, libtcod.LEFT, ' Inv ' + chr(16))
libtcod.console_print_ex(menus, 2, 23, libtcod.BKGND_SET, libtcod.LEFT, ' Set ' + chr(16))
#print player level
libtcod.console_print_ex(menus, 2, 10, libtcod.BKGND_NONE, libtcod.LEFT, 'Lvl')
libtcod.console_print_ex(menus, 7, 10, libtcod.BKGND_NONE, libtcod.LEFT, str(player.level))
libtcod.console_blit(menus, 0, 10, 10, 16, 0, 0, 10)
def menu_inventory():
global selection, index
libtcod.console_set_default_background(menus, libtcod.white)
libtcod.console_set_default_foreground(menus, libtcod.white)
libtcod.console_print_frame(menus, 9, 19, 46, 35, False, libtcod.BKGND_SET)
libtcod.console_hline(menus, 9, 27, 46, libtcod.BKGND_SET)
libtcod.console_set_default_foreground(menus, libtcod.light_yellow)
libtcod.console_print_ex(menus, 11, 21, libtcod.BKGND_NONE, libtcod.LEFT, 'Equipment')
libtcod.console_print_ex(menus, 11, 29, libtcod.BKGND_NONE, libtcod.LEFT, 'Inventory')
libtcod.console_set_default_foreground(menus, libtcod.white)
libtcod.console_print_ex(menus, 14, 23, libtcod.BKGND_NONE, libtcod.LEFT, 'M Hand : Head :')
libtcod.console_print_ex(menus, 14, 24, libtcod.BKGND_NONE, libtcod.LEFT, 'O Hand : Body :')
libtcod.console_print_ex(menus, 14, 25, libtcod.BKGND_NONE, libtcod.LEFT, ' Ammo : Feet :')
libtcod.console_set_default_foreground(menus, libtcod.darkest_gray)
libtcod.console_print_ex(menus, 23, 23, libtcod.BKGND_NONE, libtcod.LEFT, '[ EMPTY ]')
libtcod.console_print_ex(menus, 41, 23, libtcod.BKGND_NONE, libtcod.LEFT, '[ EMPTY ]')
libtcod.console_print_ex(menus, 23, 24, libtcod.BKGND_NONE, libtcod.LEFT, '[ EMPTY ]')
libtcod.console_print_ex(menus, 41, 24, libtcod.BKGND_NONE, libtcod.LEFT, '[ EMPTY ]')
libtcod.console_print_ex(menus, 23, 25, libtcod.BKGND_NONE, libtcod.LEFT, '[ EMPTY ]')
libtcod.console_print_ex(menus, 41, 25, libtcod.BKGND_NONE, libtcod.LEFT, '[ EMPTY ]')
libtcod.console_set_default_background(menus, libtcod.darkest_gray)
libtcod.console_hline(menus, 14, 31, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 33, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 35, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 37, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 39, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 41, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 43, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 45, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 47, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 49, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 51, 36, libtcod.BKGND_SET)
libtcod.console_set_default_foreground(menus, libtcod.darker_gray)
libtcod.console_set_default_background(menus, libtcod.darker_gray)
libtcod.console_hline(menus, 14, 32, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 34, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 36, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 38, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 40, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 42, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 44, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 46, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 48, 36, libtcod.BKGND_SET)
libtcod.console_hline(menus, 14, 50, 36, libtcod.BKGND_SET)
libtcod.console_set_default_foreground(menus, libtcod.white)
if len(inventory) > 0:
for item in inventory:
index = inventory.index(item)
if index < 21:
if item.equipment and item.equipment.is_equipped:
libtcod.console_print_ex(menus, 14, 31+index, libtcod.BKGND_NONE, libtcod.LEFT, "* " + item.name + " *")
else:
libtcod.console_print_ex(menus, 14, 31+index, libtcod.BKGND_NONE, libtcod.LEFT, item.name)
libtcod.console_set_default_foreground(menus, libtcod.light_yellow)
libtcod.console_set_default_background(menus, libtcod.light_yellow)
libtcod.console_hline(menus, 14, 31+selection, 36, libtcod.BKGND_SET)
libtcod.console_set_default_foreground(menus, libtcod.black)
if inventory[selection].equipment and inventory[selection].equipment.is_equipped:
libtcod.console_print_ex(menus, 14, 31+selection, libtcod.BKGND_SET, libtcod.LEFT, "* " + inventory[selection].name + " *")
else:
libtcod.console_print_ex(menus, 14, 31+selection, libtcod.BKGND_SET, libtcod.LEFT, inventory[selection].name)
#show any 'equipped' items
for item in inventory:
text = str(item.name)
if len(text) >= 9:
text = text[0:8] + "."
else:
for n in range((9 - len(text))):
text = text + ' '
if item.equipment and item.equipment.is_equipped:
libtcod.console_set_default_foreground(menus, libtcod.cyan)
if item.equipment.slot == 'your main hand':
libtcod.console_print_ex(menus, 23, 23, libtcod.BKGND_NONE, libtcod.LEFT, text)
elif item.equipment.slot == 'your off-hand':
libtcod.console_print_ex(menus, 23, 24, libtcod.BKGND_NONE, libtcod.LEFT, text)
#blit the char_detail menu
libtcod.console_blit(menus, 0, 10, 10, 16, 0, 0, 10)
#blit the inventory
libtcod.console_blit(menus, 9, 19, 46, 35, 0, 9, 19)
def player_move_or_attack(dx, dy):
global fov_recompute
x = player.x + dx
y = player.y + dy
target = None
for object in objects:
if object.fighter and object.x == x and object.y == y:
target = object
break
#attack if there is a target, otherwise move
if target is not None:
player.fighter.attack(target)
else:
player.move(dx, dy)
fov_recompute = True
def check_level_up():
    #see if the player has enough XP to level up
level_up_xp = LEVEL_UP_BASE + (player.level * LEVEL_UP_FACTOR)
if player.fighter.xp >= level_up_xp:
player.level += 1
player.fighter.xp -= level_up_xp
message('Your battle skills grow stronger! You reached level ' + str(player.level) + '!', libtcod.yellow)
PlaySFX(SFX_LEVELUP)
choice = None
while choice == None:
choice = menu('Level up! Choose a stat to raise:\n',
['Constitution (+20 HP from ' + str(player.fighter.max_hp) + ')',
'Strength (+1 power, from ' + str(player.fighter.power) + ')',
'Agility (+1 dexterity, from ' + str(player.fighter.dexterity) + ')'], LEVEL_SCREEN_WIDTH)
if choice == 0:
player.fighter.max_hp += 20
player.fighter.hp += int((player.fighter.max_hp * .2))
elif choice == 1:
player.fighter.base_power += 1
elif choice == 2:
        player.fighter.dexterity += 1
def closest_monster(max_range):
#find closest enemy, up to a maximum range, and in the player's FOV
closest_enemy = None
closest_dist = max_range + 1 #start with (slightly more than) maximum range
for object in objects:
if object.fighter and not object == player and libtcod.map_is_in_fov(fov_map, object.x, object.y):
#calculate distance between this object and the player
dist = player.distance_to(object)
if dist < closest_dist: #it's closer, so remember it
closest_enemy = object
closest_dist = dist
return closest_enemy
def target_tile(max_range=None):
global key, mouse
while True:
libtcod.console_flush()
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE, key, mouse)
render_all()
(x, y) = (mouse.cx, mouse.cy)
x += GUI_X_OFFSET
y += GUI_Y_OFFSET
#accept the target if the player clicked in FOV, and in case a range is specified, if it's in that range
if (mouse.lbutton_pressed and libtcod.map_is_in_fov(fov_map, x, y) and
(max_range is None or player.distance(x, y) <= max_range)):
return (x, y)
if mouse.rbutton_pressed or key.vk == libtcod.KEY_ESCAPE:
return (None, None) #cancel if the player right-clicked or pressed Escape
def target_monster(max_range=None):
while True:
(x, y) = target_tile(max_range)
if x is None:
return None
for obj in objects:
if obj.x == x and obj.y == y and obj.fighter and obj != player:
return obj
def player_death(player):
#the game ended!
global game_state
message('You died!', libtcod.red)
game_state = 'dead'
#for added effect, transform the player into a corpse!
player.char = '%'
player.color = libtcod.dark_red
player.name = player.name + ' Remains'
def monster_death(monster):
#transform it into a nasty corpse!
    #doesn't block, can't move, can't be attacked
message(monster.name.capitalize() + ' dies from its wounds!', libtcod.gray)
message('You gain ' + str(monster.fighter.xp) + ' experience points!', libtcod.lightest_cyan)
monster.char = '%'
monster.color = libtcod.dark_red
monster.send_to_back()
monster.blocks = False
monster.ai = None
monster.fighter = None
monster.name = monster.name + ' Remains'
def cast_confuse():
global game_state
game_state = 'playing'
    #ask the player to target a monster within CONFUSE_RANGE and confuse it
monster = target_monster(CONFUSE_RANGE)
if monster is None:
message('No enemy is close enough to confuse.', libtcod.red)
return 'cancelled'
old_ai = monster.ai
monster.ai = ConfusedMonster(old_ai)
monster.ai.owner = monster
    message('The eyes of the ' + monster.name + ' look vacant, as it starts to stumble around.', libtcod.light_green)
PlaySFX(SFX_CONFUSEHIT)
def cast_lightning():
#find the closest enemy in range and damage it
monster = closest_monster(LIGHTNING_RANGE)
if monster is None: #no enemy within range
message('No enemy is close enough to strike.', libtcod.red)
return 'cancelled'
#zap it!!
message('A bolt of lightning strikes the ' + monster.name + ' with a loud thunder, causing ' + str(LIGHTNING_DAMAGE) + ' damage!', libtcod.light_blue)
monster.fighter.take_damage(LIGHTNING_DAMAGE)
PlaySFX(SFX_LIGHTNINGHIT)
def cast_fireball():
global game_state
game_state = 'playing'
#ask the player for a target tile
message('Left-click a target for the fireball, or right-click to cancel.', libtcod.light_cyan)
(x, y) = target_tile()
if x is None: return 'cancelled'
message('The fireball explodes, burning everything within ' + str(FIREBALL_RADIUS) + ' tiles!', libtcod.orange)
for obj in objects:
if obj.distance(x, y) <= FIREBALL_RADIUS and obj.fighter:
message('The ' + obj.name + ' gets burned for ' + str(FIREBALL_DAMAGE) + ' damage!', libtcod.cyan)
obj.fighter.take_damage(FIREBALL_DAMAGE)
libtcod.console_flush()
PlaySFX(SFX_FIREBALLHIT)
def use_smallhealthpot():
#healing potion
if player.fighter.hp == player.fighter.max_hp:
message('You are already at full health.', libtcod.red)
return 'cancelled'
message('Your wounds start to feel better!', libtcod.light_violet)
player.fighter.heal(SmallHealthPot)
#############################################
# Initialization & Main Loop
#############################################
libtcod.console_set_custom_font('cp437_10x10.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, "A'Rel", False)
con = libtcod.console_new(MAP_WIDTH, MAP_HEIGHT)
panel = libtcod.console_new(SCREEN_WIDTH, PANEL_HEIGHT)
tooltip = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
menus = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
pygame.init()
pygame.mixer.init()
mouse = libtcod.Mouse()
key = libtcod.Key()
def save_game():
#open a new empty shelve (possibly overwriting an old one) to write the game data
file = shelve.open('savegame', 'n')
file['map'] = map
file['objects'] = objects
file['player_index'] = objects.index(player)
file['inventory'] = inventory
file['game_msgs'] = game_msgs
file['game_state'] = game_state
file['stairs_index'] = objects.index(stairs)
file['dungeon_level'] = dungeon_level
file.close()
def load_game():
#open the previously saved shelve and load game data
global map, objects, player, inventory, game_msgs, game_state, stairs, dungeon_level
file = shelve.open('savegame', 'r')
map = file['map']
objects = file['objects']
player = objects[file['player_index']]
inventory = file['inventory']
game_msgs = file['game_msgs']
game_state = file['game_state']
stairs = objects[file['stairs_index']]
dungeon_level = file['dungeon_level']
file.close()
initialize_fov()
def new_game():
global player, inventory, game_msgs, game_state, dungeon_level
#create object representing the player
fighter_component = Fighter(hp=100, defense=1, power=4, speed=3, dexterity=3, xp=0, sight_range=0, death_function=player_death, atksound=SFX_PLAYERPUNCH)
player = Object(SCREEN_WIDTH/2, SCREEN_HEIGHT/2, 2, 'Heroman', libtcod.white, blocks=True, fighter=fighter_component)
player.level = 1
dungeon_level = 1
make_map()
initialize_fov()
game_state = 'playing'
inventory = []
#create the list of game messages and their colors, starts empty
game_msgs = []
    message('Welcome stranger! Prepare to perish in the Tombs of the Ancient!', libtcod.dark_red)
#initial equipment; a dagger
equipment_component = Equipment(slot='your main hand', power_bonus=2)
obj = Object(0, 0, '-', 'Broken Dagger', libtcod.sky, equipment=equipment_component)
inventory.append(obj)
equipment_component.equip()
obj.always_visible = True
def initialize_fov():
global fov_recompute, fov_map
fov_recompute = True
libtcod.console_clear(con)
#create the FOV map, according to the generated map
fov_map = libtcod.map_new(MAP_WIDTH, MAP_HEIGHT)
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
libtcod.map_set_properties(fov_map, x, y, not map[x][y].block_sight, not map[x][y].blocked)
def play_game():
global key, mouse
player_action = None
#pygame.mixer.music.load(BGM_CAVEMUSIC)
#pygame.mixer.music.set_volume(USER_BGM_VOL)
#pygame.mixer.music.play(-1, 0)
while not libtcod.console_is_window_closed():
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS|libtcod.EVENT_MOUSE,key,mouse)
#render the screen
render_all()
libtcod.console_flush()
check_level_up()
#erase all objects at their old locations, before they move
for object in objects:
object.clear()
#handle keys and exit game if needed
player_action = handle_keys()
if player_action == 'exit':
save_game()
break
if player_action == 'char-details':
MENU_CHAR_DETAILS = not MENU_CHAR_DETAILS
if game_state == 'playing' and player_action != 'didnt-take-turn':
for object in objects:
if object.ai:
object.fighter.timer += object.fighter.speed
while object.fighter.timer >= player.fighter.speed:
object.ai.take_turn()
object.fighter.timer -= player.fighter.speed
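# Worked example for the speed scheduling above: every player action adds a monster's own speed
# to its timer, and the monster acts once for each player.fighter.speed points accumulated.
# With player speed 3 and monster speed 5 the timer runs 5 (one action, 2 left over), then 7
# (two actions, 1 left over), averaging 5/3 monster actions per player action; a monster slower
# than the player instead sits out turns until its timer catches up.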
def main_menu():
img = libtcod.image_load('menu.png')
#pygame.mixer.music.load(BGM_TITLEMUSIC)
#pygame.mixer.music.set_volume(USER_BGM_VOL)
#pygame.mixer.music.play(-1, 0)
while not libtcod.console_is_window_closed():
libtcod.console_clear(0)
libtcod.image_blit_2x(img, 0, 0, 0)
#show the game's title, and some credits!
libtcod.console_set_default_foreground(0, libtcod.light_yellow)
libtcod.console_print_ex(0, SCREEN_WIDTH/2, SCREEN_HEIGHT/2-4, libtcod.BKGND_NONE, libtcod.CENTER,
"A'Rel")
libtcod.console_print_ex(0, SCREEN_WIDTH/2, SCREEN_HEIGHT-2, libtcod.BKGND_NONE, libtcod.CENTER,
'Mister Moxxie')
choice = menu('', ['Play a new game', 'Continue last game', 'Quit'], 24)
if choice == 0: #new game
new_game()
play_game()
if choice == 1: #load game
try:
load_game()
except:
msgbox('\n No saved game to load.\n', 24)
continue
play_game()
elif choice == 2: #quit
break
main_menu()
|
MisterMoxxie/ARel
|
ARelMain.py
|
Python
|
gpl-3.0
| 66,687
|
[
"Amber"
] |
92c33612e4eb2b71cf1b2dde7d597163dd1c07a805af8cf60fb53d83e95c260f
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Unrestricted CISD
'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import uccsd
from pyscf.cc import uccsd_rdm
from pyscf.ci import cisd
from pyscf.fci import cistring
from pyscf.cc.ccsd import _unpack_4fold
def make_diagonal(myci, eris):
nocca, noccb = eris.nocc
nmoa = eris.focka.shape[0]
nmob = eris.fockb.shape[1]
nvira = nmoa - nocca
nvirb = nmob - noccb
jdiag_aa = numpy.zeros((nmoa,nmoa))
jdiag_ab = numpy.zeros((nmoa,nmob))
jdiag_bb = numpy.zeros((nmob,nmob))
jdiag_aa[:nocca,:nocca] = numpy.einsum('iijj->ij', eris.oooo)
jdiag_aa[:nocca,nocca:] = numpy.einsum('iijj->ij', eris.oovv)
jdiag_aa[nocca:,:nocca] = jdiag_aa[:nocca,nocca:].T
jdiag_ab[:nocca,:noccb] = numpy.einsum('iijj->ij', eris.ooOO)
jdiag_ab[:nocca,noccb:] = numpy.einsum('iijj->ij', eris.ooVV)
jdiag_ab[nocca:,:noccb] = numpy.einsum('iijj->ji', eris.OOvv)
jdiag_bb[:noccb,:noccb] = numpy.einsum('iijj->ij', eris.OOOO)
jdiag_bb[:noccb,noccb:] = numpy.einsum('iijj->ij', eris.OOVV)
jdiag_bb[noccb:,:noccb] = jdiag_bb[:noccb,noccb:].T
kdiag_aa = numpy.zeros((nmoa,nmoa))
kdiag_bb = numpy.zeros((nmob,nmob))
kdiag_aa[:nocca,:nocca] = numpy.einsum('ijji->ij', eris.oooo)
kdiag_aa[:nocca,nocca:] = numpy.einsum('ijji->ij', eris.ovvo)
kdiag_aa[nocca:,:nocca] = kdiag_aa[:nocca,nocca:].T
kdiag_bb[:noccb,:noccb] = numpy.einsum('ijji->ij', eris.OOOO)
kdiag_bb[:noccb,noccb:] = numpy.einsum('ijji->ij', eris.OVVO)
kdiag_bb[noccb:,:noccb] = kdiag_bb[:noccb,noccb:].T
# if eris.vvvv is not None and eris.vvVV is not None and eris.VVVV is not None:
# def diag_idx(n):
# idx = numpy.arange(n)
# return idx * (idx + 1) // 2 + idx
# jdiag_aa[nocca:,nocca:] = eris.vvvv[diag_idx(nvira)[:,None],diag_idx(nvira)]
# jdiag_ab[nocca:,noccb:] = eris.vvVV[diag_idx(nvira)[:,None],diag_idx(nvirb)]
# jdiag_bb[noccb:,noccb:] = eris.VVVV[diag_idx(nvirb)[:,None],diag_idx(nvirb)]
# kdiag_aa[nocca:,nocca:] = lib.unpack_tril(eris.vvvv.diagonal())
# kdiag_bb[noccb:,noccb:] = lib.unpack_tril(eris.VVVV.diagonal())
jkdiag_aa = jdiag_aa - kdiag_aa
jkdiag_bb = jdiag_bb - kdiag_bb
mo_ea = eris.focka.diagonal()
mo_eb = eris.fockb.diagonal()
ehf = (mo_ea[:nocca].sum() + mo_eb[:noccb].sum()
- jkdiag_aa[:nocca,:nocca].sum() * .5
- jdiag_ab[:nocca,:noccb].sum()
- jkdiag_bb[:noccb,:noccb].sum() * .5)
dia_a = lib.direct_sum('a-i->ia', mo_ea[nocca:], mo_ea[:nocca])
dia_a -= jkdiag_aa[:nocca,nocca:]
dia_b = lib.direct_sum('a-i->ia', mo_eb[noccb:], mo_eb[:noccb])
dia_b -= jkdiag_bb[:noccb,noccb:]
e1diag_a = dia_a + ehf
e1diag_b = dia_b + ehf
e2diag_aa = lib.direct_sum('ia+jb->ijab', dia_a, dia_a)
e2diag_aa += ehf
e2diag_aa += jkdiag_aa[:nocca,:nocca].reshape(nocca,nocca,1,1)
e2diag_aa -= jkdiag_aa[:nocca,nocca:].reshape(nocca,1,1,nvira)
e2diag_aa -= jkdiag_aa[:nocca,nocca:].reshape(1,nocca,nvira,1)
e2diag_aa += jkdiag_aa[nocca:,nocca:].reshape(1,1,nvira,nvira)
e2diag_ab = lib.direct_sum('ia+jb->ijab', dia_a, dia_b)
e2diag_ab += ehf
e2diag_ab += jdiag_ab[:nocca,:noccb].reshape(nocca,noccb,1,1)
e2diag_ab += jdiag_ab[nocca:,noccb:].reshape(1,1,nvira,nvirb)
e2diag_ab -= jdiag_ab[:nocca,noccb:].reshape(nocca,1,1,nvirb)
e2diag_ab -= jdiag_ab[nocca:,:noccb].T.reshape(1,noccb,nvira,1)
e2diag_bb = lib.direct_sum('ia+jb->ijab', dia_b, dia_b)
e2diag_bb += ehf
e2diag_bb += jkdiag_bb[:noccb,:noccb].reshape(noccb,noccb,1,1)
e2diag_bb -= jkdiag_bb[:noccb,noccb:].reshape(noccb,1,1,nvirb)
e2diag_bb -= jkdiag_bb[:noccb,noccb:].reshape(1,noccb,nvirb,1)
e2diag_bb += jkdiag_bb[noccb:,noccb:].reshape(1,1,nvirb,nvirb)
return amplitudes_to_cisdvec(ehf, (e1diag_a, e1diag_b),
(e2diag_aa, e2diag_ab, e2diag_bb))
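# The vector returned above is the diagonal of the CISD Hamiltonian packed in the same layout as
# amplitudes_to_cisdvec(). It is typically consumed as the preconditioner of the iterative
# (Davidson) solver, roughly
#   hdiag = make_diagonal(myci, eris)
#   precond = lambda x, e, *args: x / (hdiag - e)
# This is only a sketch of the intended use, not code copied from the solver.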
def contract(myci, civec, eris):
nocca, noccb = eris.nocc
nmoa = eris.focka.shape[0]
nmob = eris.fockb.shape[0]
nvira = nmoa - nocca
nvirb = nmob - noccb
c0, (c1a,c1b), (c2aa,c2ab,c2bb) = \
cisdvec_to_amplitudes(civec, (nmoa,nmob), (nocca,noccb))
#:t2 += 0.5*einsum('ijef,abef->ijab', c2, eris.vvvv)
#:eris_vvvv = ao2mo.restore(1, eris.vvvv, nvira)
#:eris_vvVV = ucisd_slow._restore(eris.vvVV, nvira, nvirb)
#:eris_VVVV = ao2mo.restore(1, eris.VVVV, nvirb)
#:t2aa += lib.einsum('ijef,aebf->ijab', c2aa, eris_vvvv)
#:t2bb += lib.einsum('ijef,aebf->ijab', c2bb, eris_VVVV)
#:t2ab += lib.einsum('iJeF,aeBF->iJaB', c2ab, eris_vvVV)
t2aa, t2ab, t2bb = myci._add_vvvv(None, (c2aa,c2ab,c2bb), eris)
t2aa *= .25
t2bb *= .25
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
fvoa = eris.focka[nocca:,:nocca]
fvob = eris.fockb[noccb:,:noccb]
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
t0 = 0
t1a = 0
t1b = 0
eris_oovv = _cp(eris.oovv)
eris_ooVV = _cp(eris.ooVV)
eris_OOvv = _cp(eris.OOvv)
eris_OOVV = _cp(eris.OOVV)
eris_ovov = _cp(eris.ovov)
eris_ovOV = _cp(eris.ovOV)
eris_OVOV = _cp(eris.OVOV)
#:t2 += eris.oovv * c0
t2aa += .25 * c0 * eris_ovov.conj().transpose(0,2,1,3)
t2aa -= .25 * c0 * eris_ovov.conj().transpose(0,2,3,1)
t2bb += .25 * c0 * eris_OVOV.conj().transpose(0,2,1,3)
t2bb -= .25 * c0 * eris_OVOV.conj().transpose(0,2,3,1)
t2ab += c0 * eris_ovOV.conj().transpose(0,2,1,3)
#:t0 += numpy.einsum('ijab,ijab', eris.oovv, c2) * .25
t0 += numpy.einsum('iajb,ijab', eris_ovov, c2aa) * .25
t0 -= numpy.einsum('jaib,ijab', eris_ovov, c2aa) * .25
t0 += numpy.einsum('iajb,ijab', eris_OVOV, c2bb) * .25
t0 -= numpy.einsum('jaib,ijab', eris_OVOV, c2bb) * .25
t0 += numpy.einsum('iajb,ijab', eris_ovOV, c2ab)
eris_ovov = eris_ovOV = eris_OVOV = None
#:tmp = einsum('imae,mbej->ijab', c2, eris.ovvo)
#:tmp = tmp - tmp.transpose(0,1,3,2)
#:t2 += tmp - tmp.transpose(1,0,2,3)
eris_ovvo = _cp(eris.ovvo)
eris_ovVO = _cp(eris.ovVO)
eris_OVVO = _cp(eris.OVVO)
ovvo = eris_ovvo - eris_oovv.transpose(0,3,2,1)
OVVO = eris_OVVO - eris_OOVV.transpose(0,3,2,1)
t2aa += lib.einsum('imae,jbem->ijab', c2aa, ovvo)
t2aa += lib.einsum('iMaE,jbEM->ijab', c2ab, eris_ovVO)
t2bb += lib.einsum('imae,jbem->ijab', c2bb, OVVO)
t2bb += lib.einsum('mIeA,meBJ->IJAB', c2ab, eris_ovVO)
t2ab += lib.einsum('imae,meBJ->iJaB', c2aa, eris_ovVO)
t2ab += lib.einsum('iMaE,MEBJ->iJaB', c2ab, OVVO)
t2ab += lib.einsum('IMAE,jbEM->jIbA', c2bb, eris_ovVO)
t2ab += lib.einsum('mIeA,jbem->jIbA', c2ab, ovvo)
t2ab -= lib.einsum('iMeA,JMeb->iJbA', c2ab, eris_OOvv)
t2ab -= lib.einsum('mIaE,jmEB->jIaB', c2ab, eris_ooVV)
#:t1 += einsum('nf,nafi->ia', c1, eris.ovvo)
t1a += numpy.einsum('nf,nfai->ia', c1a, eris_ovvo)
t1a -= numpy.einsum('nf,nifa->ia', c1a, eris_oovv)
t1b += numpy.einsum('nf,nfai->ia', c1b, eris_OVVO)
t1b -= numpy.einsum('nf,nifa->ia', c1b, eris_OOVV)
t1b += numpy.einsum('nf,nfai->ia', c1a, eris_ovVO)
t1a += numpy.einsum('nf,iafn->ia', c1b, eris_ovVO)
#:t1 -= 0.5*einsum('mnae,mnie->ia', c2, eris.ooov)
eris_ovoo = _cp(eris.ovoo)
eris_OVOO = _cp(eris.OVOO)
eris_OVoo = _cp(eris.OVoo)
eris_ovOO = _cp(eris.ovOO)
t1a += lib.einsum('mnae,meni->ia', c2aa, eris_ovoo)
t1b += lib.einsum('mnae,meni->ia', c2bb, eris_OVOO)
t1a -= lib.einsum('nMaE,MEni->ia', c2ab, eris_OVoo)
t1b -= lib.einsum('mNeA,meNI->IA', c2ab, eris_ovOO)
#:tmp = einsum('ma,mbij->ijab', c1, eris.ovoo)
#:t2 -= tmp - tmp.transpose(0,1,3,2)
t2aa -= lib.einsum('ma,jbmi->jiba', c1a, eris_ovoo)
t2bb -= lib.einsum('ma,jbmi->jiba', c1b, eris_OVOO)
t2ab -= lib.einsum('ma,JBmi->iJaB', c1a, eris_OVoo)
t2ab -= lib.einsum('MA,ibMJ->iJbA', c1b, eris_ovOO)
#:#:t1 -= 0.5*einsum('imef,maef->ia', c2, eris.ovvv)
#:eris_ovvv = _cp(eris.ovvv)
#:eris_OVVV = _cp(eris.OVVV)
#:eris_ovVV = _cp(eris.ovVV)
#:eris_OVvv = _cp(eris.OVvv)
#:t1a += lib.einsum('mief,mefa->ia', c2aa, eris_ovvv)
#:t1b += lib.einsum('MIEF,MEFA->IA', c2bb, eris_OVVV)
#:t1a += lib.einsum('iMfE,MEaf->ia', c2ab, eris_OVvv)
#:t1b += lib.einsum('mIeF,meAF->IA', c2ab, eris_ovVV)
#:#:tmp = einsum('ie,jeba->ijab', c1, numpy.asarray(eris.ovvv).conj())
#:#:t2 += tmp - tmp.transpose(1,0,2,3)
#:t2aa += lib.einsum('ie,mbae->imab', c1a, eris_ovvv)
#:t2bb += lib.einsum('ie,mbae->imab', c1b, eris_OVVV)
#:t2ab += lib.einsum('ie,MBae->iMaB', c1a, eris_OVvv)
#:t2ab += lib.einsum('IE,maBE->mIaB', c1b, eris_ovVV)
mem_now = lib.current_memory()[0]
max_memory = max(0, lib.param.MAX_MEMORY - mem_now)
if nvira > 0 and nocca > 0:
blksize = max(int(max_memory*1e6/8/(nvira**2*nocca*2)), 2)
for p0,p1 in lib.prange(0, nvira, blksize):
ovvv = eris.get_ovvv(slice(None), slice(p0,p1))
t1a += lib.einsum('mief,mefa->ia', c2aa[:,:,p0:p1], ovvv)
t2aa[:,:,p0:p1] += lib.einsum('mbae,ie->miba', ovvv, c1a)
ovvv = None
if nvirb > 0 and noccb > 0:
blksize = max(int(max_memory*1e6/8/(nvirb**2*noccb*2)), 2)
for p0,p1 in lib.prange(0, nvirb, blksize):
OVVV = eris.get_OVVV(slice(None), slice(p0,p1))
t1b += lib.einsum('MIEF,MEFA->IA', c2bb[:,:,p0:p1], OVVV)
t2bb[:,:,p0:p1] += lib.einsum('mbae,ie->miba', OVVV, c1b)
OVVV = None
if nvirb > 0 and nocca > 0:
blksize = max(int(max_memory*1e6/8/(nvirb**2*nocca*2)), 2)
for p0,p1 in lib.prange(0, nvira, blksize):
ovVV = eris.get_ovVV(slice(None), slice(p0,p1))
t1b += lib.einsum('mIeF,meAF->IA', c2ab[:,:,p0:p1], ovVV)
t2ab[:,:,p0:p1] += lib.einsum('maBE,IE->mIaB', ovVV, c1b)
ovVV = None
if nvira > 0 and noccb > 0:
blksize = max(int(max_memory*1e6/8/(nvira**2*noccb*2)), 2)
for p0,p1 in lib.prange(0, nvirb, blksize):
OVvv = eris.get_OVvv(slice(None), slice(p0,p1))
t1a += lib.einsum('iMfE,MEaf->ia', c2ab[:,:,:,p0:p1], OVvv)
t2ab[:,:,:,p0:p1] += lib.einsum('MBae,ie->iMaB', OVvv, c1a)
OVvv = None
#:t1 = einsum('ie,ae->ia', c1, fvv)
t1a += lib.einsum('ie,ae->ia', c1a, fvva)
t1b += lib.einsum('ie,ae->ia', c1b, fvvb)
#:t1 -= einsum('ma,mi->ia', c1, foo)
t1a -= lib.einsum('ma,mi->ia', c1a, fooa)
t1b -= lib.einsum('ma,mi->ia', c1b, foob)
#:t1 += einsum('imae,me->ia', c2, fov)
t1a += numpy.einsum('imae,me->ia', c2aa, fova)
t1a += numpy.einsum('imae,me->ia', c2ab, fovb)
t1b += numpy.einsum('imae,me->ia', c2bb, fovb)
t1b += numpy.einsum('miea,me->ia', c2ab, fova)
#:tmp = einsum('ijae,be->ijab', c2, fvv)
#:t2 = tmp - tmp.transpose(0,1,3,2)
t2aa += lib.einsum('ijae,be->ijab', c2aa, fvva*.5)
t2bb += lib.einsum('ijae,be->ijab', c2bb, fvvb*.5)
t2ab += lib.einsum('iJaE,BE->iJaB', c2ab, fvvb)
t2ab += lib.einsum('iJeA,be->iJbA', c2ab, fvva)
#:tmp = einsum('imab,mj->ijab', c2, foo)
#:t2 -= tmp - tmp.transpose(1,0,2,3)
t2aa -= lib.einsum('imab,mj->ijab', c2aa, fooa*.5)
t2bb -= lib.einsum('imab,mj->ijab', c2bb, foob*.5)
t2ab -= lib.einsum('iMaB,MJ->iJaB', c2ab, foob)
t2ab -= lib.einsum('mIaB,mj->jIaB', c2ab, fooa)
#:tmp = numpy.einsum('ia,bj->ijab', c1, fvo)
#:tmp = tmp - tmp.transpose(0,1,3,2)
#:t2 += tmp - tmp.transpose(1,0,2,3)
t2aa += numpy.einsum('ia,bj->ijab', c1a, fvoa)
t2bb += numpy.einsum('ia,bj->ijab', c1b, fvob)
t2ab += numpy.einsum('ia,bj->ijab', c1a, fvob)
t2ab += numpy.einsum('ia,bj->jiba', c1b, fvoa)
t2aa = t2aa - t2aa.transpose(0,1,3,2)
t2aa = t2aa - t2aa.transpose(1,0,2,3)
t2bb = t2bb - t2bb.transpose(0,1,3,2)
t2bb = t2bb - t2bb.transpose(1,0,2,3)
#:t2 += 0.5*einsum('mnab,mnij->ijab', c2, eris.oooo)
eris_oooo = _cp(eris.oooo)
eris_OOOO = _cp(eris.OOOO)
eris_ooOO = _cp(eris.ooOO)
t2aa += lib.einsum('mnab,minj->ijab', c2aa, eris_oooo)
t2bb += lib.einsum('mnab,minj->ijab', c2bb, eris_OOOO)
t2ab += lib.einsum('mNaB,miNJ->iJaB', c2ab, eris_ooOO)
#:t1 += fov.conj() * c0
t1a += fova.conj() * c0
t1b += fovb.conj() * c0
#:t0 = numpy.einsum('ia,ia', fov, c1)
t0 += numpy.einsum('ia,ia', fova, c1a)
t0 += numpy.einsum('ia,ia', fovb, c1b)
return amplitudes_to_cisdvec(t0, (t1a,t1b), (t2aa,t2ab,t2bb))
def amplitudes_to_cisdvec(c0, c1, c2):
c1a, c1b = c1
c2aa, c2ab, c2bb = c2
nocca, nvira = c1a.shape
noccb, nvirb = c1b.shape
def trilidx(n):
idx = numpy.tril_indices(n, -1)
return idx[0] * n + idx[1]
ooidxa = trilidx(nocca)
vvidxa = trilidx(nvira)
ooidxb = trilidx(noccb)
vvidxb = trilidx(nvirb)
size = (1, nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,
len(ooidxa)*len(vvidxa), len(ooidxb)*len(vvidxb))
loc = numpy.cumsum(size)
civec = numpy.empty(loc[-1], dtype=c2ab.dtype)
civec[0] = c0
civec[loc[0]:loc[1]] = c1a.ravel()
civec[loc[1]:loc[2]] = c1b.ravel()
civec[loc[2]:loc[3]] = c2ab.ravel()
lib.take_2d(c2aa.reshape(nocca**2,nvira**2), ooidxa, vvidxa, out=civec[loc[3]:loc[4]])
lib.take_2d(c2bb.reshape(noccb**2,nvirb**2), ooidxb, vvidxb, out=civec[loc[4]:loc[5]])
return civec
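# Size check for the packing above (hand example, not executed): with nocca = noccb = 2 and
# nvira = nvirb = 3 the blocks contribute 1 (c0), 6 (c1a), 6 (c1b), 36 (c2ab) and, keeping only
# the strict lower-triangular occupied and virtual pairs, 1*3 = 3 each for c2aa and c2bb,
# giving a packed civec of length 55.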
def cisdvec_to_amplitudes(civec, nmo, nocc):
norba, norbb = nmo
nocca, noccb = nocc
nvira = norba - nocca
nvirb = norbb - noccb
nooa = nocca * (nocca-1) // 2
nvva = nvira * (nvira-1) // 2
noob = noccb * (noccb-1) // 2
nvvb = nvirb * (nvirb-1) // 2
size = (1, nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,
nooa*nvva, noob*nvvb)
loc = numpy.cumsum(size)
c0 = civec[0]
c1a = civec[loc[0]:loc[1]].reshape(nocca,nvira)
c1b = civec[loc[1]:loc[2]].reshape(noccb,nvirb)
c2ab = civec[loc[2]:loc[3]].reshape(nocca,noccb,nvira,nvirb)
c2aa = _unpack_4fold(civec[loc[3]:loc[4]], nocca, nvira)
c2bb = _unpack_4fold(civec[loc[4]:loc[5]], noccb, nvirb)
return c0, (c1a,c1b), (c2aa,c2ab,c2bb)
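# The two helpers above are inverses on this layout. A quick consistency check could look like
# the sketch below (c0, c1, c2 stand for any amplitudes of matching shapes):
#   vec = amplitudes_to_cisdvec(c0, c1, c2)
#   c0_2, c1_2, c2_2 = cisdvec_to_amplitudes(vec, (nmoa, nmob), (nocca, noccb))
# Up to the antisymmetry imposed on c2aa/c2bb by the triangular packing and _unpack_4fold,
# the round trip reproduces the input amplitudes.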
def to_fcivec(cisdvec, norb, nelec, frozen=None):
'''Convert CISD coefficients to FCI coefficients'''
if isinstance(nelec, (int, numpy.number)):
nelecb = nelec//2
neleca = nelec - nelecb
else:
neleca, nelecb = nelec
frozena_mask = numpy.zeros(norb, dtype=bool)
frozenb_mask = numpy.zeros(norb, dtype=bool)
if frozen is None:
nfroza = nfrozb = 0
elif isinstance(frozen, (int, numpy.integer)):
nfroza = nfrozb = frozen
frozena_mask[:frozen] = True
frozenb_mask[:frozen] = True
else:
nfroza = len(frozen[0])
nfrozb = len(frozen[1])
frozena_mask[frozen[0]] = True
frozenb_mask[frozen[1]] = True
# if nfroza != nfrozb:
# raise NotImplementedError
nocca = numpy.count_nonzero(~frozena_mask[:neleca])
noccb = numpy.count_nonzero(~frozenb_mask[:nelecb])
nmo = nmoa, nmob = norb - nfroza, norb - nfrozb
nocc = nocca, noccb
nvira, nvirb = nmoa - nocca, nmob - noccb
c0, c1, c2 = cisdvec_to_amplitudes(cisdvec, nmo, nocc)
c1a, c1b = c1
c2aa, c2ab, c2bb = c2
t1addra, t1signa = cisd.tn_addrs_signs(nmoa, nocca, 1)
t1addrb, t1signb = cisd.tn_addrs_signs(nmob, noccb, 1)
na = cistring.num_strings(nmoa, nocca)
nb = cistring.num_strings(nmob, noccb)
fcivec = numpy.zeros((na,nb))
fcivec[0,0] = c0
fcivec[t1addra,0] = c1a.ravel() * t1signa
fcivec[0,t1addrb] = c1b.ravel() * t1signb
c2ab = c2ab.transpose(0,2,1,3).reshape(nocca*nvira,-1)
c2ab = numpy.einsum('i,j,ij->ij', t1signa, t1signb, c2ab)
fcivec[t1addra[:,None],t1addrb] = c2ab
if nocca > 1 and nvira > 1:
ooidx = numpy.tril_indices(nocca, -1)
vvidx = numpy.tril_indices(nvira, -1)
c2aa = c2aa[ooidx][:,vvidx[0],vvidx[1]]
t2addra, t2signa = cisd.tn_addrs_signs(nmoa, nocca, 2)
fcivec[t2addra,0] = c2aa.ravel() * t2signa
if noccb > 1 and nvirb > 1:
ooidx = numpy.tril_indices(noccb, -1)
vvidx = numpy.tril_indices(nvirb, -1)
c2bb = c2bb[ooidx][:,vvidx[0],vvidx[1]]
t2addrb, t2signb = cisd.tn_addrs_signs(nmob, noccb, 2)
fcivec[0,t2addrb] = c2bb.ravel() * t2signb
if nfroza == nfrozb == 0:
return fcivec
assert(norb < 63)
strsa = cistring.gen_strings4orblist(range(norb), neleca)
strsb = cistring.gen_strings4orblist(range(norb), nelecb)
na = len(strsa)
nb = len(strsb)
count_a = numpy.zeros(na, dtype=int)
count_b = numpy.zeros(nb, dtype=int)
parity_a = numpy.zeros(na, dtype=bool)
parity_b = numpy.zeros(nb, dtype=bool)
core_a_mask = numpy.ones(na, dtype=bool)
core_b_mask = numpy.ones(nb, dtype=bool)
for i in range(norb):
if frozena_mask[i]:
if i < neleca:
core_a_mask &= (strsa & (1 <<i )) != 0
parity_a ^= (count_a & 1) == 1
else:
core_a_mask &= (strsa & (1 << i)) == 0
else:
count_a += (strsa & (1 << i)) != 0
if frozenb_mask[i]:
if i < nelecb:
core_b_mask &= (strsb & (1 <<i )) != 0
parity_b ^= (count_b & 1) == 1
else:
core_b_mask &= (strsb & (1 << i)) == 0
else:
count_b += (strsb & (1 << i)) != 0
sub_strsa = strsa[core_a_mask & (count_a == nocca)]
sub_strsb = strsb[core_b_mask & (count_b == noccb)]
addrsa = cistring.strs2addr(norb, neleca, sub_strsa)
addrsb = cistring.strs2addr(norb, nelecb, sub_strsb)
fcivec1 = numpy.zeros((na,nb))
fcivec1[addrsa[:,None],addrsb] = fcivec
fcivec1[parity_a,:] *= -1
fcivec1[:,parity_b] *= -1
return fcivec1
def from_fcivec(ci0, norb, nelec, frozen=None):
'''Extract CISD coefficients from FCI coefficients'''
if not (frozen is None or frozen == 0):
raise NotImplementedError
if isinstance(nelec, (int, numpy.number)):
nelecb = nelec//2
neleca = nelec - nelecb
else:
neleca, nelecb = nelec
norba = norbb = norb
nocca, noccb = neleca, nelecb
nvira = norba - nocca
nvirb = norbb - noccb
t1addra, t1signa = cisd.tn_addrs_signs(norba, nocca, 1)
t1addrb, t1signb = cisd.tn_addrs_signs(norbb, noccb, 1)
na = cistring.num_strings(norba, nocca)
nb = cistring.num_strings(norbb, noccb)
ci0 = ci0.reshape(na,nb)
c0 = ci0[0,0]
c1a = (ci0[t1addra,0] * t1signa).reshape(nocca,nvira)
c1b = (ci0[0,t1addrb] * t1signb).reshape(noccb,nvirb)
c2ab = numpy.einsum('i,j,ij->ij', t1signa, t1signb, ci0[t1addra[:,None],t1addrb])
c2ab = c2ab.reshape(nocca,nvira,noccb,nvirb).transpose(0,2,1,3)
t2addra, t2signa = cisd.tn_addrs_signs(norba, nocca, 2)
t2addrb, t2signb = cisd.tn_addrs_signs(norbb, noccb, 2)
c2aa = (ci0[t2addra,0] * t2signa).reshape(nocca*(nocca-1)//2, nvira*(nvira-1)//2)
c2aa = _unpack_4fold(c2aa, nocca, nvira)
c2bb = (ci0[0,t2addrb] * t2signb).reshape(noccb*(noccb-1)//2, nvirb*(nvirb-1)//2)
c2bb = _unpack_4fold(c2bb, noccb, nvirb)
return amplitudes_to_cisdvec(c0, (c1a,c1b), (c2aa,c2ab,c2bb))
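# to_fcivec() and from_fcivec() above are intended to be mutually consistent; for a calculation
# without frozen orbitals a round trip such as
#   from_fcivec(to_fcivec(civec, norb, (neleca, nelecb)), norb, (neleca, nelecb))
# recovers the original CISD vector (usage sketch only; from_fcivec raises NotImplementedError
# as soon as frozen orbitals are requested).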
def overlap(cibra, ciket, nmo, nocc, s=None):
'''Overlap between two CISD wavefunctions.
Args:
s : a list of 2D arrays
The overlap matrix of non-orthogonal one-particle basis
'''
if s is None:
        return numpy.dot(cibra, ciket)
if isinstance(nmo, (int, numpy.integer)):
nmoa = nmob = nmo
else:
nmoa, nmob = nmo
nocca, noccb = nocc
nvira, nvirb = nmoa - nocca, nmob - noccb
bra0, bra1, bra2 = cisdvec_to_amplitudes(cibra, (nmoa,nmob), nocc)
ket0, ket1, ket2 = cisdvec_to_amplitudes(ciket, (nmoa,nmob), nocc)
ooidx = numpy.tril_indices(nocca, -1)
vvidx = numpy.tril_indices(nvira, -1)
bra2aa = lib.take_2d(bra2[0].reshape(nocca**2,nvira**2),
ooidx[0]*nocca+ooidx[1], vvidx[0]*nvira+vvidx[1])
ket2aa = lib.take_2d(ket2[0].reshape(nocca**2,nvira**2),
ooidx[0]*nocca+ooidx[1], vvidx[0]*nvira+vvidx[1])
ooidx = numpy.tril_indices(noccb, -1)
vvidx = numpy.tril_indices(nvirb, -1)
bra2bb = lib.take_2d(bra2[2].reshape(noccb**2,nvirb**2),
ooidx[0]*noccb+ooidx[1], vvidx[0]*nvirb+vvidx[1])
ket2bb = lib.take_2d(ket2[2].reshape(noccb**2,nvirb**2),
ooidx[0]*noccb+ooidx[1], vvidx[0]*nvirb+vvidx[1])
nova = nocca * nvira
novb = noccb * nvirb
occlist0a = numpy.arange(nocca).reshape(1,nocca)
occlist0b = numpy.arange(noccb).reshape(1,noccb)
occlistsa = numpy.repeat(occlist0a, 1+nova+bra2aa.size, axis=0)
occlistsb = numpy.repeat(occlist0b, 1+novb+bra2bb.size, axis=0)
occlist0a = occlistsa[:1]
occlist1a = occlistsa[1:1+nova]
occlist2a = occlistsa[1+nova:]
occlist0b = occlistsb[:1]
occlist1b = occlistsb[1:1+novb]
occlist2b = occlistsb[1+novb:]
ia = 0
for i in range(nocca):
for a in range(nocca, nmoa):
occlist1a[ia,i] = a
ia += 1
ia = 0
for i in range(noccb):
for a in range(noccb, nmob):
occlist1b[ia,i] = a
ia += 1
ia = 0
for i in range(nocca):
for j in range(i):
for a in range(nocca, nmoa):
for b in range(nocca, a):
occlist2a[ia,i] = a
occlist2a[ia,j] = b
ia += 1
ia = 0
for i in range(noccb):
for j in range(i):
for a in range(noccb, nmob):
for b in range(noccb, a):
occlist2b[ia,i] = a
occlist2b[ia,j] = b
ia += 1
na = len(occlistsa)
trans_a = numpy.empty((na,na))
for i, idx in enumerate(occlistsa):
s_sub = s[0][idx].T.copy()
minors = s_sub[occlistsa]
trans_a[i,:] = numpy.linalg.det(minors)
nb = len(occlistsb)
trans_b = numpy.empty((nb,nb))
for i, idx in enumerate(occlistsb):
s_sub = s[1][idx].T.copy()
minors = s_sub[occlistsb]
trans_b[i,:] = numpy.linalg.det(minors)
# Mimic the transformation einsum('ab,ap->pb', FCI, trans).
# The wavefunction FCI has the [excitation_alpha,excitation_beta]
# representation. The zero blocks like FCI[S_alpha,D_beta],
# FCI[D_alpha,D_beta], are explicitly excluded.
bra_mat = numpy.zeros((na,nb))
bra_mat[0,0] = bra0
bra_mat[1:1+nova,0] = bra1[0].ravel()
bra_mat[0,1:1+novb] = bra1[1].ravel()
bra_mat[1+nova:,0] = bra2aa.ravel()
bra_mat[0,1+novb:] = bra2bb.ravel()
bra_mat[1:1+nova,1:1+novb] = bra2[1].transpose(0,2,1,3).reshape(nova,novb)
c_s = lib.einsum('ab,ap,bq->pq', bra_mat, trans_a, trans_b)
ovlp = c_s[0,0] * ket0
ovlp += numpy.dot(c_s[1:1+nova,0], ket1[0].ravel())
ovlp += numpy.dot(c_s[0,1:1+novb], ket1[1].ravel())
ovlp += numpy.dot(c_s[1+nova:,0] , ket2aa.ravel())
ovlp += numpy.dot(c_s[0,1+novb:] , ket2bb.ravel())
ovlp += numpy.einsum('ijab,iajb->', ket2[1],
c_s[1:1+nova,1:1+novb].reshape(nocca,nvira,noccb,nvirb))
return ovlp
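# Usage sketch for `overlap` (illustrative, commented out; names such as
# `mo_bra`, `mo_ket` and `s_ao` are placeholders, not part of this module).
# When bra and ket are expanded in different MO sets, the alpha and beta
# MO-overlap matrices are built from the AO overlap matrix first:
#
#     s_a = numpy.linalg.multi_dot([mo_bra[0].conj().T, s_ao, mo_ket[0]])
#     s_b = numpy.linalg.multi_dot([mo_bra[1].conj().T, s_ao, mo_ket[1]])
#     val = overlap(cibra, ciket, (nmoa, nmob), (nocca, noccb), s=(s_a, s_b))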
def make_rdm1(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
r'''
One-particle spin density matrices dm1a, dm1b in MO basis (the
occupied-virtual blocks due to the orbital response contribution are not
included).
dm1a[p,q] = <q_alpha^\dagger p_alpha>
dm1b[p,q] = <q_beta^\dagger p_beta>
    The convention of 1-pdm is based on McWeeny's book, Eq (5.4.20).
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
return uccsd_rdm._make_rdm1(myci, d1, with_frozen=True, ao_repr=ao_repr)
def make_rdm2(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
r'''
Two-particle spin density matrices dm2aa, dm2ab, dm2bb in MO basis
dm2aa[p,q,r,s] = <q_alpha^\dagger s_alpha^\dagger r_alpha p_alpha>
dm2ab[p,q,r,s] = <q_alpha^\dagger s_beta^\dagger r_beta p_alpha>
dm2bb[p,q,r,s] = <q_beta^\dagger s_beta^\dagger r_beta p_beta>
(p,q correspond to one particle and r,s correspond to another particle)
Two-particle density matrix should be contracted to integrals with the
pattern below to compute energy
E = numpy.einsum('pqrs,pqrs', eri_aa, dm2_aa)
E+= numpy.einsum('pqrs,pqrs', eri_ab, dm2_ab)
E+= numpy.einsum('pqrs,rspq', eri_ba, dm2_ab)
E+= numpy.einsum('pqrs,pqrs', eri_bb, dm2_bb)
where eri_aa[p,q,r,s] = (p_alpha q_alpha | r_alpha s_alpha )
eri_ab[p,q,r,s] = ( p_alpha q_alpha | r_beta s_beta )
eri_ba[p,q,r,s] = ( p_beta q_beta | r_alpha s_alpha )
eri_bb[p,q,r,s] = ( p_beta q_beta | r_beta s_beta )
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
d2 = _gamma2_intermediates(myci, civec, nmo, nocc)
return uccsd_rdm._make_rdm2(myci, d1, d2, with_dm1=True, with_frozen=True,
ao_repr=ao_repr)
def _gamma1_intermediates(myci, civec, nmo, nocc):
nmoa, nmob = nmo
nocca, noccb = nocc
c0, c1, c2 = cisdvec_to_amplitudes(civec, nmo, nocc)
c1a, c1b = c1
c2aa, c2ab, c2bb = c2
dvoa = c0.conj() * c1a.T
dvob = c0.conj() * c1b.T
dvoa += numpy.einsum('jb,ijab->ai', c1a.conj(), c2aa)
dvoa += numpy.einsum('jb,ijab->ai', c1b.conj(), c2ab)
dvob += numpy.einsum('jb,ijab->ai', c1b.conj(), c2bb)
dvob += numpy.einsum('jb,jiba->ai', c1a.conj(), c2ab)
dova = dvoa.T.conj()
dovb = dvob.T.conj()
dooa =-numpy.einsum('ia,ka->ik', c1a.conj(), c1a)
doob =-numpy.einsum('ia,ka->ik', c1b.conj(), c1b)
dooa -= numpy.einsum('ijab,ikab->jk', c2aa.conj(), c2aa) * .5
dooa -= numpy.einsum('jiab,kiab->jk', c2ab.conj(), c2ab)
doob -= numpy.einsum('ijab,ikab->jk', c2bb.conj(), c2bb) * .5
doob -= numpy.einsum('ijab,ikab->jk', c2ab.conj(), c2ab)
dvva = numpy.einsum('ia,ic->ac', c1a, c1a.conj())
dvvb = numpy.einsum('ia,ic->ac', c1b, c1b.conj())
dvva += numpy.einsum('ijab,ijac->bc', c2aa, c2aa.conj()) * .5
dvva += numpy.einsum('ijba,ijca->bc', c2ab, c2ab.conj())
dvvb += numpy.einsum('ijba,ijca->bc', c2bb, c2bb.conj()) * .5
dvvb += numpy.einsum('ijab,ijac->bc', c2ab, c2ab.conj())
return (dooa, doob), (dova, dovb), (dvoa, dvob), (dvva, dvvb)
def _gamma2_intermediates(myci, civec, nmo, nocc):
nmoa, nmob = nmo
nocca, noccb = nocc
c0, c1, c2 = cisdvec_to_amplitudes(civec, nmo, nocc)
c1a, c1b = c1
c2aa, c2ab, c2bb = c2
goovv = c0 * c2aa.conj() * .5
goOvV = c0 * c2ab.conj()
gOOVV = c0 * c2bb.conj() * .5
govvv = numpy.einsum('ia,ikcd->kadc', c1a, c2aa.conj()) * .5
gOvVv = numpy.einsum('ia,ikcd->kadc', c1a, c2ab.conj())
goVvV = numpy.einsum('ia,kidc->kadc', c1b, c2ab.conj())
gOVVV = numpy.einsum('ia,ikcd->kadc', c1b, c2bb.conj()) * .5
gooov = numpy.einsum('ia,klac->klic', c1a, c2aa.conj()) *-.5
goOoV =-numpy.einsum('ia,klac->klic', c1a, c2ab.conj())
gOoOv =-numpy.einsum('ia,lkca->klic', c1b, c2ab.conj())
gOOOV = numpy.einsum('ia,klac->klic', c1b, c2bb.conj()) *-.5
goooo = numpy.einsum('ijab,klab->ijkl', c2aa.conj(), c2aa) * .25
goOoO = numpy.einsum('ijab,klab->ijkl', c2ab.conj(), c2ab)
gOOOO = numpy.einsum('ijab,klab->ijkl', c2bb.conj(), c2bb) * .25
gvvvv = numpy.einsum('ijab,ijcd->abcd', c2aa, c2aa.conj()) * .25
gvVvV = numpy.einsum('ijab,ijcd->abcd', c2ab, c2ab.conj())
gVVVV = numpy.einsum('ijab,ijcd->abcd', c2bb, c2bb.conj()) * .25
goVoV = numpy.einsum('jIaB,kIaC->jCkB', c2ab.conj(), c2ab)
gOvOv = numpy.einsum('iJbA,iKcA->JcKb', c2ab.conj(), c2ab)
govvo = numpy.einsum('ijab,ikac->jcbk', c2aa.conj(), c2aa)
govvo+= numpy.einsum('jIbA,kIcA->jcbk', c2ab.conj(), c2ab)
goVvO = numpy.einsum('jIbA,IKAC->jCbK', c2ab.conj(), c2bb)
goVvO+= numpy.einsum('ijab,iKaC->jCbK', c2aa.conj(), c2ab)
gOVVO = numpy.einsum('ijab,ikac->jcbk', c2bb.conj(), c2bb)
gOVVO+= numpy.einsum('iJaB,iKaC->JCBK', c2ab.conj(), c2ab)
govvo+= numpy.einsum('ia,jb->ibaj', c1a.conj(), c1a)
goVvO+= numpy.einsum('ia,jb->ibaj', c1a.conj(), c1b)
gOVVO+= numpy.einsum('ia,jb->ibaj', c1b.conj(), c1b)
dovov = goovv.transpose(0,2,1,3) - goovv.transpose(0,3,1,2)
doooo = goooo.transpose(0,2,1,3) - goooo.transpose(0,3,1,2)
dvvvv = gvvvv.transpose(0,2,1,3) - gvvvv.transpose(0,3,1,2)
dovvo = govvo.transpose(0,2,1,3)
dooov = gooov.transpose(0,2,1,3) - gooov.transpose(1,2,0,3)
dovvv = govvv.transpose(0,2,1,3) - govvv.transpose(0,3,1,2)
doovv =-dovvo.transpose(0,3,2,1)
dvvov = None
dOVOV = gOOVV.transpose(0,2,1,3) - gOOVV.transpose(0,3,1,2)
dOOOO = gOOOO.transpose(0,2,1,3) - gOOOO.transpose(0,3,1,2)
dVVVV = gVVVV.transpose(0,2,1,3) - gVVVV.transpose(0,3,1,2)
dOVVO = gOVVO.transpose(0,2,1,3)
dOOOV = gOOOV.transpose(0,2,1,3) - gOOOV.transpose(1,2,0,3)
dOVVV = gOVVV.transpose(0,2,1,3) - gOVVV.transpose(0,3,1,2)
dOOVV =-dOVVO.transpose(0,3,2,1)
dVVOV = None
dovOV = goOvV.transpose(0,2,1,3)
dooOO = goOoO.transpose(0,2,1,3)
dvvVV = gvVvV.transpose(0,2,1,3)
dovVO = goVvO.transpose(0,2,1,3)
dooOV = goOoV.transpose(0,2,1,3)
dovVV = goVvV.transpose(0,2,1,3)
dooVV = goVoV.transpose(0,2,1,3)
dooVV = -(dooVV + dooVV.transpose(1,0,3,2).conj()) * .5
dvvOV = None
dOVov = None
dOOoo = None
dVVvv = None
dOVvo = dovVO.transpose(3,2,1,0).conj()
dOOov = gOoOv.transpose(0,2,1,3)
dOVvv = gOvVv.transpose(0,2,1,3)
dOOvv = gOvOv.transpose(0,2,1,3)
dOOvv =-(dOOvv + dOOvv.transpose(1,0,3,2).conj()) * .5
dVVov = None
return ((dovov, dovOV, dOVov, dOVOV),
(dvvvv, dvvVV, dVVvv, dVVVV),
(doooo, dooOO, dOOoo, dOOOO),
(doovv, dooVV, dOOvv, dOOVV),
(dovvo, dovVO, dOVvo, dOVVO),
(dvvov, dvvOV, dVVov, dVVOV),
(dovvv, dovVV, dOVvv, dOVVV),
(dooov, dooOV, dOOov, dOOOV))
def trans_rdm1(myci, cibra, ciket, nmo=None, nocc=None):
r'''
One-particle spin density matrices dm1a, dm1b in MO basis (the
occupied-virtual blocks due to the orbital response contribution are not
included).
dm1a[p,q] = <q_alpha^\dagger p_alpha>
dm1b[p,q] = <q_beta^\dagger p_beta>
    The convention of 1-pdm is based on McWeeny's book, Eq (5.4.20).
'''
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
c0bra, c1bra, c2bra = myci.cisdvec_to_amplitudes(cibra, nmo, nocc)
c0ket, c1ket, c2ket = myci.cisdvec_to_amplitudes(ciket, nmo, nocc)
nmoa, nmob = nmo
nocca, noccb = nocc
bra1a, bra1b = c1bra
bra2aa, bra2ab, bra2bb = c2bra
ket1a, ket1b = c1ket
ket2aa, ket2ab, ket2bb = c2ket
dvoa = c0bra.conj() * ket1a.T
dvob = c0bra.conj() * ket1b.T
dvoa += numpy.einsum('jb,ijab->ai', bra1a.conj(), ket2aa)
dvoa += numpy.einsum('jb,ijab->ai', bra1b.conj(), ket2ab)
dvob += numpy.einsum('jb,ijab->ai', bra1b.conj(), ket2bb)
dvob += numpy.einsum('jb,jiba->ai', bra1a.conj(), ket2ab)
dova = c0ket * bra1a.conj()
dovb = c0ket * bra1b.conj()
dova += numpy.einsum('jb,ijab->ia', ket1a.conj(), bra2aa)
dova += numpy.einsum('jb,ijab->ia', ket1b.conj(), bra2ab)
dovb += numpy.einsum('jb,ijab->ia', ket1b.conj(), bra2bb)
dovb += numpy.einsum('jb,jiba->ia', ket1a.conj(), bra2ab)
dooa =-numpy.einsum('ia,ka->ik', bra1a.conj(), ket1a)
doob =-numpy.einsum('ia,ka->ik', bra1b.conj(), ket1b)
dooa -= numpy.einsum('ijab,ikab->jk', bra2aa.conj(), ket2aa) * .5
dooa -= numpy.einsum('jiab,kiab->jk', bra2ab.conj(), ket2ab)
doob -= numpy.einsum('ijab,ikab->jk', bra2bb.conj(), ket2bb) * .5
doob -= numpy.einsum('ijab,ikab->jk', bra2ab.conj(), ket2ab)
dvva = numpy.einsum('ia,ic->ac', ket1a, bra1a.conj())
dvvb = numpy.einsum('ia,ic->ac', ket1b, bra1b.conj())
dvva += numpy.einsum('ijab,ijac->bc', ket2aa, bra2aa.conj()) * .5
dvva += numpy.einsum('ijba,ijca->bc', ket2ab, bra2ab.conj())
dvvb += numpy.einsum('ijba,ijca->bc', ket2bb, bra2bb.conj()) * .5
dvvb += numpy.einsum('ijab,ijac->bc', ket2ab, bra2ab.conj())
dm1a = numpy.empty((nmoa,nmoa), dtype=dooa.dtype)
dm1a[:nocca,:nocca] = dooa
dm1a[:nocca,nocca:] = dova
dm1a[nocca:,:nocca] = dvoa
dm1a[nocca:,nocca:] = dvva
norm = numpy.dot(cibra, ciket)
dm1a[numpy.diag_indices(nocca)] += norm
dm1b = numpy.empty((nmob,nmob), dtype=dooa.dtype)
dm1b[:noccb,:noccb] = doob
dm1b[:noccb,noccb:] = dovb
dm1b[noccb:,:noccb] = dvob
dm1b[noccb:,noccb:] = dvvb
dm1b[numpy.diag_indices(noccb)] += norm
if myci.frozen is not None:
nmoa = myci.mo_occ[0].size
nmob = myci.mo_occ[1].size
nocca = numpy.count_nonzero(myci.mo_occ[0] > 0)
noccb = numpy.count_nonzero(myci.mo_occ[1] > 0)
rdm1a = numpy.zeros((nmoa,nmoa), dtype=dm1a.dtype)
rdm1b = numpy.zeros((nmob,nmob), dtype=dm1b.dtype)
rdm1a[numpy.diag_indices(nocca)] = norm
rdm1b[numpy.diag_indices(noccb)] = norm
moidx = myci.get_frozen_mask()
moidxa = numpy.where(moidx[0])[0]
moidxb = numpy.where(moidx[1])[0]
rdm1a[moidxa[:,None],moidxa] = dm1a
rdm1b[moidxb[:,None],moidxb] = dm1b
dm1a = rdm1a
dm1b = rdm1b
return dm1a, dm1b
class UCISD(cisd.CISD):
def vector_size(self):
norba, norbb = self.nmo
nocca, noccb = self.nocc
nvira = norba - nocca
nvirb = norbb - noccb
nooa = nocca * (nocca-1) // 2
nvva = nvira * (nvira-1) // 2
noob = noccb * (noccb-1) // 2
nvvb = nvirb * (nvirb-1) // 2
size = (1 + nocca*nvira + noccb*nvirb +
nocca*noccb*nvira*nvirb + nooa*nvva + noob*nvvb)
return size
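    # Consistency sketch (illustrative, commented out; `myci` and `civec` are
    # placeholders for a UCISD instance and one of its CI vectors): a packed
    # CISD vector has exactly vector_size() elements.
    #
    #     c0, c1, c2 = myci.cisdvec_to_amplitudes(civec)
    #     assert myci.amplitudes_to_cisdvec(c0, c1, c2).size == myci.vector_size()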
get_nocc = uccsd.get_nocc
get_nmo = uccsd.get_nmo
get_frozen_mask = uccsd.get_frozen_mask
def get_init_guess(self, eris=None, nroots=1, diag=None):
if eris is None: eris = self.ao2mo(self.mo_coeff)
nocca, noccb = self.nocc
mo_ea, mo_eb = eris.mo_energy
eia_a = mo_ea[:nocca,None] - mo_ea[None,nocca:]
eia_b = mo_eb[:noccb,None] - mo_eb[None,noccb:]
t1a = eris.focka[:nocca,nocca:].conj() / eia_a
t1b = eris.fockb[:noccb,noccb:].conj() / eia_b
eris_ovov = _cp(eris.ovov)
eris_ovOV = _cp(eris.ovOV)
eris_OVOV = _cp(eris.OVOV)
t2aa = eris_ovov.transpose(0,2,1,3) - eris_ovov.transpose(0,2,3,1)
t2bb = eris_OVOV.transpose(0,2,1,3) - eris_OVOV.transpose(0,2,3,1)
t2ab = eris_ovOV.transpose(0,2,1,3).copy()
t2aa = t2aa.conj()
t2ab = t2ab.conj()
t2bb = t2bb.conj()
t2aa /= lib.direct_sum('ia+jb->ijab', eia_a, eia_a)
t2ab /= lib.direct_sum('ia+jb->ijab', eia_a, eia_b)
t2bb /= lib.direct_sum('ia+jb->ijab', eia_b, eia_b)
emp2 = numpy.einsum('iajb,ijab', eris_ovov, t2aa) * .25
emp2 -= numpy.einsum('jaib,ijab', eris_ovov, t2aa) * .25
emp2 += numpy.einsum('iajb,ijab', eris_OVOV, t2bb) * .25
emp2 -= numpy.einsum('jaib,ijab', eris_OVOV, t2bb) * .25
emp2 += numpy.einsum('iajb,ijab', eris_ovOV, t2ab)
self.emp2 = emp2.real
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
if abs(emp2) < 1e-3 and (abs(t1a).sum()+abs(t1b).sum()) < 1e-3:
t1a = 1e-1 / eia_a
t1b = 1e-1 / eia_b
ci_guess = amplitudes_to_cisdvec(1, (t1a,t1b), (t2aa,t2ab,t2bb))
if nroots > 1:
civec_size = ci_guess.size
ci1_size = t1a.size + t1b.size
dtype = ci_guess.dtype
nroots = min(ci1_size+1, nroots)
if diag is None:
idx = range(1, nroots)
else:
idx = diag[:ci1_size+1].argsort()[1:nroots] # exclude HF determinant
ci_guess = [ci_guess]
for i in idx:
g = numpy.zeros(civec_size, dtype)
g[i] = 1.0
ci_guess.append(g)
return self.emp2, ci_guess
contract = contract
make_diagonal = make_diagonal
_dot = None
_add_vvvv = uccsd._add_vvvv
def ao2mo(self, mo_coeff=None):
nmoa, nmob = self.get_nmo()
nao = self.mo_coeff[0].shape[0]
nmo_pair = nmoa * (nmoa+1) // 2
nao_pair = nao * (nao+1) // 2
mem_incore = (max(nao_pair**2, nmoa**4) + nmo_pair**2) * 8/1e6
mem_now = lib.current_memory()[0]
if (self._scf._eri is not None and
(mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
return uccsd._make_eris_incore(self, mo_coeff)
elif getattr(self._scf, 'with_df', None):
raise NotImplementedError
else:
return uccsd._make_eris_outcore(self, mo_coeff)
def to_fcivec(self, cisdvec, nmo=None, nocc=None):
return to_fcivec(cisdvec, nmo, nocc)
def from_fcivec(self, fcivec, nmo=None, nocc=None):
return from_fcivec(fcivec, nmo, nocc)
def amplitudes_to_cisdvec(self, c0, c1, c2):
return amplitudes_to_cisdvec(c0, c1, c2)
def cisdvec_to_amplitudes(self, civec, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return cisdvec_to_amplitudes(civec, nmo, nocc)
make_rdm1 = make_rdm1
make_rdm2 = make_rdm2
trans_rdm1 = trans_rdm1
def nuc_grad_method(self):
from pyscf.grad import ucisd
return ucisd.Gradients(self)
CISD = UCISD
from pyscf import scf
scf.uhf.UHF.CISD = lib.class_as_method(CISD)
def _cp(a):
return numpy.array(a, copy=False, order='C')
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'sto-3g',
'O': 'sto-3g',}
# mol.build()
# mf = scf.UHF(mol).run(conv_tol=1e-14)
# myci = CISD(mf)
# eris = myci.ao2mo()
# ecisd, civec = myci.kernel(eris=eris)
# print(ecisd - -0.048878084082066106)
#
# nmoa = mf.mo_energy[0].size
# nmob = mf.mo_energy[1].size
# rdm1 = myci.make_rdm1(civec)
# rdm2 = myci.make_rdm2(civec)
# eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0], compact=False).reshape([nmoa]*4)
# eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1], compact=False).reshape([nmob]*4)
# eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
# mf.mo_coeff[1], mf.mo_coeff[1]], compact=False)
# eri_ab = eri_ab.reshape(nmoa,nmoa,nmob,nmob)
# h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
# h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
# e2 = (numpy.einsum('ij,ji', h1a, rdm1[0]) +
# numpy.einsum('ij,ji', h1b, rdm1[1]) +
# numpy.einsum('ijkl,ijkl', eri_aa, rdm2[0]) * .5 +
# numpy.einsum('ijkl,ijkl', eri_ab, rdm2[1]) +
# numpy.einsum('ijkl,ijkl', eri_bb, rdm2[2]) * .5)
# print(ecisd + mf.e_tot - mol.energy_nuc() - e2) # = 0
#
# print(abs(rdm1[0] - (numpy.einsum('ijkk->ji', rdm2[0]) +
# numpy.einsum('ijkk->ji', rdm2[1]))/(mol.nelectron-1)).sum())
# print(abs(rdm1[1] - (numpy.einsum('ijkk->ji', rdm2[2]) +
# numpy.einsum('kkij->ji', rdm2[1]))/(mol.nelectron-1)).sum())
if 1:
from pyscf.ci import ucisd
from pyscf import fci
nmo = 8
nocc = nocca, noccb = (4,3)
numpy.random.seed(2)
nvira, nvirb = nmo-nocca, nmo-noccb
cibra = ucisd.amplitudes_to_cisdvec(numpy.random.rand(1),
(numpy.random.rand(nocca,nvira),
numpy.random.rand(noccb,nvirb)),
(numpy.random.rand(nocca,nocca,nvira,nvira),
numpy.random.rand(nocca,noccb,nvira,nvirb),
numpy.random.rand(noccb,noccb,nvirb,nvirb)))
ciket = ucisd.amplitudes_to_cisdvec(numpy.random.rand(1),
(numpy.random.rand(nocca,nvira),
numpy.random.rand(noccb,nvirb)),
(numpy.random.rand(nocca,nocca,nvira,nvira),
numpy.random.rand(nocca,noccb,nvira,nvirb),
numpy.random.rand(noccb,noccb,nvirb,nvirb)))
fcibra = ucisd.to_fcivec(cibra, nmo, nocc)
fciket = ucisd.to_fcivec(ciket, nmo, nocc)
s_mo = (numpy.random.random((nmo,nmo)),
numpy.random.random((nmo,nmo)))
s_mo = (s_mo[0], s_mo[0])
s0 = fci.addons.overlap(fcibra, fciket, nmo, nocc, s_mo)
s1 = ucisd.overlap(cibra, ciket, nmo, nocc, s_mo)
print(s1, s0, 9)
| sunqm/pyscf | pyscf/ci/ucisd.py | Python | apache-2.0 | 42,117 | ["PySCF"] | 1ae9699ca22af8913961ed62cd4dc3ee769cefdee0d3a19ecdf3d6cdb3a2b9e8 |
try:
import wpilib
except ImportError:
from pyfrc import wpilib
# import components here
from autonomous import AutonomousModeManager
from components import drive, intake, catapult
from common import delay
# keep in sync with the driver station
MODE_DISABLED = 0
MODE_AUTONOMOUS = 1
MODE_TELEOPERATED = 2
class MyRobot(wpilib.SimpleRobot):
'''
This is where it all starts
'''
def __init__ (self):
'''
Constructor.
'''
super().__init__()
print("Team 1418 robot code for 2014")
#################################################################
# THIS CODE IS SHARED BETWEEN THE MAIN ROBOT AND THE ELECTRICAL #
# TEST CODE. WHEN CHANGING IT, CHANGE BOTH PLACES! #
#################################################################
wpilib.SmartDashboard.init()
# Joysticks
self.joystick1 = wpilib.Joystick(1)
self.joystick2 = wpilib.Joystick(2)
# Motors
self.lf_motor = wpilib.Jaguar(1)
self.lf_motor.label = 'lf_motor'
self.lr_motor = wpilib.Jaguar(2)
self.lr_motor.label = 'lr_motor'
self.rr_motor = wpilib.Jaguar(3)
self.rr_motor.label = 'rr_motor'
self.rf_motor = wpilib.Jaguar(4)
self.rf_motor.label = 'rf_motor'
self.winch_motor = wpilib.CANJaguar(5)
self.winch_motor.label = 'winch'
self.intake_motor = wpilib.Jaguar(6)
self.intake_motor.label = 'intake'
# Catapult gearbox control
self.gearbox_solenoid=wpilib.DoubleSolenoid(2, 1)
self.gearbox_solenoid.label = 'gearbox'
# Arm up/down control
self.vent_bottom_solenoid = wpilib.Solenoid(3)
self.vent_bottom_solenoid.label = 'vent bottom'
self.fill_bottom_solenoid = wpilib.Solenoid(4)
self.fill_bottom_solenoid.label = 'fill bottom'
self.fill_top_solenoid = wpilib.Solenoid(5)
self.fill_top_solenoid.label = 'fill top'
self.vent_top_solenoid = wpilib.Solenoid(6)
self.vent_top_solenoid.label = 'vent top'
self.pass_solenoid = wpilib.Solenoid(7)
self.pass_solenoid.label = 'pass'
self.robot_drive = wpilib.RobotDrive(self.lr_motor, self.rr_motor, self.lf_motor, self.rf_motor)
self.robot_drive.SetSafetyEnabled(False)
self.robot_drive.SetInvertedMotor(wpilib.RobotDrive.kFrontLeftMotor, True)
self.robot_drive.SetInvertedMotor(wpilib.RobotDrive.kRearLeftMotor, True)
# Sensors
self.gyro = wpilib.Gyro(1)
self.ultrasonic_sensor = wpilib.AnalogChannel(3)
self.ultrasonic_sensor.label = 'Ultrasonic'
self.arm_angle_sensor = wpilib.AnalogChannel(4)
self.arm_angle_sensor.label = 'Arm angle'
self.ball_sensor = wpilib.AnalogChannel(6)
self.ball_sensor.label = 'Ball sensor'
self.accelerometer = wpilib.ADXL345_I2C(1, wpilib.ADXL345_I2C.kRange_2G)
self.compressor = wpilib.Compressor(1,1)
#################################################################
# END SHARED CODE #
#################################################################
#
# Initialize robot components here
#
self.drive = drive.Drive(self.robot_drive, self.ultrasonic_sensor,self.gyro)
self.initSmartDashboard()
self.pushTimer=wpilib.Timer()
self.catapultTimer=wpilib.Timer()
self.catapult=catapult.Catapult(self.winch_motor,self.gearbox_solenoid,self.pass_solenoid,self.arm_angle_sensor,self.ball_sensor,self.catapultTimer)
self.intakeTimer=wpilib.Timer()
self.intake=intake.Intake(self.vent_top_solenoid,self.fill_top_solenoid,self.fill_bottom_solenoid,self.vent_bottom_solenoid,self.intake_motor,self.intakeTimer)
self.pulldowntoggle=False
self.components = {
'drive': self.drive,
'catapult': self.catapult,
'intake': self.intake
}
self.control_loop_wait_time = 0.025
self.autonomous = AutonomousModeManager(self.components)
def Autonomous(self):
'''Called when the robot is in autonomous mode'''
wpilib.SmartDashboard.PutNumber('RobotMode', MODE_AUTONOMOUS)
self.autonomous.run(self, self.control_loop_wait_time)
def Disabled(self):
'''Called when the robot is in disabled mode'''
wpilib.SmartDashboard.PutNumber('RobotMode', MODE_DISABLED)
while self.IsDisabled():
self.communicateWithSmartDashboard(True)
wpilib.Wait(0.01)
def OperatorControl(self):
'''Called when the robot is in Teleoperated mode'''
wpilib.SmartDashboard.PutNumber('RobotMode', MODE_TELEOPERATED)
dog = self.GetWatchdog()
dog.SetExpiration(0.25)
dog.SetEnabled(True)
self.compressor.Start()
preciseDelay = delay.PreciseDelay(self.control_loop_wait_time)
        while self.IsOperatorControl() and self.IsEnabled():
self.robotMode=1
dog.Feed()
#
# Driving
#
if self.joystick2.GetZ()==1:
self.drive.move((-1)*self.joystick1.GetX(), self.joystick1.GetY(), self.joystick2.GetX())
else:
self.drive.move(self.joystick1.GetX(), (-1)*self.joystick1.GetY(), self.joystick2.GetX())
# Intake
#
if self.joystick1.GetRawButton(2):
self.intake.armDown()
if self.joystick1.GetRawButton(3):
self.intake.armUp()
if self.joystick1.GetRawButton(5):
self.intake.ballIn()
if self.joystick1.GetRawButton(4):
self.intake.ballOut()
if self.joystick1.GetRawButton(6):
self.drive.angle_rotation(-10)
if self.joystick1.GetRawButton(7):
self.drive.angle_rotation(10)
#
# Catapult
#
if wpilib.SmartDashboard.GetBoolean("AutoWinch"):
self.catapult.autoWinch()
if self.joystick2.GetRawButton(1):
self.catapult.launchNoSensor()
if self.joystick1.GetRawButton(1):
self.catapult.pulldownNoSensor()
#
# Other
#
self.communicateWithSmartDashboard(False)
self.update()
preciseDelay.wait()
# Disable the watchdog at the end
dog.SetEnabled(False)
# only run the compressor in teleoperated mode
self.compressor.Stop()
def update(self):
'''This function calls all of the doit functions for each component'''
for component in self.components.values():
component.doit()
def initSmartDashboard(self):
self.sdTimer = wpilib.Timer()
self.sdTimer.Start()
wpilib.SmartDashboard.PutBoolean("AutoWinch", False)
wpilib.SmartDashboard.PutBoolean("EnableTuning", False)
wpilib.SmartDashboard.PutNumber("FirePower", 100)
wpilib.SmartDashboard.PutNumber("ArmSet", 0)
wpilib.SmartDashboard.PutBoolean("Fire", False)
wpilib.SmartDashboard.PutBoolean("GyroEnabled", True)
wpilib.SmartDashboard.PutNumber("GyroAngle",self.gyro.GetAngle())
wpilib.SmartDashboard.PutNumber("Compressor", self.compressor.GetPressureSwitchValue())
wpilib.SmartDashboard.PutNumber("AngleConstant", self.drive.angle_constant)
print (self.compressor.GetPressureSwitchValue())
def communicateWithSmartDashboard(self, in_disabled):
        '''Sends and receives values to/from the SmartDashboard'''
# only send values every once in awhile
if self.sdTimer.HasPeriodPassed(0.1):
# Send the distance to the driver station
wpilib.SmartDashboard.PutNumber("Distance",self.ultrasonic_sensor.GetVoltage())
wpilib.SmartDashboard.PutNumber("GyroAngle",self.gyro.GetAngle())
# Battery can actually be done dashboard side, fix that self (Shayne)
# Put the arm state
wpilib.SmartDashboard.PutNumber("ArmState",self.intake.GetMode())
# Get if a ball is loaded
wpilib.SmartDashboard.PutBoolean("BallLoaded", self.catapult.check_ready())
wpilib.SmartDashboard.PutNumber("ShootAngle",self.catapult.getCatapultLocation())
wpilib.SmartDashboard.PutNumber("Compressor", self.compressor.GetPressureSwitchValue())
# don't remove this, this allows us to disable the gyro
self.drive.set_gyro_enabled(wpilib.SmartDashboard.GetBoolean('GyroEnabled'))
# don't set any of the other variables in disabled mode!
if in_disabled:
return
# Get the number to set the winch power
#self.WinchPowerVar = wpilib.SmartDashboard.PutNumber("FirePower",1)
# TODO: Cleanup catapult.py and finish this
self.drive.set_angle_constant(wpilib.SmartDashboard.GetNumber('AngleConstant'))
        # If it's not 0, update the arm state
arm_state = wpilib.SmartDashboard.GetNumber("ArmSet")
if arm_state != 0:
self.intake.SetMode(arm_state)
wpilib.SmartDashboard.PutNumber("ArmSet", 0)
            # reset it to 0 to avoid locking the driver out of arm controls
if wpilib.SmartDashboard.GetBoolean("Fire"):
self.catapult.launchNoSensor()
wpilib.SmartDashboard.PutBoolean("Fire", False)
self.catapult.setWinchLocation(wpilib.SmartDashboard.GetNumber('FirePower'))
def run():
'''
When the robot starts, this is the very first function that
gets called
:returns: a new instance of the `MyRobot` class
'''
robot = MyRobot()
robot.StartCompetition()
return robot
if __name__ == '__main__':
if not hasattr(wpilib, 'require_version'):
print("ERROR: You must have pyfrc 2014.7.3 or above installed!") # pragma: no cover
else:
wpilib.require_version('2014.7.3')
import physics
wpilib.internal.physics_controller.setup(physics)
wpilib.run()
| frc1418/2014 | robot/robot/src/robot.py | Python | bsd-3-clause | 11,280 | ["Jaguar"] | f4de5422dcb9fb7713a25e7374065771b6d1a2174ec32d498ad470a80c825a7e |
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os.path
from tempfile import NamedTemporaryFile
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from crumbs.blast import (do_blast, BlasterForFewSubjects,
get_or_create_blastdb, _blastdb_exists, Blaster)
from crumbs.utils.file_utils import TemporaryDir
from crumbs.settings import get_setting
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.tags import NUCL, SEQITEM, SEQRECORD
from crumbs.seq.seq import SeqWrapper, SeqItem, assing_kind_to_seqs
TITANIUM_LINKER = get_setting('TITANIUM_LINKER')
FLX_LINKER = get_setting('FLX_LINKER')
# pylint: disable=R0201
# pylint: disable=R0904
# pylint: disable=C0111
class BlastTest(unittest.TestCase):
'It tests the blast infrastructure'
@staticmethod
def test_blastdb():
'It creates a blast database.'
db_name = 'arabidopsis_genes'
seq_fpath = os.path.join(TEST_DATA_DIR, db_name)
db_dir = TemporaryDir(prefix='blast_dbs_')
try:
db_path1 = get_or_create_blastdb(seq_fpath, directory=db_dir.name,
dbtype='nucl')
db_path = os.path.join(db_dir.name, db_name)
assert 'CATAGGGTCACCAATGGC' in open(db_path1).read(100)
assert db_path1 == db_path
assert os.path.exists(db_path)
index_fpath = os.path.join(db_dir.name, db_name + '.nsq')
assert os.path.exists(index_fpath)
finally:
db_dir.close()
def test_blast_search(self):
'It does a blast search'
db_name = 'arabidopsis_genes'
seq_fpath = os.path.join(TEST_DATA_DIR, db_name)
db_dir = TemporaryDir(prefix='blast_dbs_')
try:
db_fpath = get_or_create_blastdb(seq_fpath, directory=db_dir.name,
dbtype='nucl')
query_fhand = NamedTemporaryFile()
query_fhand.write(open(seq_fpath).read(200))
query_fhand.flush()
out_fhand = NamedTemporaryFile()
do_blast(seq_fpath, db_fpath, program='blastn',
out_fpath=out_fhand.name)
assert '</BlastOutput>' in open(out_fhand.name).read()
finally:
db_dir.close()
def xtest_remote_blast(self):
'It does a remote blast search'
seq_fhand = NamedTemporaryFile()
fasta = '>seq1\nCTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGT'
fasta += 'GTGACATCGATCATGTTGTATTGTGTACTATACACACACGTAGGTCGACTATCGTAGC\n'
seq_fhand.write(fasta)
seq_fhand.flush()
out_fhand = NamedTemporaryFile()
do_blast(seq_fhand.name, 'nt', program='blastn',
out_fpath=out_fhand.name, remote=True)
assert '</BlastOutput>' in open(out_fhand.name).read()
# fail if outfmt is not xml
try:
params = {'outfmt': 'txt'}
do_blast(seq_fhand.name, 'nt', program='blastn',
out_fpath=out_fhand.name, remote=True, params=params)
self.fail()
except RuntimeError:
pass
@staticmethod
def test_get_or_create_blastdb():
blastdb = os.path.join(TEST_DATA_DIR, 'arabidopsis_genes')
directory = TemporaryDir()
assert not _blastdb_exists(blastdb, NUCL)
get_or_create_blastdb(blastdb, NUCL, directory.name)
new_blast_path = os.path.join(directory.name,
os.path.basename(blastdb))
assert _blastdb_exists(new_blast_path, NUCL)
get_or_create_blastdb(blastdb, NUCL, directory.name)
assert _blastdb_exists(new_blast_path, NUCL)
directory.close()
# already exists
blastdb = os.path.join(TEST_DATA_DIR, 'blastdbs', 'arabidopsis_genes')
assert _blastdb_exists(blastdb, NUCL)
get_or_create_blastdb(blastdb, NUCL)
assert _blastdb_exists(blastdb, NUCL)
def create_a_matepair_file():
'It creates a matepair fasta file'
seq_5 = 'CTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTGAC'
seq_3 = 'ATCGATCATGTTGTATTGTGTACTATACACACACGTAGGTCGACTATCGTAGCTAGT'
mate_seq = seq_5 + TITANIUM_LINKER + seq_3
mate_fhand = NamedTemporaryFile(suffix='.fasta')
mate_fhand.write('>seq1\n' + mate_seq + '\n')
mate_fhand.flush()
return mate_fhand
class BlastMaterTest(unittest.TestCase):
'It tests the splitting of mate pairs'
def test_matching_segments(self):
'It tests the detection of oligos in sequence files'
seq_5 = 'CTAGTCTAGTCGTAGTCATGGCTGTAGTCTAGTCTACGATTCGTATCAGTTGTGTGAC'
mate_fhand = create_a_matepair_file()
linkers = [SeqItem('titan', ['>titan\n', TITANIUM_LINKER + '\n']),
SeqItem('flx', ['>flx\n', FLX_LINKER + '\n'])]
linkers = assing_kind_to_seqs(SEQITEM, linkers, 'fasta')
expected_region = (len(seq_5), len(seq_5 + TITANIUM_LINKER) - 1)
matcher = BlasterForFewSubjects(mate_fhand.name, linkers,
program='blastn',
elongate_for_global=True)
linker_region = matcher.get_matched_segments_for_read('seq1')[0]
assert [expected_region] == linker_region
class BlasterTest(unittest.TestCase):
def xtest_blaster(self):
seq = 'GAGAAATTCCTTTGGAAGTTATTCCGTAGCATAAGAGCTGAAACTTCAGAGCAAGTTT'
seq += 'TCATTGGGCAAAATGGGGGAACAACCTATCTTCAGCACTCGAGCTCATGTCTTCCAAATTGA'
seq += 'CCCAAACACAAAGAAGAACTGGGTACCCACCAGCAAGCATGCAGTTACTGTGTCTTATTTCT'
seq += 'ATGACAGCACAAGAAATGTGTATAGGATAATCAGTTTAGATGGCTCAAAGGCAATAATAAAT'
seq += 'AGTACCATCACCCCAAACATGACA'
seqrec = SeqWrapper(SEQRECORD, SeqRecord(Seq(seq), id='seq'), None)
blaster = Blaster([seqrec], 'nr', 'blastn', remote=True)
print blaster.get_matched_segments('seq')
assert blaster.get_matched_segments('seq') == [(1, 1740)]
if __name__ == '__main__':
#import sys;sys.argv = ['', 'BlastTest.test_get_or_create_blastdb']
unittest.main()
| JoseBlanca/seq_crumbs | test/seq/test_blast.py | Python | gpl-3.0 | 6,804 | ["BLAST"] | 7b1365abf08b875aa52c61eb2bde716be249684c53900a2f9e639e443ce2d245 |
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import mooseutils
from .. import misc
class ExodusColorBar(misc.ColorBar):
"""
ColorBar designed to work with ExodusResult objects.
Inputs:
        result0: The ExodusResult object for the primary axis.
result1: (Optional) The ExodusResult for the secondary axis.
"""
AXIS_NAMES = ['primary', 'secondary']
@staticmethod
def getOptions():
opt = misc.ColorBar.getOptions()
opt.setDefault('viewport', None)
return opt
def __init__(self, *results, **kwargs):
super(ExodusColorBar, self).__init__(**kwargs)
self._results = results
if len(results) not in [1, 2]:
raise mooseutils.MooseException('One or two ExodusResult objects must be supplied to '
'the ExodusColorBar')
def setOptions(self, *args, **kwargs):
"""
Update the supplied options and apply the colormap options from the ExodusResult.
"""
opts = ['cmap', 'cmap_reverse', 'cmap_num_colors', 'cmap_range']
cmap_options = {key:self._results[0].getOption(key) for key in opts}
kwargs.update(cmap_options)
super(ExodusColorBar, self).setOptions(*args, **kwargs)
def needsUpdate(self):
"""
        Check if the result ranges have changed.
"""
for i, result in enumerate(self._results):
axis_options = self.getOption(self.AXIS_NAMES[i])
rng = result[0].getVTKMapper().GetScalarRange()
if rng != axis_options['lim']:
return True
return super(ExodusColorBar, self).needsUpdate() or \
any([result.needsUpdate() for result in self._results])
def update(self, **kwargs):
"""
Extracts the settings from the ExodusResult object to define the correct settings for the
colorbar.
"""
# Set the options provided
self.setOptions(**kwargs)
if self.needsInitialize():
self.initialize()
# The results must be updated for the settings to be applied below
for result in self._results:
if result.needsUpdate():
result.update()
# Enable the secondary if two results provided
if len(self._results) == 2:
self.getOption(self.AXIS_NAMES[1])['visible'] = True
# Apply settings from results
for i, result in enumerate(self._results):
            # Set the range for the axes and titles
axis_options = self.getOption(self.AXIS_NAMES[i])
axis_options['lim'] = list(result[0].getVTKMapper().GetScalarRange())
if not axis_options.isOptionValid('title'):
self._sources[i+1].getVTKSource().SetTitle(result[0].getVTKMapper().GetArrayName())
# Viewport
if not self.isOptionValid('viewport'):
self.setOption('viewport', result.getOption('viewport'))
self.setOption('layer', result.getOption('layer'))
super(ExodusColorBar, self).update(**kwargs)
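# Usage sketch (illustrative, commented out; the file name and variable value
# are placeholders, and the reader/result classes are assumed to follow the
# usual chigger pattern):
#
#     import chigger
#     reader = chigger.exodus.ExodusReader('mug_blocks_out.e')
#     result = chigger.exodus.ExodusResult(reader, variable='diffused')
#     cbar = chigger.exodus.ExodusColorBar(result)
#     window = chigger.RenderWindow(result, cbar, size=[600, 400])
#     window.start()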
| Chuban/moose | python/chigger/exodus/ExodusColorBar.py | Python | lgpl-2.1 | 3,953 | ["MOOSE"] | a5929dd6795bd3980f977293ad277c46837c3328181826dd78860867855ea1b4 |
# mexception.py ---
#
# Filename: mexception.py
# Description: Implements some custom exceptions
# Author: Subhasis Ray
# Maintainer:
# Created: Fri Apr 19 14:34:51 2013 (+0530)
# Version:
# Last-Updated: Wed May 22 14:25:18 2013 (+0530)
# By: subha
# Update #: 22
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""Exception for MOOSE. All error-level exceptions must be derived
from MooseError. Otherwise they will not be handled by the system error
handler which displays a message box.
"""
class MooseInfo(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class MooseWarning(Warning):
def __init__(self, *args, **kwargs):
Warning.__init__(self, *args, **kwargs)
class MooseError(StandardError):
def __init__(self, *args, **kwargs):
StandardError.__init__(self, *args, **kwargs)
class FileLoadError(MooseError):
def __init__(self, *args, **kwargs):
StandardError.__init__(self, *args, **kwargs)
class ElementNameError(MooseError):
def __init__(self, *args, **kwargs):
StandardError.__init__(self, *args, **kwargs)
#
# mexception.py ends here
| dilawar/moose-full | moose-gui/mexception.py | Python | gpl-2.0 | 1,959 | ["MOOSE"] | 4b565234fb0654d73e28ceadbbd6ae77525de2bbed6b63dae63675ab88a498ed |
"""
Test the about xblock
"""
import datetime
import pytz
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from mock import patch
from nose.plugins.attrib import attr
from course_modes.models import CourseMode
from track.tests import EventTrackingTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.utils import TEST_DATA_DIR
from xmodule.modulestore.xml_importer import import_course_from_xml
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory, CourseEnrollmentAllowedFactory, UserFactory
from shoppingcart.models import Order, PaidCourseRegistration
from xmodule.course_module import CATALOG_VISIBILITY_ABOUT, CATALOG_VISIBILITY_NONE
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from util.milestones_helpers import (
set_prerequisite_courses,
get_prerequisite_courses_display,
)
from milestones.tests.utils import MilestonesTestCaseMixin
from lms.djangoapps.ccx.tests.factories import CcxFactory
from .helpers import LoginEnrollmentTestCase
# HTML for registration button
REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">"
SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course."
@attr(shard=1)
class AboutTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase, EventTrackingTestCase, MilestonesTestCaseMixin):
"""
Tests about xblock.
"""
@classmethod
def setUpClass(cls):
super(AboutTestCase, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE)
cls.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT)
cls.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_without_about.location,
data="WITHOUT ABOUT", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_with_about.location,
data="WITH ABOUT", display_name="overview"
)
def setUp(self):
super(AboutTestCase, self).setUp()
self.course_mode = CourseMode(
course_id=self.purchase_course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10
)
self.course_mode.save()
def test_anonymous_user(self):
"""
This test asserts that a non-logged in user can visit the course about page
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
def test_logged_in(self):
"""
This test asserts that a logged-in user can visit the course about page
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_already_enrolled(self):
"""
Asserts that the end user sees the appropriate messaging
when he/she visits the course about page, but is already enrolled
"""
self.setup_user()
self.enroll(self.course, True)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are enrolled in this course", resp.content)
self.assertIn("View Course", resp.content)
@override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page")
def test_visible_about_page_settings(self):
"""
Verify that the About Page honors the permission settings in the course module
"""
url = reverse('about_course', args=[self.course_with_about.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("WITH ABOUT", resp.content)
url = reverse('about_course', args=[self.course_without_about.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 404)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing(self):
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
# should be redirected
self.assertEqual(resp.status_code, 302)
# follow this time, and check we're redirected to the course info page
resp = self.client.get(url, follow=True)
target_url = resp.redirect_chain[-1][0]
info_url = reverse('info', args=[self.course.id.to_deprecated_string()])
self.assertTrue(target_url.endswith(info_url))
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_pre_requisite_course(self):
pre_requisite_course = CourseFactory.create(org='edX', course='900', display_name='pre requisite course')
course = CourseFactory.create(pre_requisite_courses=[unicode(pre_requisite_course.id)])
self.setup_user()
url = reverse('about_course', args=[unicode(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[unicode(pre_requisite_courses[0]['key'])])
self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.strip('\n'))
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_about_page_unfulfilled_prereqs(self):
pre_requisite_course = CourseFactory.create(
org='edX',
course='901',
display_name='pre requisite course',
)
pre_requisite_courses = [unicode(pre_requisite_course.id)]
# for this failure to occur, the enrollment window needs to be in the past
course = CourseFactory.create(
org='edX',
course='1000',
# closed enrollment
enrollment_start=datetime.datetime(2013, 1, 1),
enrollment_end=datetime.datetime(2014, 1, 1),
start=datetime.datetime(2013, 1, 1),
end=datetime.datetime(2030, 1, 1),
pre_requisite_courses=pre_requisite_courses,
)
set_prerequisite_courses(course.id, pre_requisite_courses)
self.setup_user()
self.enroll(self.course, True)
self.enroll(pre_requisite_course, True)
url = reverse('about_course', args=[unicode(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[unicode(pre_requisite_courses[0]['key'])])
self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.strip('\n'))
url = reverse('about_course', args=[unicode(pre_requisite_course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@attr(shard=1)
class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the course about page
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Set up the tests
"""
super(AboutTestCaseXML, self).setUp()
# The following test course (which lives at common/test/data/2014)
# is closed; we're testing that an about page still appears when
# the course is already closed
self.xml_course_id = self.store.make_course_key('edX', 'detached_pages', '2014')
import_course_from_xml(
self.store,
'test_user',
TEST_DATA_DIR,
source_dirs=['2014'],
static_content_store=None,
target_id=self.xml_course_id,
raise_on_failure=True,
create_if_not_present=True,
)
# this text appears in that course's about page
# common/test/data/2014/about/overview.html
self.xml_data = "about page 463139"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@attr(shard=1)
class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test case will check the About page when a course has a capped enrollment
"""
@classmethod
def setUpClass(cls):
super(AboutWithCappedEnrollmentsTestCase, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def setUp(self):
"""
Set up the tests
"""
super(AboutWithCappedEnrollmentsTestCase, self).setUp()
def test_enrollment_cap(self):
"""
This test will make sure that enrollment caps are enforced
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('<a href="#" class="register">', resp.content)
self.enroll(self.course, verify=True)
# create a new account since the first account is already enrolled in the course
self.email = 'foo_second@test.com'
self.password = 'bar'
self.username = 'test_second'
self.create_account(self.username, self.email, self.password)
self.activate_user(self.email)
self.login(self.email, self.password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
# Try to enroll as well
result = self.enroll(self.course)
self.assertFalse(result)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
@attr(shard=1)
class AboutWithInvitationOnly(SharedModuleStoreTestCase):
"""
This test case will check the About page when a course is invitation only.
"""
@classmethod
def setUpClass(cls):
super(AboutWithInvitationOnly, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"invitation_only": True})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
display_name="overview"
)
def setUp(self):
super(AboutWithInvitationOnly, self).setUp()
def test_invitation_only(self):
"""
Test for user not logged in, invitation only course.
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
def test_invitation_only_but_allowed(self):
"""
Test for user logged in and allowed to enroll in invitation only course.
"""
# Course is invitation only, student is allowed to enroll and logged in
user = UserFactory.create(username='allowed_student', password='test', email='allowed_student@test.com')
CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id)
self.client.login(username=user.username, password='test')
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'RESTRICT_ENROLL_BY_REG_METHOD': True})
class AboutTestCaseShibCourse(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
Test cases covering about page behavior for courses that use shib enrollment domain ("shib courses")
"""
@classmethod
def setUpClass(cls):
super(AboutTestCaseShibCourse, cls).setUpClass()
cls.course = CourseFactory.create(enrollment_domain="shib:https://idp.stanford.edu/")
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def setUp(self):
super(AboutTestCaseShibCourse, self).setUp()
def test_logged_in_shib_course(self):
"""
        For shib courses, logged-in users will see the enroll button, but are rejected once they click it
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
def test_anonymous_user_shib_course(self):
"""
For shib courses, anonymous users will also see the enroll button
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
@attr(shard=1)
class AboutWithClosedEnrollment(ModuleStoreTestCase):
"""
This test case will check the About page for a course that has enrollment start/end
set but it is currently outside of that period.
"""
def setUp(self):
super(AboutWithClosedEnrollment, self).setUp()
self.course = CourseFactory.create(metadata={"invitation_only": False})
# Setup enrollment period to be in future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
self.course = self.update_course(self.course, self.user.id)
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
display_name="overview"
)
    def test_closed_enrollment(self):
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
def test_course_price_is_not_visble_in_sidebar(self):
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
        # course price is not visible in the course_about page when the
        # course mode is not set to honor
self.assertNotIn('<span class="important-dates-item-text">$10</span>', resp.content)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True})
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
class AboutPurchaseCourseTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test class runs through a suite of verifications regarding
purchaseable courses
"""
@classmethod
def setUpClass(cls):
super(AboutPurchaseCourseTestCase, cls).setUpClass()
cls.course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
cls.closed_course = CourseFactory.create(
org='MITx',
number='closed',
display_name='Closed Course To Buy',
enrollment_start=tomorrow,
enrollment_end=nextday
)
def setUp(self):
super(AboutPurchaseCourseTestCase, self).setUp()
self._set_ecomm(self.course)
self._set_ecomm(self.closed_course)
def _set_ecomm(self, course):
"""
Helper method to turn on ecommerce on the course
"""
course_mode = CourseMode(
course_id=course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10,
)
course_mode.save()
def test_anonymous_user(self):
"""
Make sure an anonymous user sees the purchase button
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_logged_in(self):
"""
Make sure a logged in user sees the purchase button
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_already_in_cart(self):
"""
This makes sure if a user has this course in the cart, that the expected message
appears
"""
self.setup_user()
cart = Order.get_cart_for_user(self.user)
PaidCourseRegistration.add_to_order(cart, self.course.id)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("This course is in your", resp.content)
self.assertNotIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_already_enrolled(self):
"""
This makes sure that the already enrolled message appears for paywalled courses
"""
self.setup_user()
# note that we can't call self.enroll here since that goes through
# the Django student views, which doesn't allow for enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, self.course.id)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are enrolled in this course", resp.content)
self.assertIn("View Course", resp.content)
self.assertNotIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_closed_enrollment(self):
"""
This makes sure that paywalled courses also honor the registration
window
"""
self.setup_user()
url = reverse('about_course', args=[self.closed_course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
self.assertNotIn("Add closed to Cart <span>($10 USD)</span>", resp.content)
        # course price is visible in the course_about page when the course
        # mode is set to honor and its price is set
self.assertIn('<span class="important-dates-item-text">$10</span>', resp.content)
def test_invitation_only(self):
"""
        This makes sure that the invitation-only restriction takes precedence over
any purchase enablements
"""
course = CourseFactory.create(metadata={"invitation_only": True})
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
def test_enrollment_cap(self):
"""
Make sure that capped enrollments work even with
paywalled courses
"""
course = CourseFactory.create(
metadata={
"max_student_enrollments_allowed": 1,
"display_coursenumber": "buyme",
}
)
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
# note that we can't call self.enroll here since that goes through
# the Django student views, which doesn't allow for enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, course.id)
# create a new account since the first account is already enrolled in the course
email = 'foo_second@test.com'
password = 'bar'
username = 'test_second'
self.create_account(username,
email, password)
self.activate_user(email)
self.login(email, password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
self.assertNotIn("Add buyme to Cart ($10)", resp.content)
def test_free_course_display(self):
"""
Make sure other courses that don't have shopping cart enabled don't display the add-to-cart button
and don't display the course_price field if Cosmetic Price is disabled.
"""
course = CourseFactory.create(org='MITx', number='free', display_name='Course For Free')
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn("Add free to Cart (Free)", resp.content)
self.assertNotIn('<p class="important-dates-item-title">Price</p>', resp.content)
class CourseAboutTestCaseCCX(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test that an unenrolled student cannot access a CCX.
    Note: Only a CCX coach can enroll a student in a CCX; self-registration is not allowed.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(CourseAboutTestCaseCCX, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(CourseAboutTestCaseCCX, self).setUp()
# Create ccx coach account
self.coach = coach = AdminFactory.create(password="test")
self.client.login(username=coach.username, password="test")
def test_redirect_to_dashboard_unenrolled_ccx(self):
"""
        Assert that when an unenrolled user tries to access a CCX, they are not allowed to self-register.
        Instead, the user is redirected to the student dashboard.
"""
# create ccx
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
ccx_locator = CCXLocator.from_course_locator(self.course.id, unicode(ccx.id))
self.setup_user()
url = reverse('info', args=[ccx_locator])
response = self.client.get(url)
expected = reverse('dashboard')
self.assertRedirects(response, expected, status_code=302, target_status_code=200)
|
louyihua/edx-platform
|
lms/djangoapps/courseware/tests/test_about.py
|
Python
|
agpl-3.0
| 26,215
|
[
"VisIt"
] |
7a25114afad7f7550f3c23312742010c430fb64d2fe8197991023184bad005fd
|
# Copyright (C) 2002, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Consumer class that builds a Structure object.
This is used by the PDBCoordsParser classes.
"""
import warnings
from Structures.Model import Model
from Structures.Chain import Chain
from Structures.Residue import Residue, DisorderedResidue
from Structures.Atom import Atom, DisorderedAtom
from Utils.Exceptions import PDBConstructionException, PDBConstructionWarning
class StructureBuilder:
"""
Deals with constructing the Structure object. The StructureBuilder class is used
by the PDBCoordsParser classes to translate a file to a Structure object.
"""
def __init__(self):
self.line_counter = 0
self.all_models_generated = []
def _is_completely_disordered(self, residue):
"Returns True if all atoms in the residue have a non blank altloc."
for atom in residue.all_atoms():
if atom.altloc() == " ":
return False
return True
# Public methods called by the Parser classes
def set_line_counter(self, line_counter):
"""
The line counter keeps track of the line in the PDB file that
is being parsed.
Arguments:
o line_counter - int
"""
assert(isinstance(line_counter, int))
self.line_counter = line_counter
def init_model(self, model_id, serial_num=None):
"""Initiate a new Model object with given id.
Arguments:
o id - int
o serial_num - int
"""
assert(isinstance(model_id, int) and (isinstance(serial_num, int) or serial_num is None))
self.all_models_generated.append(Model(model_id,serial_num))
self.current_model = self.all_models_generated[-1]
def init_chain(self, chain_id):
"""Initiate a new Chain object with given id.
Arguments:
o chain_id - string
"""
if self.current_model.has_chain_with_id(chain_id):
self.current_chain = self.current_model.chains(chain_id)
warnings.warn("WARNING: Chain %s is discontinuous at line %i."
% (chain_id, self.line_counter),
PDBConstructionWarning)
else:
self.current_chain = Chain(chain_id)
self.current_model.add_chain(self.current_chain)
def init_seg(self, segid):
"""Flag a change in segid.
Arguments:
o segid - string
"""
assert(isinstance(segid, str))
self.segid=segid
def init_residue(self, resname, field, resseq, icode):
"""
Initiate a new Residue object.
Arguments:
o resname - string, e.g. "ASN"
o field - hetero flag, "W" for waters, "H" for
hetero residues, otherwise blank.
o resseq - int, sequence identifier
o icode - string, insertion code
"""
if field == "H":
# The hetero field consists of H_ + the residue name (e.g. H_FUC)
field = "H_" + resname
res_id=(field, resseq, icode)
if field == " ":
if self.current_chain.has_residue_with_id(res_id):
# There already is a residue with the id (field, resseq, icode).
# This only makes sense in the case of a point mutation.
warnings.warn("WARNING: Residue ('%s', %i, '%s') "
"redefined at line %i."
% (field, resseq, icode, self.line_counter),
PDBConstructionWarning)
duplicate_residue = self.current_chain.residues(res_id)
if duplicate_residue.is_disordered() == 2:
# The residue in the chain is a DisorderedResidue object.
# So just add the last Residue object.
if duplicate_residue.has_residue_with_name(resname):
# The residue was already made
self.current_residue = duplicate_residue
duplicate_residue.set_main_disorder_identifier(resname)
else:
# Make a new residue and add it to the already
# present DisorderedResidue
new_residue = Residue(res_id, resname, self.segid)
duplicate_residue.add_residue(new_residue)
self.current_residue = duplicate_residue
return
else:
# Make a new DisorderedResidue object and put all
# the Residue objects with the id (field, resseq, icode) in it.
# These residues each should have non-blank altlocs for all their atoms.
# If not, the PDB file probably contains an error.
if not self._is_completely_disordered(duplicate_residue):
# if this exception is ignored, a residue will be missing
self.current_residue = None
raise PDBConstructionException(\
"Blank altlocs in duplicate residue %s ('%s', %i, '%s')" \
% (resname, field, resseq, icode))
self.current_chain.remove_residue_with_id(res_id)
new_residue = Residue(res_id, resname, self.segid)
disordered_residue = DisorderedResidue(res_id)
self.current_chain.add_residue(disordered_residue)
disordered_residue.add_residue(duplicate_residue)
disordered_residue.add_residue(new_residue)
self.current_residue = disordered_residue
return
residue = Residue(res_id, resname, self.segid)
self.current_chain.add_residue(residue)
self.current_residue = residue
def init_atom(self, name, coord, b_factor, occupancy, altloc, fullname,
serial_number=None, element=None):
"""
Initiate a new Atom object.
Arguments:
o name - string, atom name, e.g. CA, spaces should be stripped
o coord - Numeric array (Float0, size 3), atomic coordinates
o b_factor - float, B factor
o occupancy - float
o altloc - string, alternative location specifier
o fullname - string, atom name including spaces, e.g. " CA "
o element - string, upper case, e.g. "HG" for mercury
"""
residue = self.current_residue
# if residue is None, an exception was generated during
# the construction of the residue
if residue is None:
return
# First check if this atom is already present in the residue.
# If it is, it might be due to the fact that the two atoms have atom
# names that differ only in spaces (e.g. "CA.." and ".CA.",
# where the dots are spaces). If that is so, use all spaces
# in the atom name of the current atom.
if residue.has_atom_with_id(name):
duplicate_atom = residue.atoms(name)
# atom name with spaces of duplicate atom
duplicate_fullname = duplicate_atom.fullname()
if duplicate_fullname != fullname:
# name of current atom now includes spaces
name = fullname
warnings.warn("WARNING: atom names %s and %s differ "
"only in spaces at line %i."
% (duplicate_fullname, fullname,
self.line_counter),
PDBConstructionWarning)
atom = self.current_atom = Atom(serial_number, name, coord, element)
atom.pdb_atom_post_initialize(b_factor, occupancy, altloc, fullname)
if altloc!=" ":
# The atom is disordered
if residue.has_atom_with_id(name):
# Residue already contains this atom
duplicate_atom = residue.atoms(name)
                if duplicate_atom.is_disordered() == 2:
duplicate_atom.add_atom(atom)
else:
# This is an error in the PDB file:
# a disordered atom is found with a blank altloc
# Detach the duplicate atom, and put it in a
# DisorderedAtom object together with the current
# atom.
residue.remove_atom_with_id(name)
disordered_atom = DisorderedAtom(name)
residue.add_atom(disordered_atom)
disordered_atom.add_atom(atom)
disordered_atom.add_atom(duplicate_atom)
residue.flag_disordered()
warnings.warn("WARNING: disordered atom found "
"with blank altloc before line %i.\n"
% self.line_counter,
PDBConstructionWarning)
else:
# The residue does not contain this disordered atom
# so we create a new one.
disordered_atom = DisorderedAtom(name)
residue.add_atom(disordered_atom)
# Add the real atom to the disordered atom, and the
# disordered atom to the residue
disordered_atom.add_atom(atom)
residue.flag_disordered()
else:
# The atom is not disordered
residue.add_atom(atom)
def set_anisou(self, anisou_array):
"Set anisotropic B factor of current Atom."
self.current_atom.set_anisou(anisou_array)
def set_siguij(self, siguij_array):
"Set standard deviation of anisotropic B factor of current Atom."
self.current_atom.set_siguij(siguij_array)
def set_sigatm(self, sigatm_array):
"Set standard deviation of atom position of current Atom."
self.current_atom.set_sigatm(sigatm_array)
|
q10/fiddle
|
python/Parsers/StructureBuilder.py
|
Python
|
bsd-3-clause
| 10,466
|
[
"Biopython"
] |
b5ac0eef9668ad87c5a0154f0ba3a93ec8fd9a2270ab8f0c66ae34bf8b9da21a
|
"""
AccountingCLI class implementing command line administrative interface to
DIRAC Accounting DataStore Service
"""
import sys
from DIRAC import gLogger
from DIRAC.Core.Base.CLI import CLI, colorize
from DIRAC.AccountingSystem.Client.DataStoreClient import DataStoreClient
class AccountingCLI(CLI):
def __init__(self):
CLI.__init__(self)
self.do_connect(None)
def start(self):
"""
Start the command loop
"""
if not self.connected:
gLogger.error("Client is not connected")
try:
self.cmdloop()
except KeyboardInterrupt:
gLogger.warn("Received a keyboard interrupt.")
self.do_quit("")
def do_connect(self, args):
"""
Tries to connect to the server
Usage: connect
"""
gLogger.info("Trying to connect to server")
self.connected = False
self.prompt = "(%s)> " % colorize("Not connected", "red")
acClient = DataStoreClient()
retVal = acClient.ping()
if retVal["OK"]:
self.prompt = "(%s)> " % colorize("Connected", "green")
self.connected = True
def printComment(self, comment):
commentList = comment.split("\n")
for commentLine in commentList[:-1]:
print("# %s" % commentLine.strip())
def showTraceback(self):
import traceback
        exc_type, exc_value = sys.exc_info()[:2]
        print("________________________\n")
        print("Exception", exc_type, ":", exc_value)
traceback.print_tb(sys.exc_info()[2])
print("________________________\n")
def do_registerType(self, args):
"""
Registers a new accounting type
Usage : registerType <typeName>
<DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
should exist and inherit the base type
"""
try:
argList = args.split()
if argList:
typeName = argList[0].strip()
else:
gLogger.error("No type name specified")
return
# Try to import the type
try:
typeModule = __import__(
"DIRAC.AccountingSystem.Client.Types.%s" % typeName, globals(), locals(), typeName
)
typeClass = getattr(typeModule, typeName)
except Exception as e:
gLogger.error("Can't load type %s: %s" % (typeName, str(e)))
return
gLogger.info("Loaded type %s" % typeClass.__name__)
typeDef = typeClass().getDefinition()
acClient = DataStoreClient()
retVal = acClient.registerType(*typeDef)
if retVal["OK"]:
gLogger.info("Type registered successfully")
else:
gLogger.error("Error: %s" % retVal["Message"])
except Exception:
self.showTraceback()
def do_resetBucketLength(self, args):
"""
Set the bucket Length. Will trigger a recalculation of buckets. Can take a while.
Usage : resetBucketLength <typeName>
<DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
should exist and inherit the base type
"""
try:
argList = args.split()
if argList:
typeName = argList[0].strip()
else:
gLogger.error("No type name specified")
return
# Try to import the type
try:
typeModule = __import__(
"DIRAC.AccountingSystem.Client.Types.%s" % typeName, globals(), locals(), typeName
)
typeClass = getattr(typeModule, typeName)
except Exception as e:
gLogger.error("Can't load type %s: %s" % (typeName, str(e)))
return
gLogger.info("Loaded type %s" % typeClass.__name__)
typeDef = typeClass().getDefinition()
acClient = DataStoreClient()
retVal = acClient.setBucketsLength(typeDef[0], typeDef[3])
if retVal["OK"]:
gLogger.info("Type registered successfully")
else:
gLogger.error("Error: %s" % retVal["Message"])
except Exception:
self.showTraceback()
def do_regenerateBuckets(self, args):
"""
Regenerate buckets for type. Can take a while.
Usage : regenerateBuckets <typeName>
<DIRACRoot>/DIRAC/AccountingSystem/Client/Types/<typeName>
should exist and inherit the base type
"""
try:
argList = args.split()
if argList:
typeName = argList[0].strip()
else:
gLogger.error("No type name specified")
return
# Try to import the type
try:
typeModule = __import__(
"DIRAC.AccountingSystem.Client.Types.%s" % typeName, globals(), locals(), typeName
)
typeClass = getattr(typeModule, typeName)
except Exception as e:
gLogger.error("Can't load type %s: %s" % (typeName, str(e)))
return
gLogger.info("Loaded type %s" % typeClass.__name__)
typeDef = typeClass().getDefinition()
acClient = DataStoreClient()
retVal = acClient.regenerateBuckets(typeDef[0])
if retVal["OK"]:
gLogger.info("Buckets recalculated!")
else:
gLogger.error("Error: %s" % retVal["Message"])
except Exception:
self.showTraceback()
def do_showRegisteredTypes(self, args):
"""
Get a list of registered types
Usage : showRegisteredTypes
"""
try:
acClient = DataStoreClient()
retVal = acClient.getRegisteredTypes()
print(retVal)
if not retVal["OK"]:
gLogger.error("Error: %s" % retVal["Message"])
return
for typeList in retVal["Value"]:
print(typeList[0])
print(" Key fields:\n %s" % "\n ".join(typeList[1]))
print(" Value fields:\n %s" % "\n ".join(typeList[2]))
except Exception:
self.showTraceback()
def do_deleteType(self, args):
"""
Delete a registered accounting type.
Usage : deleteType <typeName>
WARN! It will delete all data associated to that type! VERY DANGEROUS!
If you screw it, you'll discover a new dimension of pain and doom! :)
"""
try:
argList = args.split()
if argList:
typeName = argList[0].strip()
else:
gLogger.error("No type name specified")
return
while True:
choice = input(
"Are you completely sure you want to delete type %s and all it's data? yes/no [no]: " % typeName
)
choice = choice.lower()
if choice in ("yes", "y"):
break
else:
print("Delete aborted")
return
acClient = DataStoreClient()
retVal = acClient.deleteType(typeName)
if not retVal["OK"]:
gLogger.error("Error: %s" % retVal["Message"])
return
print("Hope you meant it, because it's done")
except Exception:
self.showTraceback()
def do_compactBuckets(self, args):
"""
Compact buckets table
Usage : compactBuckets
"""
try:
acClient = DataStoreClient()
retVal = acClient.compactDB()
if not retVal["OK"]:
gLogger.error("Error: %s" % retVal["Message"])
return
gLogger.info("Done")
except Exception:
self.showTraceback()
|
DIRACGrid/DIRAC
|
src/DIRAC/AccountingSystem/Client/AccountingCLI.py
|
Python
|
gpl-3.0
| 8,061
|
[
"DIRAC"
] |
17ec4a4bdcc243d2e05628733630d016ebbc10a549bf27d2426fb3b00ad50d7a
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
def get_atom2bas_s(_bas):
"""
For a given _bas list (see mole and mole_pure from pySCF)
constructs a list of atom --> start shell
    The list is natoms+1 long, i.e. one entry longer than the number of atoms
    The list can be used to get the start/finish indices of pySCF's multiplets^*
    This is useful to compose shls_slice arguments for the pySCF integral evaluators .intor(...)
    ^* a pySCF multiplet can be "repeated" as many times as the number of contractions
"""
natoms = max([bb[0] for bb in _bas])+1
atom2bas_s = np.array([len(_bas)]*(natoms+1), dtype=np.int32)
for ib,[at,l,ngto,nctr,a,b,c,d] in enumerate(_bas): atom2bas_s[at] = min(atom2bas_s[at],ib)
return atom2bas_s
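# Example of how the result is typically used (see the __main__ block below):
# the shells of atom ia span the half-open range
# [atom2bas_s[ia], atom2bas_s[ia+1]), which can be packed into a shls_slice
# tuple for mol.intor(...).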
if __name__ == '__main__':
"""
Compute only bilocal part of the four-orbitals, two-center Coulomb integrals
"""
from pyscf import gto
from pyscf.nao.m_system_vars import system_vars_c
from pyscf.nao.m_conv_yzx2xyz import conv_yzx2xyz_c
tol = 1e-5
mol = gto.M(atom='O 0 0 0; H 0 -0.1 1; H 0 0.1 -1', basis='ccpvdz')
sv = system_vars_c(gto=mol)
na = sv.natm
for ia1,n1 in zip(range(na), sv.atom2s[1:]-sv.atom2s[0:na]):
for ia2,n2 in zip(range(ia1+1,sv.natm+1), sv.atom2s[ia1+2:]-sv.atom2s[ia1+1:na]):
mol2 = gto.Mole_pure(atom=[mol._atom[ia1], mol._atom[ia2]], basis=mol.basis).build()
bs = get_atom2bas_s(mol2._bas)
ss = (bs[0],bs[1], bs[1],bs[2], bs[0],bs[1], bs[1],bs[2])
eri = mol2.intor('cint2e_sph', shls_slice=ss).reshape([n1,n2,n1,n2])
eri = conv_yzx2xyz_c(mol2).conv_yzx2xyz_4d(eri, 'pyscf2nao', ss).reshape([n1*n2,n1*n2])
ee,xx = np.linalg.eigh(eri)
nlinindep = list(ee>tol).count(True)
print(' ia1, ia2, n1, n2: ', ia1, ia2, n1, n2, eri.shape, n1*n2, nlinindep, n1*n2/nlinindep)
|
gkc1000/pyscf
|
pyscf/nao/m_get_atom2bas_s.py
|
Python
|
apache-2.0
| 2,437
|
[
"PySCF"
] |
1ef9daa87ec933aeaf1ad0ddc986f9173e7d1b527315d10bf198db97c4d05a39
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Simon Perkins
#
# This file is part of montblanc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import collections
import copy
import itertools
import threading
import sys
import types
import six
import concurrent.futures as cf
import numpy as np
import tensorflow as tf
from tensorflow.python.client import timeline
from attrdict import AttrDict
import attr
import montblanc
import montblanc.util as mbu
from montblanc.src_types import source_var_types
from montblanc.solvers import MontblancTensorflowSolver
from . import load_tf_lib
from .cube_dim_transcoder import CubeDimensionTranscoder
from .staging_area_wrapper import create_staging_area_wrapper
from .sources import (SourceContext, DefaultsSourceProvider)
from .sinks import (SinkContext, NullSinkProvider)
from .start_context import StartContext
from .stop_context import StopContext
from .init_context import InitialisationContext
ONE_KB, ONE_MB, ONE_GB = 1024, 1024**2, 1024**3
QUEUE_SIZE = 10
rime = load_tf_lib()
DataSource = attr.make_class("DataSource", ['source', 'dtype', 'name'],
slots=True, frozen=True)
DataSink = attr.make_class("DataSink", ['sink', 'name'],
slots=True, frozen=True)
FeedOnce = attr.make_class("FeedOnce", ['ph', 'var', 'assign_op'],
slots=True, frozen=True)
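# Lightweight immutable record types: DataSource bundles a data-source functor
# with its dtype and provider name, DataSink bundles a sink functor with its
# provider name, and FeedOnce groups the placeholder, variable and assign op
# used for arrays that are fed only once.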
class RimeSolver(MontblancTensorflowSolver):
""" RIME Solver Implementation """
def __init__(self, slvr_cfg):
"""
RimeSolver Constructor
Parameters:
slvr_cfg : SolverConfiguration
Solver Configuration variables
"""
super(RimeSolver, self).__init__(slvr_cfg)
#=========================================
# Register hypercube Dimensions
#=========================================
cube, slvr_cfg = self.hypercube, self.config()
_setup_hypercube(cube, slvr_cfg)
#=======================
# Data Sources and Sinks
#=======================
# Get the defaults data source (default or test data)
data_source = slvr_cfg['data_source']
montblanc.log.info("Defaults Data Source '{}'".format(data_source))
# Construct list of data sources and sinks
# internal to the solver.
# These will be overridden by source and sink
# providers supplied by the user in the solve()
# method
default_prov = _create_defaults_source_provider(cube, data_source)
self._source_providers = [default_prov]
self._sink_providers = [NullSinkProvider()]
#==================
# Data Source Cache
#==================
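        # Thread-safe cache keyed by the hash of a descriptor's bytes: the
        # feed stage stores each chunk's input arrays here so the consume
        # stage can later hand them to the data sinks.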
class SourceCache(object):
def __init__(self):
self._cache = {}
self._lock = threading.Lock()
def __getitem__(self, key):
key = hash(bytes(key))
with self._lock:
return self._cache[key]
def __setitem__(self, key, value):
key = hash(bytes(key))
with self._lock:
self._cache[key]=value
def __delitem__(self, key):
key = hash(bytes(key))
with self._lock:
del self._cache[key]
def pop(self, key, default=None):
key = hash(bytes(key))
with self._lock:
return self._cache.pop(key, default)
self._source_cache = SourceCache()
#==================
# Memory Budgeting
#==================
# For deciding whether to rebudget
self._previous_budget = 0
self._previous_budget_dims = {}
#================
# Cube Transcoder
#================
self._iter_dims = ['ntime', 'nbl']
self._transcoder = CubeDimensionTranscoder(self._iter_dims)
#================================
# Staging Area Data Source Configuration
#================================
dfs = { n: a for n, a in list(cube.arrays().items())
if not 'temporary' in a.tags }
# Descriptors are not user-defined arrays
# but a variable passed through describing a chunk of the
# problem. Make it look as if it's an array
if 'descriptor' in dfs:
raise KeyError("'descriptor' is reserved, "
"please use another array name.")
dfs['descriptor'] = AttrDict(dtype=np.int32)
#=========================
# Tensorflow devices
#=========================
from tensorflow.python.client import device_lib
devices = device_lib.list_local_devices()
device_type = slvr_cfg['device_type'].upper()
gpus = [d.name for d in devices if d.device_type == 'GPU']
cpus = [d.name for d in devices if d.device_type == 'CPU']
if device_type == 'GPU' and len(gpus) == 0:
montblanc.log.warning("No GPUs are present, falling back to CPU.")
device_type = 'CPU'
use_cpus = device_type == 'CPU'
montblanc.log.info("Using '{}' devices for compute".format(device_type))
self._devices = cpus if use_cpus else gpus
self._shards_per_device = spd = 2
self._nr_of_shards = shards = len(self._devices)*spd
# shard_id == d*spd + shard
self._shard = lambda d, s: d*spd + s
assert len(self._devices) > 0
#=========================
# Tensorflow Compute Graph
#=========================
# Create all tensorflow constructs within the compute graph
with tf.Graph().as_default() as compute_graph:
# Create our data feeding structure containing
# input/output staging_areas and feed once variables
self._tf_feed_data = _construct_tensorflow_feed_data(
dfs, cube, self._iter_dims, shards)
# Construct tensorflow expressions for each shard
self._tf_expr = [_construct_tensorflow_expression(
slvr_cfg,
self._tf_feed_data, dev, self._shard(d,s))
for d, dev in enumerate(self._devices)
for s in range(self._shards_per_device)]
# Now forbid modification of the graph
compute_graph.finalize()
#==========================================
# Tensorflow Session
#==========================================
# Create the tensorflow session object
# Use supplied target, if present
tf_server_target = slvr_cfg.get('tf_server_target', '')
montblanc.log.debug("Attaching session to tensorflow server "
"'{tfs}'".format(tfs=tf_server_target))
session_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
self._tf_session = tf.compat.v1.Session(tf_server_target,
graph=compute_graph, config=session_config)
#======================
# Thread pool executors
#======================
tpe = cf.ThreadPoolExecutor
self._descriptor_executor = tpe(1)
self._feed_executors = [tpe(1) for i in range(shards)]
self._compute_executors = [tpe(1) for i in range(shards)]
self._consumer_executor = tpe(1)
class InputsWaiting(object):
"""
Keep track of the number of inputs waiting
to be consumed on each shard
"""
def __init__(self, shards):
self._lock = threading.Lock()
self._inputs_waiting = np.zeros(shape=(shards,), dtype=np.int32)
def get(self):
with self._lock:
return self._inputs_waiting
def increment(self, shard):
with self._lock:
self._inputs_waiting[shard] += 1
def decrement(self, shard):
with self._lock:
self._inputs_waiting[shard] -= 1
self._inputs_waiting = InputsWaiting(shards)
#======================
# Tracing
#======================
class RunMetaData(object):
def __init__(self):
self._rm = []
self._lock = threading.Lock()
def clear(self):
with self._lock:
self._rm = []
def save(self, run_metadata):
with self._lock:
self._rm.append(run_metadata)
def write(self, tag=None):
with self._lock:
if len(self._rm) == 0:
return
if tag is None:
tag='0'
metadata = tf.compat.v1.RunMetadata()
[metadata.MergeFrom(m) for m in self._rm]
tl = timeline.Timeline(metadata.step_stats)
trace_filename = 'compute_timeline_%d.json' % tag
with open(trace_filename, 'w') as f:
f.write(tl.generate_chrome_trace_format())
f.write('\n')
#============================
# Wrap tensorflow Session.run
#============================
self._should_trace = False
self._run_metadata = RunMetaData()
def _tfrunner(session, should_trace=False):
""" Wrap the tensorflow Session.run method """
trace_level = (tf.compat.v1.RunOptions.FULL_TRACE if should_trace
else tf.compat.v1.RunOptions.NO_TRACE)
options = tf.compat.v1.RunOptions(trace_level=trace_level)
def _runner(*args, **kwargs):
""" Pass options through """
return session.run(*args, options=options, **kwargs)
def _meta_runner(*args, **kwargs):
""" Aggregate run metadata for each run """
try:
run_metadata = tf.compat.v1.RunMetadata()
return session.run(*args, options=options,
run_metadata=run_metadata,
**kwargs)
finally:
self._run_metadata.save(run_metadata)
return _meta_runner if should_trace else _runner
self._tfrun = _tfrunner(self._tf_session, self._should_trace)
self._iterations = 0
def _descriptor_feed(self):
try:
self._descriptor_feed_impl()
except Exception as e:
montblanc.log.exception("Descriptor Exception")
raise
def _descriptor_feed_impl(self):
session = self._tf_session
# Copy dimensions of the main cube
cube = self.hypercube.copy()
LSA = self._tf_feed_data.local
# Get space of iteration
iter_args = _iter_args(self._iter_dims, cube)
descriptors_fed = 0
# Iterate through the hypercube space
for i, iter_cube in enumerate(cube.cube_iter(*iter_args)):
descriptor = self._transcoder.encode(iter_cube.dimensions(copy=False))
feed_dict = {LSA.descriptor.placeholders[0] : descriptor }
montblanc.log.debug('Encoding {i} {d}'.format(i=i, d=descriptor))
session.run(LSA.descriptor.put_op, feed_dict=feed_dict)
descriptors_fed += 1
montblanc.log.info("Done feeding {n} descriptors.".format(
n=descriptors_fed))
feed_dict = {LSA.descriptor.placeholders[0] : [-1] }
session.run(LSA.descriptor.put_op, feed_dict=feed_dict)
def _feed(self, cube, data_sources, data_sinks, global_iter_args):
""" Feed stub """
try:
self._feed_impl(cube, data_sources, data_sinks, global_iter_args)
except Exception as e:
montblanc.log.exception("Feed Exception")
raise
def _feed_impl(self, cube, data_sources, data_sinks, global_iter_args):
""" Implementation of staging_area feeding """
session = self._tf_session
FD = self._tf_feed_data
LSA = FD.local
# Get source strides out before the local sizes are modified during
# the source loops below
src_types = list(LSA.sources.keys())
src_strides = [int(i) for i in cube.dim_extent_size(*src_types)]
src_staging_areas = [[LSA.sources[t][s] for t in src_types]
for s in range(self._nr_of_shards)]
compute_feed_dict = { ph: cube.dim_global_size(n) for
n, ph in list(FD.src_ph_vars.items()) }
compute_feed_dict.update({ ph: getattr(cube, n) for
n, ph in list(FD.property_ph_vars.items()) })
chunks_fed = 0
which_shard = itertools.cycle([self._shard(d,s)
for s in range(self._shards_per_device)
for d, dev in enumerate(self._devices)])
while True:
try:
# Get the descriptor describing a portion of the RIME
result = session.run(LSA.descriptor.get_op)
descriptor = result['descriptor']
except tf.errors.OutOfRangeError as e:
montblanc.log.exception("Descriptor reading exception")
# Quit if EOF
if descriptor[0] == -1:
break
# Make it read-only so we can hash the contents
descriptor.flags.writeable = False
# Find indices of the emptiest staging_areas and, by implication
# the shard with the least work assigned to it
emptiest_staging_areas = np.argsort(self._inputs_waiting.get())
shard = emptiest_staging_areas[0]
shard = next(which_shard)
feed_f = self._feed_executors[shard].submit(self._feed_actual,
data_sources.copy(), cube.copy(),
descriptor, shard,
src_types, src_strides, src_staging_areas[shard],
global_iter_args)
compute_f = self._compute_executors[shard].submit(self._compute,
compute_feed_dict, shard)
consume_f = self._consumer_executor.submit(self._consume,
data_sinks.copy(), cube.copy(), global_iter_args)
self._inputs_waiting.increment(shard)
yield (feed_f, compute_f, consume_f)
chunks_fed += 1
montblanc.log.info("Done feeding {n} chunks.".format(n=chunks_fed))
def _feed_actual(self, *args):
try:
return self._feed_actual_impl(*args)
except Exception as e:
montblanc.log.exception("Feed Exception")
raise
def _feed_actual_impl(self, data_sources, cube,
descriptor, shard,
src_types, src_strides, src_staging_areas,
global_iter_args):
session = self._tf_session
iq = self._tf_feed_data.local.feed_many[shard]
# Decode the descriptor and update our cube dimensions
dims = self._transcoder.decode(descriptor)
cube.update_dimensions(dims)
# Determine array shapes and data types for this
# portion of the hypercube
array_schemas = cube.arrays(reify=True)
# Inject a data source and array schema for the
# descriptor staging_area items.
# These aren't full on arrays per se
# but they need to work within the feeding framework
array_schemas['descriptor'] = descriptor
data_sources['descriptor'] = DataSource(
lambda c: descriptor, np.int32, 'Internal')
# Generate (name, placeholder, datasource, array schema)
# for the arrays required by each staging_area
gen = ((a, ph, data_sources[a], array_schemas[a])
for ph, a in zip(iq.placeholders, iq.fed_arrays))
# Get input data by calling the data source functors
input_data = [(a, ph, _get_data(ds, SourceContext(a, cube,
self.config(), global_iter_args,
cube.array(a) if a in cube.arrays() else {},
ad.shape, ad.dtype)))
for (a, ph, ds, ad) in gen]
# Create a feed dictionary from the input data
feed_dict = { ph: data for (a, ph, data) in input_data }
# Cache the inputs for this chunk of data,
# so that sinks can access them
input_cache = { a: data for (a, ph, data) in input_data }
self._source_cache[descriptor.data] = input_cache
montblanc.log.info("Enqueueing chunk {d} on shard {sh}".format(
d=descriptor, sh=shard))
self._tfrun(iq.put_op, feed_dict=feed_dict)
# For each source type, feed that source staging_area
for src_type, staging_area, stride in zip(src_types, src_staging_areas, src_strides):
iter_args = [(src_type, stride)]
# Iterate over chunks of the source
for chunk_i, dim_desc in enumerate(cube.dim_iter(*iter_args)):
cube.update_dimensions(dim_desc)
s = dim_desc[0]['upper_extent'] - dim_desc[0]['lower_extent']
montblanc.log.info("'{ci}: Enqueueing {d} '{s}' '{t}' sources "
"on shard {sh}".format(d=descriptor,
ci=chunk_i, s=s, t=src_type, sh=shard))
# Determine array shapes and data types for this
# portion of the hypercube
array_schemas = cube.arrays(reify=True)
# Generate (name, placeholder, datasource, array descriptor)
# for the arrays required by each staging_area
gen = [(a, ph, data_sources[a], array_schemas[a])
for ph, a in zip(staging_area.placeholders, staging_area.fed_arrays)]
# Create a feed dictionary by calling the data source functors
feed_dict = { ph: _get_data(ds, SourceContext(a, cube,
self.config(), global_iter_args + iter_args,
cube.array(a) if a in cube.arrays() else {},
ad.shape, ad.dtype))
for (a, ph, ds, ad) in gen }
self._tfrun(staging_area.put_op, feed_dict=feed_dict)
def _compute(self, feed_dict, shard):
""" Call the tensorflow compute """
try:
descriptor, enq = self._tfrun(self._tf_expr[shard], feed_dict=feed_dict)
self._inputs_waiting.decrement(shard)
except Exception as e:
montblanc.log.exception("Compute Exception")
raise
def _consume(self, data_sinks, cube, global_iter_args):
""" Consume stub """
try:
return self._consume_impl(data_sinks, cube, global_iter_args)
except Exception as e:
montblanc.log.exception("Consumer Exception")
six.reraise(Exception, Exception(e), sys.exc_info()[2])
def _consume_impl(self, data_sinks, cube, global_iter_args):
""" Consume """
LSA = self._tf_feed_data.local
output = self._tfrun(LSA.output.get_op)
# Expect the descriptor in the first tuple position
assert len(output) > 0
assert LSA.output.fed_arrays[0] == 'descriptor'
descriptor = output['descriptor']
# Make it read-only so we can hash the contents
descriptor.flags.writeable = False
dims = self._transcoder.decode(descriptor)
cube.update_dimensions(dims)
# Obtain and remove input data from the source cache
try:
input_data = self._source_cache.pop(descriptor.data)
except KeyError:
raise ValueError("No input data cache available "
"in source cache for descriptor {}!"
.format(descriptor))
# For each array in our output, call the associated data sink
gen = ((n, a) for n, a in list(output.items()) if not n == 'descriptor')
for n, a in gen:
sink_context = SinkContext(n, cube,
self.config(), global_iter_args,
cube.array(n) if n in cube.arrays() else {},
a, input_data)
_supply_data(data_sinks[n], sink_context)
def solve(self, *args, **kwargs):
# Obtain source and sink providers, including internal providers
source_providers = (self._source_providers +
kwargs.get('source_providers', []))
sink_providers = (self._sink_providers +
kwargs.get('sink_providers', []))
src_provs_str = 'Source Providers ' + str([sp.name() for sp
in source_providers])
snk_provs_str = 'Sink Providers ' + str([sp.name() for sp
in sink_providers])
montblanc.log.info(src_provs_str)
montblanc.log.info(snk_provs_str)
# Allow providers to initialise themselves based on
# the given configuration
ctx = InitialisationContext(self.config())
for p in itertools.chain(source_providers, sink_providers):
p.init(ctx)
# Apply any dimension updates from the source provider
# to the hypercube, taking previous reductions into account
bytes_required = _apply_source_provider_dim_updates(
self.hypercube, source_providers,
self._previous_budget_dims)
# If we use more memory than previously,
# perform another budgeting operation
# to make sure everything fits
if bytes_required > self._previous_budget:
self._previous_budget_dims, self._previous_budget = (
_budget(self.hypercube, self.config()))
# Determine the global iteration arguments
# e.g. [('ntime', 100), ('nbl', 20)]
global_iter_args = _iter_args(self._iter_dims, self.hypercube)
# Indicate solution started in providers
ctx = StartContext(self.hypercube, self.config(), global_iter_args)
for p in itertools.chain(source_providers, sink_providers):
p.start(ctx)
#===================================
# Assign data to Feed Once variables
#===================================
# Copy the hypercube
cube = self.hypercube.copy()
array_schemas = cube.arrays(reify=True)
# Construct data sources from those supplied by the
# source providers, if they're associated with
# input sources
LSA = self._tf_feed_data.local
input_sources = LSA.input_sources
data_sources = {n: DataSource(f, cube.array(n).dtype, prov.name())
for prov in source_providers
for n, f in list(prov.sources().items())
if n in input_sources}
# Get data sinks from supplied providers
data_sinks = { n: DataSink(f, prov.name())
for prov in sink_providers
for n, f in list(prov.sinks().items())
if not n == 'descriptor' }
# Construct a feed dictionary from data sources
feed_dict = { fo.ph: _get_data(data_sources[k],
SourceContext(k, cube,
self.config(), global_iter_args,
cube.array(k) if k in cube.arrays() else {},
array_schemas[k].shape,
array_schemas[k].dtype))
for k, fo
in list(LSA.feed_once.items()) }
self._run_metadata.clear()
# Run the assign operations for each feed_once variable
assign_ops = [fo.assign_op.op for fo in list(LSA.feed_once.values())]
self._tfrun(assign_ops, feed_dict=feed_dict)
try:
# Run the descriptor executor immediately
params = self._descriptor_executor.submit(self._descriptor_feed)
# Sets to track futures not yet completed
feed_not_done = set()
compute_not_done = set([params])
consume_not_done = set()
throttle_factor = self._nr_of_shards*QUEUE_SIZE
# _feed_impl generates 3 futures
# one for feeding data, one for computing with this data
# and another for consuming it.
# Iterate over these futures
for feed, compute, consume in self._feed_impl(cube,
data_sources, data_sinks, global_iter_args):
feed_not_done.add(feed)
compute_not_done.add(compute)
consume_not_done.add(consume)
# If there are many feed futures in flight,
# perform throttling
if len(feed_not_done) > throttle_factor*2:
# Wait for throttle_factor futures to complete
fit = cf.as_completed(feed_not_done)
feed_done = set(itertools.islice(fit, throttle_factor))
feed_not_done.difference_update(feed_done)
                    # Take any completed compute and consume
# futures immediately
compute_done, compute_not_done = cf.wait(
compute_not_done, timeout=0,
return_when=cf.FIRST_COMPLETED)
consume_done, consume_not_done = cf.wait(
consume_not_done, timeout=0,
return_when=cf.FIRST_COMPLETED)
# Get future results, mainly to fire exceptions
for i, f in enumerate(itertools.chain(feed_done,
compute_done, consume_done)):
f.result()
not_done = sum(len(s) for s in (feed_not_done,
compute_not_done, consume_not_done))
montblanc.log.debug("Consumed {} futures. "
"{} remaining".format(i, not_done))
# Request future results, mainly for exceptions
for f in cf.as_completed(itertools.chain(feed_not_done,
compute_not_done, consume_not_done)):
f.result()
except (KeyboardInterrupt, SystemExit) as e:
montblanc.log.exception('Solving interrupted')
raise
except Exception:
montblanc.log.exception('Solving exception')
raise
else:
if self._should_trace:
self._run_metadata.write(self._iterations)
self._iterations += 1
finally:
# Indicate solution stopped in providers
ctx = StopContext(self.hypercube, self.config(), global_iter_args)
for p in itertools.chain(source_providers, sink_providers):
p.stop(ctx)
montblanc.log.info('Solution Completed')
def close(self):
# Shutdown thread executors
self._descriptor_executor.shutdown()
[fe.shutdown() for fe in self._feed_executors]
[ce.shutdown() for ce in self._compute_executors]
self._consumer_executor.shutdown()
        # Shutdown the tensorflow session
self._tf_session.close()
# Shutdown data sources
for source in self._source_providers:
source.close()
# Shutdown data sinks
for sink in self._sink_providers:
sink.close()
def __enter__(self):
return self
def __exit__(self, etype, evalue, etrace):
self.close()
def _create_defaults_source_provider(cube, data_source):
"""
Create a DefaultsSourceProvider object. This provides default
data sources for each array defined on the hypercube. The data sources
    may be obtained either from an array's 'default' data source
    or from its 'test' data source.
"""
from montblanc.impl.rime.tensorflow.sources import (
find_sources, DEFAULT_ARGSPEC)
from montblanc.impl.rime.tensorflow.sources import constant_cache
# Obtain default data sources for each array,
# Just take from defaults if test data isn't specified
staging_area_data_source = ('default' if not data_source == 'test'
else data_source)
cache = True
default_prov = DefaultsSourceProvider(cache=cache)
# Create data sources on the source provider from
# the cube array data sources
for n, a in list(cube.arrays().items()):
# Unnecessary for temporary arrays
if 'temporary' in a.tags:
continue
# Obtain the data source
data_source = a.get(staging_area_data_source)
# Array marked as constant, decorate the data source
# with a constant caching decorator
if cache is True and 'constant' in a.tags:
data_source = constant_cache(data_source)
method = types.MethodType(data_source, default_prov)
setattr(default_prov, n, method)
def _sources(self):
"""
Override the sources method to also handle lambdas that look like
lambda s, c: ..., as defined in the config module
"""
try:
return self._sources
except AttributeError:
self._sources = find_sources(self, [DEFAULT_ARGSPEC] + [['s', 'c']])
return self._sources
# Monkey patch the sources method
default_prov.sources = types.MethodType(_sources, default_prov)
return default_prov
def _construct_tensorflow_feed_data(dfs, cube, iter_dims,
nr_of_input_staging_areas):
FD = AttrDict()
# https://github.com/bcj/AttrDict/issues/34
FD._setattr('_sequence_type', list)
# Reference local staging_areas
FD.local = local = AttrDict()
# https://github.com/bcj/AttrDict/issues/34
local._setattr('_sequence_type', list)
    # Create placeholder variables for source counts
FD.src_ph_vars = AttrDict({
n: tf.compat.v1.placeholder(dtype=tf.int32, shape=(), name=n)
for n in ['nsrc'] + mbu.source_nr_vars()})
# Create placeholder variables for properties
FD.property_ph_vars = AttrDict({
n: tf.compat.v1.placeholder(dtype=p.dtype, shape=(), name=n)
for n, p in list(cube.properties().items()) })
#========================================================
# Determine which arrays need feeding once/multiple times
#========================================================
# Take all arrays flagged as input
input_arrays = [a for a in list(cube.arrays().values())
if 'input' in a.tags]
src_data_sources, feed_many, feed_once = _partition(iter_dims,
input_arrays)
#=====================================
# Descriptor staging area
#=====================================
local.descriptor = create_staging_area_wrapper('descriptors',
['descriptor'], dfs)
#===========================================
# Staging area for multiply fed data sources
#===========================================
# Create the staging_area for holding the feed many input
local.feed_many = [create_staging_area_wrapper('feed_many_%d' % i,
['descriptor'] + [a.name for a in feed_many], dfs)
for i in range(nr_of_input_staging_areas)]
#=================================================
# Staging areas for each radio source data sources
#=================================================
# Create the source array staging areas
local.sources = { src_nr_var: [
create_staging_area_wrapper('%s_%d' % (src_type, i),
[a.name for a in src_data_sources[src_nr_var]], dfs)
for i in range(nr_of_input_staging_areas)]
for src_type, src_nr_var in list(source_var_types().items())
}
#======================================
# The single output staging_area
#======================================
local.output = create_staging_area_wrapper('output',
['descriptor', 'model_vis', 'chi_squared'], dfs)
#=================================================
# Create tensorflow variables which are
# fed only once via an assign operation
#=================================================
def _make_feed_once_tuple(array):
name = array.name
dtype = dfs[name].dtype
shape = tuple([None if isinstance(x, str) else x
for x in array.shape])
ph = tf.compat.v1.placeholder(dtype=dtype,
name=name + "_placeholder", shape=shape)
var = tf.compat.v1.Variable(ph,
name=name)
op = tf.compat.v1.assign(var, ph)
return FeedOnce(ph, var, op)
# Create placeholders, variables and assign operators
# for data sources that we will only feed once
local.feed_once = { a.name : _make_feed_once_tuple(a)
for a in feed_once }
#=======================================================
# Construct the list of data sources that need feeding
#=======================================================
# Data sources from input staging_areas
src_sa = [q for sq in list(local.sources.values()) for q in sq]
all_staging_areas = local.feed_many + src_sa
input_sources = { a for q in all_staging_areas
for a in q.fed_arrays}
# Data sources from feed once variables
input_sources.update(list(local.feed_once.keys()))
local.input_sources = input_sources
return FD
def _construct_tensorflow_expression(slvr_cfg, feed_data, device, shard):
""" Constructs a tensorflow expression for computing the RIME """
zero = tf.constant(0)
src_count = zero
src_ph_vars = feed_data.src_ph_vars
LSA = feed_data.local
polarisation_type = slvr_cfg['polarisation_type']
# Pull RIME inputs out of the feed staging_area
# of the relevant shard, adding the feed once
# inputs to the dictionary
D = LSA.feed_many[shard].get_to_attrdict()
D.update({k: fo.var for k, fo in list(LSA.feed_once.items())})
with tf.device(device):
# Infer chunk dimensions
model_vis_shape = tf.shape(input=D.model_vis)
ntime, nbl, nchan, npol = [model_vis_shape[i] for i in range(4)]
# Infer float and complex type
FT, CT = D.uvw.dtype, D.model_vis.dtype
# Compute sine and cosine of parallactic angles
pa_sin, pa_cos = rime.parallactic_angle_sin_cos(
D.parallactic_angles[:, :] +
D.feed_angles[None, :])
# Compute feed rotation
feed_rotation = rime.feed_rotation(pa_sin, pa_cos, CT=CT,
feed_type=polarisation_type)
def antenna_jones(radec, stokes):
"""
Compute the jones terms for each antenna.
lm, stokes and alpha are the source variables.
"""
lm = rime.radec_to_lm(radec, D.phase_centre)
# Compute the complex phase
cplx_phase = rime.phase(lm, D.uvw, D.frequency, CT=CT)
# Check for nans/infs in the complex phase
phase_msg = ("Check that '1 - l**2 - m**2 >= 0' holds "
"for all your lm coordinates. This is required "
"for 'n = sqrt(1 - l**2 - m**2) - 1' "
"to be finite.")
phase_real = tf.debugging.check_numerics(tf.math.real(cplx_phase), phase_msg)
phase_imag = tf.debugging.check_numerics(tf.math.imag(cplx_phase), phase_msg)
# Compute the square root of the brightness matrix
# (as well as the sign)
bsqrt, sgn_brightness = rime.b_sqrt(stokes, CT=CT,
polarisation_type=polarisation_type)
# Check for nans/infs in the bsqrt
bsqrt_msg = ("Check that your stokes parameters "
"satisfy I**2 >= Q**2 + U**2 + V**2. "
"Montblanc performs a cholesky decomposition "
"of the brightness matrix and the above must "
"hold for this to produce valid values.")
bsqrt_real = tf.debugging.check_numerics(tf.math.real(bsqrt), bsqrt_msg)
bsqrt_imag = tf.debugging.check_numerics(tf.math.imag(bsqrt), bsqrt_msg)
# Compute the direction dependent effects from the beam
#radec_prime = radec * tf.cast(tf.stack([-1.0, 1.0]), radec.dtype)
#phase_centre_prime = D.phase_centre * tf.cast(tf.stack([-1.0, 1.0]), D.phase_centre.dtype)
#def normang(val):
# """ Normalizes angle between [-pi, pi) """
# return ( val + np.pi) % ( 2 * np.pi ) - np.pi
#cube_pos = normang(normang(radec_prime) - normang(phase_centre_prime))
ejones = rime.e_beam(lm, D.frequency,
D.pointing_errors, D.antenna_scaling,
pa_sin, pa_cos,
D.beam_extents, D.beam_freq_map, D.ebeam)
deps = [phase_real, phase_imag, bsqrt_real, bsqrt_imag]
deps = [] # Do nothing for now
# Combine the brightness square root, complex phase,
# feed rotation and beam dde's
with tf.control_dependencies(deps):
antenna_jones = rime.create_antenna_jones(bsqrt, cplx_phase,
feed_rotation, ejones,
FT=FT)
return antenna_jones, sgn_brightness
# While loop condition for each point source type
def point_cond(coherencies, npsrc, src_count):
return tf.less(npsrc, src_ph_vars.npsrc)
def gaussian_cond(coherencies, ngsrc, src_count):
return tf.less(ngsrc, src_ph_vars.ngsrc)
def sersic_cond(coherencies, nssrc, src_count):
return tf.less(nssrc, src_ph_vars.nssrc)
# While loop bodies
def point_body(coherencies, npsrc, src_count):
""" Accumulate visiblities for point source batch """
S = LSA.sources['npsrc'][shard].get_to_attrdict()
# Maintain source counts
nsrc = tf.shape(input=S.point_lm)[0]
src_count += nsrc
npsrc += nsrc
ant_jones, sgn_brightness = antenna_jones(S.point_lm,
S.point_stokes)
shape = tf.ones(shape=[nsrc,ntime,nbl,nchan], dtype=FT)
coherencies = rime.sum_coherencies(D.antenna1, D.antenna2,
shape, ant_jones, sgn_brightness, coherencies)
return coherencies, npsrc, src_count
def gaussian_body(coherencies, ngsrc, src_count):
""" Accumulate coherencies for gaussian source batch """
S = LSA.sources['ngsrc'][shard].get_to_attrdict()
# Maintain source counts
nsrc = tf.shape(input=S.gaussian_lm)[0]
src_count += nsrc
ngsrc += nsrc
ant_jones, sgn_brightness = antenna_jones(S.gaussian_lm,
S.gaussian_stokes)
gauss_shape = rime.gauss_shape(D.uvw, D.antenna1, D.antenna2,
D.frequency, S.gaussian_shape)
coherencies = rime.sum_coherencies(D.antenna1, D.antenna2,
gauss_shape, ant_jones, sgn_brightness, coherencies)
return coherencies, ngsrc, src_count
def sersic_body(coherencies, nssrc, src_count):
""" Accumulate coherencies for sersic source batch """
S = LSA.sources['nssrc'][shard].get_to_attrdict()
# Maintain source counts
nsrc = tf.shape(input=S.sersic_lm)[0]
src_count += nsrc
nssrc += nsrc
ant_jones, sgn_brightness = antenna_jones(S.sersic_lm,
S.sersic_stokes)
sersic_shape = rime.sersic_shape(D.uvw, D.antenna1, D.antenna2,
D.frequency, S.sersic_shape)
coherencies = rime.sum_coherencies(D.antenna1, D.antenna2,
sersic_shape, ant_jones, sgn_brightness, coherencies)
return coherencies, nssrc, src_count
with tf.device(device):
base_coherencies = tf.zeros(shape=[ntime,nbl,nchan,npol], dtype=CT)
# Evaluate point sources
summed_coherencies, npsrc, src_count = tf.while_loop(
cond=point_cond, body=point_body,
loop_vars=[base_coherencies, zero, src_count])
# Evaluate gaussians
summed_coherencies, ngsrc, src_count = tf.while_loop(
cond=gaussian_cond, body=gaussian_body,
loop_vars=[summed_coherencies, zero, src_count])
# Evaluate sersics
summed_coherencies, nssrc, src_count = tf.while_loop(
cond=sersic_cond, body=sersic_body,
loop_vars=[summed_coherencies, zero, src_count])
        # Post process visibilities to produce model visibilities and chi squared
model_vis, chi_squared = rime.post_process_visibilities(
D.antenna1, D.antenna2, D.direction_independent_effects, D.flag,
D.weight, D.model_vis, summed_coherencies, D.observed_vis)
# Create enstaging_area operation
put_op = LSA.output.put_from_list([D.descriptor, model_vis, chi_squared])
# Return descriptor and enstaging_area operation
return D.descriptor, put_op
def _get_data(data_source, context):
""" Get data from the data source, checking the return values """
try:
# Get data from the data source
data = data_source.source(context)
# Complain about None values
if data is None:
raise ValueError("'None' returned from "
"data source '{n}'".format(n=context.name))
# We want numpy arrays
elif not isinstance(data, np.ndarray):
raise TypeError("Data source '{n}' did not "
"return a numpy array, returned a '{t}'".format(
t=type(data)))
# And they should be the right shape and type
elif data.shape != context.shape or data.dtype != context.dtype:
raise ValueError("Expected data of shape '{esh}' and "
"dtype '{edt}' for data source '{n}', but "
"shape '{rsh}' and '{rdt}' was found instead".format(
n=context.name, esh=context.shape, edt=context.dtype,
rsh=data.shape, rdt=data.dtype))
return data
except Exception as e:
ex = ValueError("An exception occurred while "
"obtaining data from data source '{ds}'\n\n"
"{e}\n\n"
"{help}".format(ds=context.name,
e=str(e), help=context.help()))
six.reraise(ValueError, ex, sys.exc_info()[2])
def _supply_data(data_sink, context):
""" Supply data to the data sink """
try:
data_sink.sink(context)
except Exception as e:
ex = ValueError("An exception occurred while "
"supplying data to data sink '{ds}'\n\n"
"{e}\n\n"
"{help}".format(ds=context.name,
e=str(e), help=context.help()))
six.reraise(ValueError, ex, sys.exc_info()[2])
def _iter_args(iter_dims, cube):
iter_strides = cube.dim_extent_size(*iter_dims)
return list(zip(iter_dims, iter_strides))
def _uniq_log2_range(start, size, div):
start = np.log2(start)
size = np.log2(size)
int_values = np.int32(np.logspace(start, size, div, base=2)[:-1])
return np.flipud(np.unique(int_values))
def _budget(cube, slvr_cfg):
# Figure out a viable dimension configuration
# given the total problem size
mem_budget = slvr_cfg.get('mem_budget', 2*ONE_GB)
bytes_required = cube.bytes_required()
src_dims = mbu.source_nr_vars() + ['nsrc']
dim_names = ['na', 'nbl', 'ntime'] + src_dims
global_sizes = cube.dim_global_size(*dim_names)
na, nbl, ntime = global_sizes[:3]
# Keep track of original dimension sizes and any reductions that are applied
original_sizes = { r: s for r, s in zip(dim_names, global_sizes) }
applied_reductions = {}
def _reduction():
# Reduce over time first
trange = _uniq_log2_range(1, ntime, 5)
for t in trange[0:1]:
yield [('ntime', t)]
# Attempt reduction over source
sbs = slvr_cfg['source_batch_size']
srange = _uniq_log2_range(10, sbs, 5) if sbs > 10 else 10
src_dim_gs = global_sizes[3:]
for bs in srange:
yield [(d, bs if bs < gs else gs) for d, gs
in zip(src_dims, src_dim_gs)]
# Try the rest of the timesteps
for t in trange[1:]:
yield [('ntime', t)]
# Reduce by baseline
for bl in _uniq_log2_range(na, nbl, 5):
yield [('nbl', bl)]
for reduction in _reduction():
if bytes_required > mem_budget:
for dim, size in reduction:
applied_reductions[dim] = size
cube.update_dimension(dim, lower_extent=0, upper_extent=size)
else:
break
bytes_required = cube.bytes_required()
# Log some information about the memory_budget
# and dimension reduction
montblanc.log.info(("Selected a solver memory budget of {rb} "
"given a hard limit of {mb}.").format(
rb=mbu.fmt_bytes(bytes_required),
mb=mbu.fmt_bytes(mem_budget)))
if len(applied_reductions) > 0:
montblanc.log.info("The following dimension reductions "
"were applied:")
for k, v in list(applied_reductions.items()):
montblanc.log.info('{p}{d}: {id} => {rd}'.format
(p=' '*4, d=k, id=original_sizes[k], rd=v))
else:
montblanc.log.info("No dimension reductions were applied.")
return applied_reductions, bytes_required
DimensionUpdate = attr.make_class("DimensionUpdate",
['size', 'prov'], slots=True, frozen=True)
def _apply_source_provider_dim_updates(cube, source_providers, budget_dims):
"""
Given a list of source_providers, apply the list of
suggested dimension updates given in provider.updated_dimensions()
to the supplied hypercube.
Dimension global_sizes are always updated with the supplied sizes and
lower_extent is always set to 0. upper_extent is set to any reductions
(current upper_extents) existing in budget_dims, otherwise it is set
to global_size.
"""
# Create a mapping between a dimension and a
# list of (global_size, provider_name) tuples
update_map = collections.defaultdict(list)
for prov in source_providers:
for dim_tuple in prov.updated_dimensions():
name, size = dim_tuple
# Don't accept any updates on the nsrc dimension
# This is managed internally
if name == 'nsrc':
continue
dim_update = DimensionUpdate(size, prov.name())
update_map[name].append(dim_update)
# No dimensions were updated, quit early
if len(update_map) == 0:
return cube.bytes_required()
# Ensure that the global sizes we receive
# for each dimension are unique. Tell the user
# when conflicts occur
update_list = []
for name, updates in list(update_map.items()):
if not all(updates[0].size == du.size for du in updates[1:]):
raise ValueError("Received conflicting "
"global size updates '{u}'"
" for dimension '{n}'.".format(n=name, u=updates))
update_list.append((name, updates[0].size))
montblanc.log.info("Updating dimensions {} from "
"source providers.".format(str(update_list)))
# Now update our dimensions
for name, global_size in update_list:
        # Defer to any existing budgeted extent sizes
# Otherwise take the global_size
extent_size = budget_dims.get(name, global_size)
# Take the global_size if extent_size was previously zero!
extent_size = global_size if extent_size == 0 else extent_size
# Clamp extent size to global size
if extent_size > global_size:
extent_size = global_size
# Update the dimension
cube.update_dimension(name,
global_size=global_size,
lower_extent=0,
upper_extent=extent_size)
# Handle the global number of sources differently.
# It is equal to the number of
# points, gaussians and sersics combined
nsrc = sum(cube.dim_global_size(*mbu.source_nr_vars()))
# Extent size will be equal to whatever source type
# we're currently iterating over. So just take
# the maximum extent size given the sources
es = max(cube.dim_extent_size(*mbu.source_nr_vars()))
cube.update_dimension('nsrc',
global_size=nsrc,
lower_extent=0,
upper_extent=es)
# Return our cube size
return cube.bytes_required()
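# Illustrative usage sketch (added for clarity; the provider class and the
# dimension sizes are invented, but the method names follow the docstring above):
#
#   class MSProvider(object):
#       def name(self):
#           return "ms"
#       def updated_dimensions(self):
#           return [('ntime', 1000), ('nbl', 2016)]
#
#   nbytes = _apply_source_provider_dim_updates(cube, [MSProvider()], {'ntime': 32})
#   # 'ntime' keeps its budgeted upper extent of 32; 'nbl' gets upper extent 2016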
def _setup_hypercube(cube, slvr_cfg):
""" Sets up the hypercube given a solver configuration """
mbu.register_default_dimensions(cube, slvr_cfg)
# Configure the dimensions of the beam cube
cube.register_dimension('beam_lw', 2,
description='E Beam cube l width')
cube.register_dimension('beam_mh', 2,
description='E Beam cube m height')
cube.register_dimension('beam_nud', 2,
description='E Beam cube nu depth')
# =========================================
# Register hypercube Arrays and Properties
# =========================================
from montblanc.impl.rime.tensorflow.config import (A, P)
def _massage_dtypes(A, T):
def _massage_dtype_in_dict(D):
new_dict = D.copy()
new_dict['dtype'] = mbu.dtype_from_str(D['dtype'], T)
return new_dict
return [_massage_dtype_in_dict(D) for D in A]
dtype = slvr_cfg['dtype']
is_f32 = dtype == 'float'
T = {
'ft' : np.float32 if is_f32 else np.float64,
'ct' : np.complex64 if is_f32 else np.complex128,
'int' : int,
}
cube.register_properties(_massage_dtypes(P, T))
cube.register_arrays(_massage_dtypes(A, T))
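# Illustrative note (added for clarity): with slvr_cfg['dtype'] == 'float', the
# _massage_dtypes helper above maps a config entry such as
# {'name': 'uvw', 'dtype': 'ft'} to {'name': 'uvw', 'dtype': np.float32},
# assuming mbu.dtype_from_str() resolves the 'ft'/'ct'/'int' aliases via T.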
def _partition(iter_dims, data_sources):
"""
Partition data sources into
1. Dictionary of data sources associated with radio sources.
2. List of data sources to feed multiple times.
3. List of data sources to feed once.
"""
src_nr_vars = set(source_var_types().values())
iter_dims = set(iter_dims)
src_data_sources = collections.defaultdict(list)
feed_many = []
feed_once = []
for ds in data_sources:
# Is this data source associated with
# a radio source (point, gaussian, etc.?)
src_int = src_nr_vars.intersection(ds.shape)
if len(src_int) > 1:
raise ValueError("Data source '{}' contains multiple "
"source types '{}'".format(ds.name, src_int))
elif len(src_int) == 1:
# Yep, record appropriately and iterate
src_data_sources[src_int.pop()].append(ds)
continue
# Are we feeding this data source multiple times
# (Does it possess dimensions on which we iterate?)
if len(iter_dims.intersection(ds.shape)) > 0:
feed_many.append(ds)
continue
# Assume this is a data source that we only feed once
feed_once.append(ds)
return src_data_sources, feed_many, feed_once
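# Illustrative usage sketch (added for clarity; the data source objects and the
# dimension names are invented):
#
#   srcs, many, once = _partition(['ntime', 'nbl'], [uvw_ds, lm_ds, ant_pos_ds])
#   # lm_ds, shaped on a radio source dimension, lands in the srcs dict;
#   # uvw_ds, shaped on the iterated 'ntime'/'nbl' dims, lands in feed_many;
#   # ant_pos_ds, with neither, lands in feed_once.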
|
ska-sa/montblanc
|
montblanc/impl/rime/tensorflow/RimeSolver.py
|
Python
|
gpl-2.0
| 51,658
|
[
"Gaussian"
] |
48dca86ca1b6f26ae7cebe8ea4b72d12d496a5ce8b5e8f9dfbd1c35f8052ce8a
|
# Rewritten by RayzoR
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "135_TempleExecutor"
# NPCs
SHEGFIELD = 30068
ALEX = 30291
SONIN = 31773
PANO = 30078
# ITEMs
CARGO = 10328
CRYSTAL = 10329
MAP = 10330
SONIN_CR = 10331
PANO_CR = 10332
ALEX_CR = 10333
BADGE = 10334
# MONSTERs
NPC = [20781,21104,21105,21106,21107]
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [CARGO,CRYSTAL,MAP,SONIN_CR,ALEX_CR,PANO_CR]
def onEvent (self,event,st) :
htmltext = event
id = st.getState()
cond = st.getInt("cond")
if event == "30068-02.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "30068-09.htm" :
st.playSound("ItemSound.quest_finish")
st.unset("talk")
st.exitQuest(False)
st.giveItems(57, 16924)
st.giveItems(BADGE, 1)
if st.getPlayer().getLevel() >= 35 and st.getPlayer().getLevel() <= 40:
st.addExpAndSp(30000,2000)
elif event == "30068-03.htm" :
st.set("cond","2")
st.playSound("ItemSound.quest_middle")
elif event == "30291-06.htm" :
st.set("cond","3")
st.playSound("ItemSound.quest_middle")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
cond = st.getInt("cond")
if id == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif npcId == SHEGFIELD :
if cond == 0 :
if player.getLevel() >= 35 :
htmltext = "30068-01.htm"
else:
htmltext = "30068-00.htm"
st.exitQuest(1)
elif cond == 1 :
htmltext = "30068-02.htm"
elif cond in [2,3,4] :
htmltext = "30068-04.htm"
elif cond == 5 :
if st.getQuestItemsCount(SONIN_CR) and st.getQuestItemsCount(PANO_CR) and st.getQuestItemsCount(ALEX_CR):
htmltext = "30068-05.htm"
st.takeItems(SONIN_CR, -1)
st.takeItems(PANO_CR, -1)
st.takeItems(ALEX_CR, -1)
st.set("talk","1")
elif st.getInt("talk"):
htmltext = "30068-06.htm"
elif npcId == ALEX :
if cond == 2 :
htmltext = "30291-01.htm"
elif cond == 3 :
htmltext = "30291-07.htm"
elif cond == 4 :
if st.getQuestItemsCount(SONIN_CR) and st.getQuestItemsCount(PANO_CR):
st.takeItems(MAP, -1)
st.giveItems(ALEX_CR,1)
st.playSound("ItemSound.quest_middle")
st.set("cond","5")
htmltext = "30291-09.htm"
else:
htmltext = "30291-08.htm"
elif cond == 5 :
htmltext = "30291-10.htm"
elif npcId == SONIN :
if cond == 4 :
if st.getQuestItemsCount(CARGO) >= 10:
htmltext = "31773-01.htm"
st.playSound("ItemSound.quest_middle")
st.takeItems(CARGO, -1)
st.giveItems(SONIN_CR,1)
else:
htmltext = "31773-02.htm"
elif npcId == PANO :
if cond == 4 :
if st.getQuestItemsCount(CRYSTAL) >= 10:
htmltext = "30078-01.htm"
st.playSound("ItemSound.quest_middle")
st.takeItems(CRYSTAL, -1)
st.giveItems(PANO_CR,1)
else:
htmltext = "30078-02.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
if st.getInt("cond")==3 :
if st.getQuestItemsCount(CARGO) < 10:
st.giveItems(CARGO,1)
st.playSound("ItemSound.quest_itemget")
elif st.getQuestItemsCount(CRYSTAL) < 10:
st.giveItems(CRYSTAL,1)
st.playSound("ItemSound.quest_itemget")
elif st.getQuestItemsCount(MAP) < 10:
st.giveItems(MAP,1)
if st.getQuestItemsCount(MAP) >= 10 and st.getQuestItemsCount(CARGO) >= 10 and st.getQuestItemsCount(CRYSTAL) >= 10:
st.set("cond","4")
st.playSound("ItemSound.quest_middle")
else :
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(135,qn,"Temple Executor")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(SHEGFIELD)
QUEST.addTalkId(SHEGFIELD)
QUEST.addTalkId(ALEX)
QUEST.addTalkId(SONIN)
QUEST.addTalkId(PANO)
for mob in NPC :
QUEST.addKillId(mob)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/quests/135_TempleExecutor/__init__.py
|
Python
|
gpl-3.0
| 4,982
|
[
"CRYSTAL"
] |
eadf984afb876f4069f00e489e16af8e581659a2166ad35ebc359ebd51d875fe
|
"""Event arrays are 2D label arrays (time x ROI) that are generated from an
array of fluorescent traces of the same size.
Uses the following inequality to determine if an event occurred at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
See the findEvents() docstring for more info.
These routines are used to create and analyze event arrays. Note that
some of the event utility functions return masked numpy arrays. This
is because generally, there is a different number of events in each
cell during each trial. Anywhere there wasn't an event is a 'np.nan'
value, and the mask will ensure that it isn't used to calculate things
like mean(), min(), max() etc.
"""
import numpy as np
import traces as tm
from sklearn.mixture import GMM
import scipy.ndimage as nd
import mahotas
__all__ = ['findEvents', 'findEventsGMM', 'findEventsBackground',
'getCounts', 'getStartsAndStops', 'getDurations', 'getAvgAmplitudes', 'getWeightedEvents',
'fitGaussianMixture1D', 'getGMMBaselines']
#----------------------------------------EVENT FINDING FUNCTIONS AND WRAPPERS-----------------------------------
def findEvents(traces, stds, std_threshold=2.5, falling_std_threshold=None, baselines=None, boxWidth=3, minimum_length=2, alpha=None):
"""Core event finding routine with flexible syntax.
Uses the following inequality to determine if an event occurred at a specific time in a cell:
dF/F of cell > (baseline of cell + std_threshold * std of cell * alpha)
By default, the baseline is 0.0 (the dF/F traces have been baselined). This baseline can be
explicitly specified using the `baselines` parameter. If `baselines` is a 1d array, it is a
global correction value. If `baselines` is exactly the same size as `traces`, the routine
assumes that the baselines have been explicitly specified across all cells, trials and frames.
If `baselines` is of size (time x trials), then the routine assumes that the baseline value has
been determined for the whole population on a trial by trial basis. This is done in the routines
`findEventsBackground` and `findEventsGMM`.
The `alpha` parameter is here for flexibility. It allows for the scaling of the threshold of detection
on a cell by cell, frame by frame basis independent of the noise of a cell or its baseline value.
If specified it must be the exact same size as `traces`. By default it is set to 1.0.
The routine returns an event array exactly the same size as `traces`, where each event is labeled with
a unique number (an integer). The background is labeled with '0'. This can be used in all the utility
routines below.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:param: alpha - optional scaling parameter for adjusting thresholds
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
time, cells, trials = traces.shape
events = np.zeros_like(traces)
# broadcasting of baselines. ends up as time x cells x trials. this is really annoying,
# but relying on numpy to broadcast things was tricky and problematic. idea here is to
# get baselines identical to traces
if baselines is None: # no baseline correction, default
full_baselines = np.zeros_like(traces)
elif baselines.shape == (time,): # one global correction
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines
elif baselines.shape ==(time, cells): # full, but only one trial
full_baselines = baselines[:,:,None]
elif baselines.shape == (time, trials): # modeled on a trial by trial basis
full_baselines = np.zeros_like(traces)
for trial in range(trials):
for cell in range(cells):
full_baselines[:,cell,trial] = baselines[:,trial]
# this is a check to prevent a dip in the global population from calling stuff responders
# basically, if the estimated baseline falls below zero, we fall back to the implicit background
# value of 0.0
full_baselines[full_baselines<0.0] = 0.0
# alpha is a scaling factor for event detection. if used it has to be the same size and shape as traces.
# no broadcasting is done here. it scales the threshold for detection so by default it is 1.0 everywhere.
if alpha is None:
alpha = np.ones_like(full_baselines)
# smooth traces and baselines
if boxWidth != 0:
traces_smoothed = nd.convolve1d(traces, np.array([1]*boxWidth)/float(boxWidth), axis=0)
baselines_smoothed = nd.convolve1d(full_baselines, np.array([1]*boxWidth)/float(boxWidth), axis=0)
# detect events
for trial in range(trials):
for cell in range(cells):
events[:,cell,trial] = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(std_threshold) * alpha[:,cell,trial])
# filter for minimum length
events = mahotas.label(events, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
for single_event in range(1, events.max()+1):
if (events == single_event).sum() <= minimum_length:
events[events == single_event] = 0
events = events>0
# if a falling std is specified, extend events until they drop below that threshold
if falling_std_threshold is not None:
for trial in range(trials):
for cell in range(cells):
falling_thresh_events = traces_smoothed[:,cell,trial] > baselines_smoothed[:,cell,trial] + (stds[cell, trial] * float(falling_std_threshold) * alpha[:,cell,trial])
for event_end in np.argwhere(np.diff(events[:,cell,trial].astype(int)) == -1):
j = event_end
while (j<time) and ((events[j,cell,trial]) or (falling_thresh_events[j])):
events[j,cell,trial] = events[j-1,cell,trial]
j = j + 1
# finally label the event array and return it.
events = mahotas.label(events>0, np.array([1,1])[:,np.newaxis,np.newaxis])[0]
return np.squeeze(events)
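# Illustrative usage sketch (added for clarity; shapes follow the docstring,
# the data itself is invented):
#
#   traces = np.random.randn(1000, 50, 10) * 0.05   # time x cells x trials
#   stds = traces.std(axis=0)                       # cells x trials
#   events = findEvents(traces, stds, std_threshold=2.5, boxWidth=3)
#   # 'events' has the same shape as 'traces'; 0 marks background and
#   # each detected event carries its own integer label.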
def findEventsGMM(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using a mixture of gaussians model.
The major idea here is to use a mixture of two gaussians to model
the baselines within each trial as a mixture of two gaussians -
one for the 'baseline' and one for all the 'bright' responding
pixels. At each time point, the ROI brightnesses are fit with
with this GMM. The means of the two distributions are initialized
to the background 'cell' and all points brighter than the mean of
all ROIs. After fitting, the smaller of the two means at every
point is taken to be the 'background'. This generally is very
close to the average of the entire frame, but is generally smaller
during full field events, because the larger gaussian 'sucks up'
the spurious bright pixels.
See getGMMBaselines() for more information.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = getGMMBaselines(traces) # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
def findEventsBackground(traces, stds, std_threshold=2.5, falling_std_threshold=None, boxWidth=3, minimum_length=2):
"""Wrapper for findEvents with baseline estimation using the background..
Here, we estimate the population baseline for all the cells as the
'background cell', or cell 0. It is generally a fair estimation
of the general response of the field of view, but is imperfect due
to segmentation errors.
:param: traces - 2 or 3d numpy array of baselined and normalized traces (time x cells, or time x cells x trials)
:param: stds - 1 or 2d numpy event array of per-cell std values (cells, or cells x trials)
:param: std_threshold - multiple of per-cell STD to use for an event (float)
:param: falling_std_threshold - optional multiple of per-cell STD to use as end of an event (float)
:param: baselines - optional estimation of the baseline values of the cells
:param: boxWidth - filter size for smoothing traces and background values before detection
:param: minimum_length - minimum length of an event
:returns: numpy array same shape and size of traces, with each event given a unique integer label
"""
if traces.ndim == 2:
traces = np.atleast_3d(traces) # time x cells x trials
stds = np.atleast_2d(stds).T # cells x trials
baselines = traces[:,0,:].copy() # time x trials (one population baseline trace for all cells)
return findEvents(traces, stds, std_threshold, falling_std_threshold, baselines, boxWidth, minimum_length)
#----------------------------------------EVENT UTILITY FUNCTIONS-----------------------------------
def getStartsAndStops(event_array):
"""This routine takes an event_array and returns the starting and
stopping times for all events in the array.
:param: event_array - 2d or 3d numpy event array (time x cells, or time x cells x trials))
:returns: masked numpy arrays, one for starting times and stopping times.
size is cells x max event number or cells x trials x max event number.
masked array is to account for the variable number of events in each cell
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
starts = np.zeros((cells, trials, int(max_num_events)))
stops = np.zeros((cells, trials, int(max_num_events)))
starts[:] = np.nan
stops[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
starts[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[0]
stops[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).flatten()[-1]
starts = np.ma.array(starts, mask=np.isnan(starts))
starts = np.squeeze(starts)
stops = np.ma.array(stops, mask=np.isnan(stops))
stops = np.squeeze(stops)
return starts, stops
def getCounts(event_array, time_range=None):
"""This routine takes an event_array and optionally a time range
and returns the number of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 1d or 2d numpy array of counts (cells or cells x trials)
"""
if time_range is not None:
event_array = event_array[time_range[0]:time_range[1],:] # note that this works for 2 or 3d arrays...
if event_array.ndim == 2:
event_array = event_array[:,:,np.newaxis]
time, cells, trials = event_array.shape
counts = np.zeros((cells,trials))
for trial in range(trials):
for cell in range(cells):
counts[cell, trial] = np.unique(event_array[:,cell,trial]).size - 1
return np.squeeze(counts)
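# Worked example (single cell, single trial): for the labeled column
# [0, 0, 1, 1, 0, 2, 2, 2], np.unique() returns [0, 1, 2], so getCounts
# reports 2 events for that cell (the background label 0 is excluded).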
def getDurations(event_array, time_range=None):
"""This routine takes an event_array (time x cells) and returns
the duration of events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event durations. size is cells x largest number of events.
masked entries are to account for variable number of events
"""
event_array = np.atleast_3d(event_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
durations = np.zeros((cells, trials, int(max_num_events)))
durations[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
durations[cell, trial, i] = np.argwhere(event_array[:,cell,trial] == event_id).size
durations = np.ma.array(durations, mask=np.isnan(durations))
durations = np.squeeze(durations)
return durations
def getAvgAmplitudes(event_array, trace_array, time_range=None):
"""This routine takes an event_array (time x cells) and
corresponding trace array and returns the average amplitudes of
events in each cell.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: time_range - optional list of 2 numbers limiting the time range to count events
:returns: 2d masked numpy array of event average amplitudes. size is cells x largest number of events.
masked entries are to account for the variable number of events
"""
event_array = np.atleast_3d(event_array)
trace_array= np.atleast_3d(trace_array)
max_num_events = getCounts(event_array).max()
time, cells, trials = event_array.shape
amps = np.zeros((cells, trials, int(max_num_events)))
amps[:] = np.nan
for cell in range(cells):
for trial in range(trials):
event_ids = np.unique(event_array[:,cell,trial])[1:]
for i, event_id in enumerate(event_ids):
amps[cell, trial, i] = trace_array[event_array == event_id].mean()
amps = np.ma.array(amps, mask=np.isnan(amps))
amps = np.squeeze(amps)
return np.ma.masked_array(amps, np.isnan(amps))
def getWeightedEvents(event_array, trace_array):
"""This routine takes an event array and corresponding trace array
and replaces the event labels with the average amplitude of the
event.
:param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:param: trace_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
:returns: 2d numpy array same shape and size of event_array, zero where there
weren't events, and the average event amplitude for the event otherwise.
"""
weighted_events = np.zeros_like(event_array, dtype=float)
for i in np.unique(event_array)[1:]:
weighted_events[event_array==i] = trace_array[event_array==i].mean()
return weighted_events
#----------------------------------------GMM UTILITY FUNCTIONS-----------------------------------
def fitGaussianMixture1D(data, n=2, set_mean_priors=True):
"""Routine for fitting a 1d array to a mixture of `n` gaussians.
if 'set_mean_priors' is True (the default), we initialize the GMM
model with means equal to the first point (the 'background' cell)
and all ROIs larger than the mean. Otherwise, we have random means.
After fitting, we return the means, stds, and weights of the GMM,
along with the BIC, AIC, and the model itself.
:param: data - 1d array of data to fit
:param: n - number of gaussians to fit, defaults to 2
:param: set_mean_priors - boolean, if true, initializes the means of a mixture of 2 gaussians
:returns: tuple of (means, stds, weights, BIC, AIC, GMM model object)
"""
if set_mean_priors:
g = GMM(n_components=n, init_params='wc', n_init=5)
g.means_ = np.zeros((n, 1))
g.means_[0,0] = data[0] # first datapoint is the background value... should be near 0.0
g.means_[1,0] = data[data > data[0]].mean()
else:
g = GMM(n_components=n, n_init=5)
g.fit(data)
return (np.squeeze(g.means_.flatten()),
np.squeeze(np.sqrt(g.covars_).flatten()),
np.squeeze(g.weights_).flatten(),
g.bic(data),
g.aic(data),
g)
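# Illustrative usage sketch (added for clarity; the frame index is invented and
# GMM is the pre-0.18 sklearn class imported above):
#
#   frame = traces[100, :, 0]   # all ROI values at a single time point
#   means, stds, weights, bic, aic, model = fitGaussianMixture1D(frame, n=2)
#   baseline_estimate = means.min()   # the smaller mean approximates the background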
def getGMMBaselines(traces):
"""Wrapper for fitGaussianMixture1D() for findEventsGMM().
:param: traces - 2 or 3d numpy array of dF/F (time x cells, or time x cells x trials)
:returns: 1 or 2d numpy array of estimated baseline (time or time x trials).
"""
traces = np.atleast_3d(traces) # time x cells x trials
time, cells, trials = traces.shape
gmmBaselines = np.zeros((time, trials)) # one baseline estimation for each trial
for trial in range(trials):
for frame in range(time):
means, stds, weights, bic, aic, model = fitGaussianMixture1D(traces[frame,:,trial], 2)
gmmBaselines[frame, trial] = means.min()
return gmmBaselines
#----------------------------------------DEPRECATED EVENT FINDING FUNCTIONS-----------------------------------
def findEventsAtThreshold(traces, stds, rising_threshold, falling_threshold=0.75, first_mode='rising', second_mode='falling', boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
Routine to find events based on the method in Dombeck et al., 2007.
Relies on the multi-dimensional findLevels function in traceRoutines.
Finds all two sets of points in `traces` that cross threshold multiples
of `stds`. The first_mode and second_mode parameters determine if the
crossings are rising, or falling. The trace is filtered with a flat
kernel of width `boxWidth` and successive crossings are paired. Any
crossings less that `distance_cutoff` apart are discarded.
This routine is called by findEventsDombeck().
:param: traces - 2 or 3d numpy array of dF/F traces (time x cells, or time x cells x trial)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: rising_threshold - float used for first crossings
:param: falling_threshold - float used for second crossings
:param: boxWidth - filter size
:param: distance_cutoff - eliminate crossings pairs closer than this- eliminates noise
:returns: 2d or 3d array same size and dimension as traces, labeled with event number
"""
# insure that we have at least one 'trial' dimension.
if traces.ndim == 2:
traces = np.atleast_3d(traces)
stds = np.atleast_2d(stds)
time, cells, trials = traces.shape
# normally tm.findLevels works with a single number, but if the shapes are right then it will broadcast correctly with a larger array
first_crossings = tm.findLevelsNd(traces, np.array(stds)*rising_threshold, mode=first_mode, axis=0, boxWidth=boxWidth)
second_crossings = tm.findLevelsNd(traces, np.array(stds)*falling_threshold, mode=second_mode, axis=0, boxWidth=boxWidth)
events = np.zeros_like(traces)
i=1
for cell in range(cells):
for trial in range(trials):
rising_event_locations = np.where(first_crossings[:,cell,trial])[0] # peel off the tuple
falling_event_locations = np.where(second_crossings[:,cell,trial])[0] # peel off the tuple
possible_pairs = []
for r in rising_event_locations:
if possible_pairs:
prev_rising = zip(*possible_pairs)[0]
prev_falling = zip(*possible_pairs)[1]
if r <= prev_falling[-1]:
continue
try:
f = falling_event_locations[np.searchsorted(falling_event_locations, r)]
possible_pairs.append([r,f])
except IndexError:
possible_pairs.append([r,time])
for pair in possible_pairs:
if pair[1]-pair[0] > distance_cutoff:
events[pair[0]:pair[1], cell, trial] = i
i = i+1
return np.squeeze(events)
def findEventsDombeck(traces, stds, false_positive_rate=0.05, lower_sigma=1, upper_sigma=5, boxWidth=3, distance_cutoff=2):
"""----------------DEPRECATED-----------------------------
This routine uses findEventsAtThreshold() at a range of thresholds to
detect both positive and negative going events, and calculates a false positive
rate based on the percentage of total negative events
(see Dombeck et al. 2007). It then calculates the threshold closest to
the specified false positive rate and returns that event array for
positive going events.
The falling value is hardcoded at 0.75 * std of baseline, as per Dombeck et al. 2007.
:param: traces - 2 or 3d numpy array of traces (time x cells or time x cells x trials)
:param: stds - 1 or 2d numpy array of values representing noise levels in the data (cells, or cells x trials)
:param: false_positive_rate - float value of desired false positive rate (0.05 = 5%)
:param: lower_sigma - starting point for scan
:param: upper_sigma - stopping point for scan
:param: boxWidth - window size for pre-smoothing
:param: distance_cutoff - minimum length of event
:returns: events array for traces at desired false positive rate
"""
all_events = []
for sigma in np.arange(lower_sigma, upper_sigma, 0.125):
pos_events = findEventsAtThreshold(traces, stds, sigma, 0.75, first_mode='rising', second_mode='falling', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
neg_events = findEventsAtThreshold(traces, stds, -sigma, -0.75, first_mode='falling', second_mode='rising', boxWidth=boxWidth, distance_cutoff=distance_cutoff)
temp_false_positive_rate = float(neg_events.max()) / (pos_events.max() + neg_events.max())
all_events.append((sigma, pos_events.max(), neg_events.max(), temp_false_positive_rate, pos_events, neg_events))
closest_to_false_pos = np.argmin(np.abs(np.array(zip(*all_events)[3])-false_positive_rate)) # get all false positive rates, find index closest to 0.05
print 'Using sigma cutoff of: ' + str(all_events[closest_to_false_pos][0]) # get the right sigma
return all_events[closest_to_false_pos][4] # pos events are 4th in tuple
|
dattalab/d_code
|
events/eventRoutines.py
|
Python
|
mit
| 23,750
|
[
"Gaussian"
] |
a7698210882a6c41dae0fb8ce9b48a31c9f6b48b95dff8a99b21479b5ef7b711
|
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ALADIN, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_libdir
from easybuild.tools.ordereddict import OrderedDict
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_ALADIN(EasyBlock):
"""Support for building/installing ALADIN."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for ALADIN."""
super(EB_ALADIN, self).__init__(*args, **kwargs)
self.conf_file = None
self.conf_filepath = None
self.rootpack_dir = 'UNKNOWN'
self.orig_library_path = None
@staticmethod
def extra_options():
"""Custom easyconfig parameters for ALADIN."""
extra_vars = {
'optional_extra_param': ['default value', "short description", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Custom configuration procedure for ALADIN."""
# unset $LIBRARY_PATH set by modules of dependencies, because it may screw up linking
if 'LIBRARY_PATH' in os.environ:
self.log.debug("Unsetting $LIBRARY_PATH (was: %s)" % os.environ['LIBRARY_PATH'])
self.orig_library_path = os.environ.pop('LIBRARY_PATH')
# build auxiliary libraries
auxlibs_dir = None
my_gnu = None
if self.toolchain.comp_family() == toolchain.GCC:
my_gnu = 'y' # gfortran
for var in ['CFLAGS', 'CXXFLAGS', 'F90FLAGS', 'FFLAGS']:
flags = os.getenv(var)
env.setvar(var, "%s -fdefault-real-8 -fdefault-double-8" % flags)
self.log.info("Updated %s to '%s'" % (var, os.getenv(var)))
elif self.toolchain.comp_family() == toolchain.INTELCOMP:
my_gnu = 'i' # icc/ifort
else:
raise EasyBuildError("Don't know how to set 'my_gnu' variable in auxlibs build script.")
self.log.info("my_gnu set to '%s'" % my_gnu)
tmp_installroot = tempfile.mkdtemp(prefix='aladin_auxlibs_')
try:
cwd = os.getcwd()
os.chdir(self.builddir)
builddirs = os.listdir(self.builddir)
auxlibs_dir = [x for x in builddirs if x.startswith('auxlibs_installer')][0]
os.chdir(auxlibs_dir)
auto_driver = 'driver_automatic'
for line in fileinput.input(auto_driver, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(my_gnu\s*=\s*).*$", r"\1%s" % my_gnu, line)
line = re.sub(r"^(my_r32\s*=\s*).*$", r"\1n", line) # always 64-bit real precision
line = re.sub(r"^(my_readonly\s*=\s*).*$", r"\1y", line) # make libs read-only after build
line = re.sub(r"^(my_installroot\s*=\s*).*$", r"\1%s" % tmp_installroot, line)
sys.stdout.write(line)
run_cmd("./%s" % auto_driver)
os.chdir(cwd)
except OSError, err:
raise EasyBuildError("Failed to build ALADIN: %s", err)
# build gmkpack, update PATH and set GMKROOT
# we build gmkpack here because a config file is generated in the gmkpack install path
try:
gmkpack_dir = [x for x in builddirs if x.startswith('gmkpack')][0]
os.chdir(os.path.join(self.builddir, gmkpack_dir))
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'n',
}
run_cmd_qa("./build_gmkpack", qa)
os.chdir(cwd)
paths = os.getenv('PATH').split(':')
paths.append(os.path.join(self.builddir, gmkpack_dir, 'util'))
env.setvar('PATH', ':'.join(paths))
env.setvar('GMKROOT', os.path.join(self.builddir, gmkpack_dir))
except OSError, err:
raise EasyBuildError("Failed to build gmkpack: %s", err)
# generate gmkpack configuration file
self.conf_file = 'ALADIN_%s' % self.version
self.conf_filepath = os.path.join(self.builddir, 'gmkpack_support', 'arch', '%s.x' % self.conf_file)
try:
if os.path.exists(self.conf_filepath):
os.remove(self.conf_filepath)
self.log.info("Removed existing gmpack config file %s" % self.conf_filepath)
archdir = os.path.dirname(self.conf_filepath)
if not os.path.exists(archdir):
mkdir(archdir, parents=True)
except OSError, err:
raise EasyBuildError("Failed to remove existing file %s: %s", self.conf_filepath, err)
mpich = 'n'
known_mpi_libs = [toolchain.MPICH, toolchain.MPICH2, toolchain.INTELMPI]
if self.toolchain.options.get('usempi', None) and self.toolchain.mpi_family() in known_mpi_libs:
mpich = 'y'
qpref = 'Please type the ABSOLUTE name of '
qsuff = ', or ignore (environment variables allowed) :'
qsuff2 = ', or ignore : (environment variables allowed) :'
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.GCC:
gribdir = 'GNU'
elif comp_fam == toolchain.INTELCOMP:
gribdir = 'INTEL'
else:
raise EasyBuildError("Don't know which grib lib dir to use for compiler %s", comp_fam)
aux_lib_gribex = os.path.join(tmp_installroot, gribdir, 'lib', 'libgribex.a')
aux_lib_ibm = os.path.join(tmp_installroot, gribdir, 'lib', 'libibmdummy.a')
grib_api_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api.a')
grib_api_f90_lib = os.path.join(get_software_root('grib_api'), 'lib', 'libgrib_api_f90.a')
grib_api_inc = os.path.join(get_software_root('grib_api'), 'include')
jasperlib = os.path.join(get_software_root('JasPer'), 'lib', 'libjasper.a')
mpilib = os.path.join(os.getenv('MPI_LIB_DIR'), os.getenv('MPI_LIB_SHARED'))
# netCDF
netcdf = get_software_root('netCDF')
netcdf_fortran = get_software_root('netCDF-Fortran')
if netcdf:
netcdfinc = os.path.join(netcdf, 'include')
if netcdf_fortran:
netcdflib = os.path.join(netcdf_fortran, get_software_libdir('netCDF-Fortran'), 'libnetcdff.a')
else:
netcdflib = os.path.join(netcdf, get_software_libdir('netCDF'), 'libnetcdff.a')
if not os.path.exists(netcdflib):
raise EasyBuildError("%s does not exist", netcdflib)
else:
raise EasyBuildError("netCDF(-Fortran) not available")
ldpaths = [ldflag[2:] for ldflag in os.getenv('LDFLAGS').split(' ')] # LDFLAGS have form '-L/path/to'
lapacklibs = []
for lib in os.getenv('LAPACK_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
lapacklibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
lapacklib = ' '.join(lapacklibs)
blaslibs = []
for lib in os.getenv('BLAS_STATIC_LIBS').split(','):
libpaths = [os.path.join(ldpath, lib) for ldpath in ldpaths]
blaslibs.append([libpath for libpath in libpaths if os.path.exists(libpath)][0])
blaslib = ' '.join(blaslibs)
qa = {
'Do you want to run the configuration file maker assistant now (y) or later [n] ?': 'y',
'Do you want to setup your configuration file for MPICH (y/n) [n] ?': mpich,
'Please type the directory name where to find a dummy file mpif.h or ignore :': os.getenv('MPI_INC_DIR'),
'%sthe library gribex or emos%s' % (qpref, qsuff2): aux_lib_gribex,
'%sthe library ibm%s' % (qpref, qsuff): aux_lib_ibm,
'%sthe library grib_api%s' % (qpref, qsuff): grib_api_lib,
'%sthe library grib_api_f90%s' % (qpref, qsuff): grib_api_f90_lib,
'%sthe JPEG auxilary library if enabled by Grib_api%s' % (qpref, qsuff2): jasperlib,
'%sthe library netcdf%s' % (qpref, qsuff): netcdflib,
'%sthe library lapack%s' % (qpref, qsuff): lapacklib,
'%sthe library blas%s' % (qpref, qsuff): blaslib,
'%sthe library mpi%s' % (qpref, qsuff): mpilib,
'%sa MPI dummy library for serial executions, or ignore :' % qpref: '',
'Please type the directory name where to find grib_api headers, or ignore :': grib_api_inc,
'Please type the directory name where to find fortint.h or ignore :': '',
'Please type the directory name where to find netcdf headers, or ignore :': netcdfinc,
'Do you want to define CANARI (y/n) [y] ?': 'y',
'Please type the name of the script file used to generate a preprocessed blacklist file, or ignore :': '',
'Please type the name of the script file used to recover local libraries (gget), or ignore :': '',
'Please type the options to tune the gnu compilers, or ignore :': os.getenv('F90FLAGS'),
}
f90_seq = os.getenv('F90_SEQ')
if not f90_seq:
# F90_SEQ is only defined when usempi is enabled
f90_seq = os.getenv('F90')
stdqa = OrderedDict([
(r'Confirm library .* is .*', 'y'), # this one needs to be tried first!
(r'.*fortran 90 compiler name .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
(r'.*fortran 90 compiler interfaced with .*\s*:\n\(suggestions\s*: .*\)', f90_seq),
(r'Please type the ABSOLUTE name of .*library.*, or ignore\s*[:]*\s*[\n]*.*', ''),
(r'Please .* to save this draft configuration file :\n.*', '%s.x' % self.conf_file),
])
no_qa = [
".*ignored.",
]
env.setvar('GMKTMP', self.builddir)
env.setvar('GMKFILE', self.conf_file)
run_cmd_qa("gmkfilemaker", qa, std_qa=stdqa, no_qa=no_qa)
# set environment variables for installation dirs
env.setvar('ROOTPACK', os.path.join(self.installdir, 'rootpack'))
env.setvar('ROOTBIN', os.path.join(self.installdir, 'rootpack'))
env.setvar('HOMEPACK', os.path.join(self.installdir, 'pack'))
env.setvar('HOMEBIN', os.path.join(self.installdir, 'pack'))
# patch config file to include right Fortran compiler flags
regex_subs = [(r"^(FRTFLAGS\s*=.*)$", r"\1 %s" % os.getenv('FFLAGS'))]
apply_regex_substitutions(self.conf_filepath, regex_subs)
def build_step(self):
"""No separate build procedure for ALADIN (see install_step)."""
pass
def test_step(self):
"""Custom built-in test procedure for ALADIN."""
if self.cfg['runtest']:
cmd = "test-command"
run_cmd(cmd, simple=True, log_all=True, log_output=True)
def install_step(self):
"""Custom install procedure for ALADIN."""
try:
mkdir(os.getenv('ROOTPACK'), parents=True)
mkdir(os.getenv('HOMEPACK'), parents=True)
except OSError, err:
raise EasyBuildError("Failed to create rootpack dir in %s: %s", err)
# create rootpack
[v1, v2] = self.version.split('_')
(out, _) = run_cmd("source $GMKROOT/util/berootpack && gmkpack -p master -a -r %s -b %s" % (v1, v2), simple=False)
packdir_regexp = re.compile("Creating main pack (.*) \.\.\.")
res = packdir_regexp.search(out)
if res:
self.rootpack_dir = os.path.join('rootpack', res.group(1))
else:
raise EasyBuildError("Failed to determine rootpack dir.")
# copy ALADIN sources to right directory
try:
src_dirs = [d for d in os.listdir(self.builddir) if not (d.startswith('auxlib') or d.startswith('gmk'))]
target = os.path.join(self.installdir, self.rootpack_dir, 'src', 'local')
self.log.info("Copying sources from %s to %s" % (self.builddir, target))
for srcdir in src_dirs:
shutil.copytree(os.path.join(self.builddir, srcdir), os.path.join(target, srcdir))
self.log.info("Copied %s" % srcdir)
except OSError, err:
raise EasyBuildError("Failed to copy ALADIN sources: %s", err)
if self.cfg['parallel']:
env.setvar('GMK_THREADS', str(self.cfg['parallel']))
# build rootpack
run_cmd(os.path.join(self.installdir, self.rootpack_dir, 'ics_master'))
# restore original $LIBRARY_PATH
if self.orig_library_path is not None:
os.environ['LIBRARY_PATH'] = self.orig_library_path
def sanity_check_step(self):
"""Custom sanity check for ALADIN."""
bindir = os.path.join(self.rootpack_dir, 'bin')
libdir = os.path.join(self.rootpack_dir, 'lib')
custom_paths = {
'files': [os.path.join(bindir, x) for x in ['MASTER']] +
[os.path.join(libdir, 'lib%s.local.a' % x) for x in ['aeo', 'ald', 'arp', 'bip',
'bla', 'mpa', 'mse', 'obt',
'odb', 'sat', 'scr', 'sct',
'sur', 'surfex', 'tal', 'tfl',
'uti', 'xla', 'xrd']],
'dirs': [],
}
super(EB_ALADIN, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guesses for environment variables (PATH, ...) for ALADIN."""
guesses = super(EB_ALADIN, self).make_module_req_guess()
guesses.update({
'PATH': [os.path.join(self.rootpack_dir, 'bin')],
})
return guesses
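# Illustrative easyconfig sketch for this easyblock (added for clarity; the
# version string and dependency versions are invented, not taken from an
# official easyconfig):
#
#   name = 'ALADIN'
#   version = '36t1_op2bf1'
#   toolchain = {'name': 'intel', 'version': '2015a'}
#   dependencies = [
#       ('grib_api', '1.13.1'),
#       ('JasPer', '1.900.1'),
#       ('netCDF-Fortran', '4.4.2'),
#   ]
#   # install_step() splits the version on '_' to derive the gmkpack -r/-b arguments.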
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/a/aladin.py
|
Python
|
gpl-2.0
| 15,303
|
[
"NetCDF"
] |
d2bfd9e467c6e9c1932b6807b709bce5d07141edd8c93d05c6966237eb76f80c
|
# ----------------------------------------------------------------------
# Copyright (c) 2016, The Regents of the University of California All
# rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of The Regents of the University of California
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE
# UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# ----------------------------------------------------------------------
# Filename: Tinker.py
# Version: 0.1
# Description: A collection of utility methods, and the Tinker class that
# encapsulates the environment necessary to run the tinker script and generate
# the custom board-support package necessary for the Altera OpenCL Compiler
# Author: Dustin Richmond
import xml.etree.ElementTree as ET, math, os
from xml.dom import minidom
import sys, re, json
from IP import parse_string
class Tinker():
__C_TCLXML_ENV_VAR_NAME = "TCLXML_PATH"
__C_TINKER_PATH_ENV_VAR_NAME = "TINKER_PATH"
__C_TINKER_SKELS_FOLDER = "skels"
__C_TINKER_KNOWN_VERSIONS = set(["14.0", "15.1", "16.0"])
def __init__(self, version=None, board=None, output=None):
self.__check_env()
tp = self.__get_tinker_path()
self.__tinker_path = tp
d = self.__parse()
self.__check(d)
self.__d = d
if(version != None):
self.check_version(version)
self.__v = version
if(board != None):
if(version == None):
sys.exit("Error!")
else:
self.check_version_board(version, board)
self.__b = board
if(output == None):
output = "./"
output = os.path.abspath(output)
check_path(output)
self.__output_path = output
self.get_path_skel()
def get_name(self, b, d):
return b["name"] + "_" + d["name"]
def get_path_skel(self):
self.__check_init()
p = self.__d[self.__v][self.__b]["path"]
check_path(p)
return p
def get_path_skel_xml(self):
p = self.get_path_skel()
f = self.get_name_skel_xml()
return p +"/" + f
def get_name_skel_xml(self):
f = self.__d[self.__v][self.__b]["xml"]
return f
def get_path_output(self, b, d):
return self.__output_path + "/" + self.get_name(b, d)
def get_path_tcl(self):
self.__check_init_version()
self.check_version(self.__v)
return self.__tinker_path + "/tcl/" + str(self.__v) + "/"
def get_version(self):
self.__check_init_version()
return self.__v
def get_board(self):
self.__check_init_board()
return self.__b
def get_versions(self):
for v in self.__d.keys():
self.check_version(v)
return self.__d.keys()
def get_boards_version(self, version):
self.check_version(version)
return self.__d[version]["boards"]
def __get_tinker_path(self):
p = os.path.expandvars("${TINKER_PATH}") + "/"
return p
def __get_tclxml_path(self):
p = os.path.expandvars("${TCLXML_PATH}") + "/"
return p
def check_version(self, version):
if(str(version) not in self.__C_TINKER_KNOWN_VERSIONS):
sys.exit(("Error! \"%s\" is not a known Quartus version. "
+ "Valid versions are: %s")
% (str(version),
str(list(self.__C_TINKER_KNOWN_VERSIONS))))
if(not self.__is_version(version)):
sys.exit("ERROR: %s is not a valid version" % str(version))
def check_version_board(self, version, board):
self.check_version(version)
if(not self.__is_board(version,board)):
sys.exit("ERROR: %s is not a known board for version %s"
% (str(board), str(version)))
def __check(self, d):
for (v,dv) in d.iteritems():
pv = dv["path"]
check_path(pv)
for b in dv["boards"]:
pb = dv[b]["path"]
check_path(pb)
def __check_init_version(self):
if(self.__v is None):
sys.exit("Error! Tinker object not initialized"
+ " with a Quartus version")
def __check_init_board(self):
if(self.__b is None):
sys.exit("Error! Tinker object not initialized"
+ " with a Board name")
def __check_init(self):
self.__check_init_version()
self.__check_init_board()
def __check_env(self):
check_env_var(self.__C_TCLXML_ENV_VAR_NAME)
check_path(self.__get_tclxml_path())
check_abs(self.__get_tclxml_path())
check_env_var(self.__C_TINKER_PATH_ENV_VAR_NAME)
check_path(self.__get_tinker_path())
check_abs(self.__get_tinker_path())
def __parse(self):
vs = self.__parse_versions(self.__tinker_path)
vdb = {}
for v in vs:
dv = {}
dv["path"] = self.__parse_path_version_dir(v)
dv["boards"] = self.__parse_boards(v)
for b in dv["boards"]:
dv[b] = {"path":self.__parse_path_board_dir(v, b),
"xml":self.__parse_name_board_xml(v, b)}
vdb[v] = dv
return vdb
def __parse_path_version_dir(self, v):
p = self.__parse_path_version_xml()
r = ET.parse(p)
e = r.find("./release/[@version='%s']" % str(v))
p = os.path.expandvars(parse_string(e,"path"))
check_path(p)
return p
def __parse_path_version_xml(self):
p = self.__tinker_path + self.__C_TINKER_SKELS_FOLDER + "/versions.xml"
check_path(p)
return p
def __parse_versions(self, tp):
p = self.__parse_path_version_xml()
r = ET.parse(p)
vs = [parse_string(e,"version") for e in r.findall("./release/[@version]")]
return vs
def __parse_path_board_dir(self, version, board):
pv = self.__parse_path_version_dir(version)
check_path(pv)
pvx = pv + "/boards.xml"
check_path(pvx)
r = ET.parse(pvx)
e = r.find("./board/[@name='%s']" % board)
pd = pv + "/" + os.path.expandvars(parse_string(e,"path"))
check_path(pd)
return pd
def __parse_name_board_xml(self, version, board):
pv = self.__parse_path_version_dir(version)
check_path(pv)
pvx = pv + "/boards.xml"
check_path(pvx)
r = ET.parse(pvx)
e = r.find("./board/[@name='%s']" % board)
f = parse_string(e,"xml")
return f
def __parse_boards(self, version):
p = self.__tinker_path + "skels/versions.xml"
check_path(p)
r = ET.parse(p)
es = r.findall("./release/[@version='%s']" % str(version))
e = es[0]
if(len(es) > 1):
sys.exit("ERROR: Multiple matches for version %s" % str(version))
p = os.path.expandvars(parse_string(e,"path")) + "/boards.xml"
check_path(p)
r = ET.parse(p)
boards = []
for e in r.iterfind(("./board/[@version='%s']" % version)):
boards.append(parse_string(e,"name"))
return boards
def __is_version(self, v):
return v in self.__d.keys()
def __is_board(self, v, b):
return self.__is_version(v) and b in self.__d[v]["boards"]
def contains_duplicates(l):
return len(l) != len(set(l))
def is_in_range(v, min, max):
return (min <= v <= max)
def is_pow_2(v):
return v != 0 and ((v & (v - 1)) == 0)
def is_alphabetic(s):
return is_string(s) and s.isalpha()  # unlike is_alphachar below, accept alphabetic strings of any length
def is_alphachar(s):
return is_string(s) and s.isalpha() and len(s) == 1
def is_string(s):
return isinstance(s, basestring)
def is_list(l):
return isinstance(l, list)
def is_dict(l):
return isinstance(l, dict)
def is_int(l):
return isinstance(l, int)
def is_float(l):
return isinstance(l, float)
def is_id(i):
return is_string(i) and i.isalpha()
def is_valid_verilog_name(s):
if(not is_string(s)
or s is ""
or s[0].isdigit()
or re.match(r'\w+',s) is None):
return False
return True
def key_error(ks, ds):
print "In key-value map:"
print ds
sys.exit("Error! Key \"%s\" missing" % ks)
def key_error_xml(ks, es):
print "In Element:"
print es
sys.exit("Error! Key \"%s\" missing" % ks)
def value_error(ks, vs, vvs):
sys.exit(("Error! Key \"%s\" has invalid value \"%s\". " +
"Valid values are: %s")
% (ks, vs , vvs))
def path_error_xml(t, es):
print "In XML Element:"
print es
sys.exit("Subelement with path %s not found" % t)
def value_error_xml(ks, vs, vvs, es):
print "In XML Element:"
print es
value_error(ks,vs,vvs)
def value_error_map(ks, vs, vvs, ds):
print "In key-value map:"
print ds
value_error(ks,vs,vvs)
def tostr_dict(d):
return json.dumps(d,indent=2)
def prettify(elem):
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def clog2(i):
return math.floor(math.log(i)/math.log(2))
def check_path(p):
if(not os.path.exists(p)):
sys.exit("ERROR: Path %s does not exist" % p)
def check_abs(p):
if(not os.path.isabs(p)):
sys.exit("Error! Path %s is not an absolute path" % p)
def check_env_var(v):
if(v not in os.environ):
sys.exit("Error! Environment Variable %s not set" % v)
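# Illustrative usage sketch (added for clarity; the board name and output path
# are invented, and TINKER_PATH/TCLXML_PATH must point at valid trees):
#
#   t = Tinker(version="15.1", board="de5net", output="./bsp")
#   print t.get_path_skel_xml()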
|
drichmond/tinker
|
python/Tinker.py
|
Python
|
bsd-3-clause
| 11,186
|
[
"TINKER"
] |
fcb8ed480f51a04f3ecc8849ac95843516440a75a39165ab970ed201319e4d31
|
#!/usr/bin/python2
# This is a neural network based on PyBrain,
# taking as input an X and Y and any tree complexity parameters, and
# returning a classifier that can then be analyzed with the classifier.
# See the example in the main method for that and error-checking.
import sys
from util import write_test_prediction, load_validation_data
from metrics import acc
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pylab import ion, ioff, figure, draw, contourf, clf, show, hold, plot
from scipy import diag, arange, meshgrid, where
from numpy.random import multivariate_normal
from numpy import mat
class NeuralNetworkClassifier(object):
def __init__(self, trainer):
self.trainer = trainer
def predict(self, data):
predictions = self.trainer.testOnClassData(dataset=data)
'''
predictions = self.trainer.activateOnDataset(data)
predictions = predictions.argmax(axis=1)
#predictions = predictions.reshape(X.shape) ???
'''
return predictions
def classify(Xtrain, Ytrain, n_hidden=5):
""" Use entirety of provided X, Y to predict
Arguments
Xtrain -- Training data
Ytrain -- Training prediction
Returns
classifier -- a classifier fitted to Xtrain and Ytrain
"""
# PyBrain expects data in its DataSet format
trndata = ClassificationDataSet(Xtrain.shape[1], nb_classes=2)
trndata.setField('input', Xtrain)
# Apparently, arrays don't work here as they try to access second dimension size...
trndata.setField('target', mat(Ytrain).transpose())
trndata._convertToOneOfMany() # one output neuron per class
# build neural net and train it
net = buildNetwork(trndata.indim, n_hidden, trndata.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(net, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)
trainer.trainUntilConvergence()
#trainer.trainEpochs(5)
print "trained"
#trainer.trainEpochs(5)
# Return a functor that wraps calling predict
return NeuralNetworkClassifier(trainer)
if __name__ == "__main__":
# First obtain our training and testing data
# Training has 50K samples, Testing 100K
Xt, Yt, Xv = load_validation_data()
# Run Neural Network over training data
classifier = classify(Xt, Yt)
# Prepare validation data and predict
tstdata = ClassificationDataSet(Xv.shape[1], 1, nb_classes=2)
tstdata.setField('input', Xv)
tstdata._convertToOneOfMany() # one output neuron per class
predictions = classifier.predict(tstdata)
# Write prediction to file
write_test_prediction("out_nn.txt", np.array(majority))
|
bravelittlescientist/kdd-particle-physics-ml-fall13
|
src/nn_submission.py
|
Python
|
gpl-2.0
| 2,820
|
[
"NEURON"
] |
6a6d29021fbc48d566cb83631bc4d7bc20e8a1bd8174f901dfb759ea52968a6c
|
#!/usr/bin/env python
#-*- coding: utf-8 -*
#
# Copyright (c) 2009 Antonio Barbuzzi <antonio.barbuzzi@gmail.com>,
# Telematics Lab, DEE, Politecnico di Bari, Italy. All rights reserved.
#
#
# This file is part of DeSRTO.
#
# DeSRTO is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DeSRTO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DeSRTO. If not, see <http://www.gnu.org/licenses/>.
# For bug report and other information please visit Telematics Lab site
# http://telematics.poliba.it or send an email to the author
from optparse import OptionParser
import os
from desrto import DeSRTO
BANNER = '''
DeSRTO
DeSRTO Copyright (C) 2009 Antonio Barbuzzi (antonio.barbuzzi@gmail.com)
Telematics Lab, DEE, Politecnico di Bari, Italy.
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute
it under certain conditions. See COPYING for details.
'''
def main():
global BANNER
parser = OptionParser()
parser.add_option("-s", "--data-sender-filename", dest="data_sender_filename",
help="Cap file of the Data Sender", metavar="FILE")
parser.add_option("--data-sender-mac", dest="data_sender_mac",
help="Mac Address of the Data Sender Card. Mandatory if applicable", type="string", default=None)
parser.add_option("-r", "--data-receiver-filename", dest="data_receiver_filename",
help="Cap file of the receiver", metavar="FILE")
parser.add_option("--data-receiver-mac", dest="data_receiver_mac",
help="Mac Address of the Data Receiver Card. Mandatory if applicable", type="string", default=None)
parser.add_option("--rto-list-filename", dest="rto_list_filename",
help="File with the list of RTO(s)", metavar="FILE")
parser.add_option("-w", "--save-report", dest="report_filename",
help="Filename where to save a report", metavar="FILE")
# parser.add_option("--nat", dest="nat", default=False,
# help="Flow is Natted")
parser.add_option("--nat",
action="store_true", dest="nat", default=False,
help="Flow is Natted")
parser.add_option("--interprest-butterfly",
action="store_true", dest="betabutterfly", default=False,
help="Enable the division of Butterfly RTO in Butterfly-NRTO and Butterfly-SRTO (beta)")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
if not options.data_sender_filename:
parser.error("Data sender filename not defined")
if not os.path.isfile(options.data_sender_filename):
parser.error("Filename %s doesn't esist" % options.data_sender_filename)
if not options.data_receiver_filename:
parser.error("Data receiver filename not defined")
if not os.path.isfile(options.data_receiver_filename):
parser.error("Filename %s doesn't esist" % options.data_receiver_filename)
#if not options.report_filename:
# options.report_filename = None
if not options.rto_list_filename:
a = options.data_receiver_filename
path = "/".join(a.split('/')[:-1])
if len(path)>0:
options.rto_list_filename = path + "/rto_detected.log"
else:
options.rto_list_filename = "rto_detected.log"
if not os.path.isfile(options.rto_list_filename):
parser.error("Filename %s doesn't esist" % options.rto_list_filename)
if options.verbose:
print BANNER
b = DeSRTO(sender_cap_file = options.data_sender_filename, receiver_cap_file = options.data_receiver_filename,
sender_mac = options.data_sender_mac, receiver_mac=options.data_receiver_mac,
rto_file = options.rto_list_filename, nat=options.nat, betabutterfly=options.betabutterfly)
b.summary()
b.analyse()
if options.report_filename:
b.dump_report(options.report_filename)
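def example_direct_api_use():
    """Illustrative sketch only (not part of the original script): DeSRTO can
    also be driven straight from Python, mirroring the calls made in main().
    The capture files and the RTO list below are placeholder names."""
    b = DeSRTO(sender_cap_file="sender.cap", receiver_cap_file="receiver.cap",
               sender_mac=None, receiver_mac=None,
               rto_file="rto_detected.log", nat=False, betabutterfly=False)
    b.summary()   # print an overview of the traces, as main() does
    b.analyse()   # run the RTO classification
    return b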
if __name__=='__main__':
main()
|
antoniobarbuzzi/desrto
|
run.py
|
Python
|
gpl-2.0
| 4,698
|
[
"VisIt"
] |
2cf8568ca370fd6fcfbd866e02ed1fe7126b08e3989f9fd4c6db08f05c074d9d
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Gavin E. Crooks 2001-10-10
"""ASTRAL RAF (Rapid Access Format) Sequence Maps.
The ASTRAL RAF Sequence Maps record the relationship between the PDB SEQRES
records (representing the sequence of the molecule used in an experiment) to
the ATOM records (representing the atoms experimentally observed).
This data is derived from the Protein Data Bank CIF files. Known errors in the
CIF files are corrected manually, with the original PDB file serving as the
final arbiter in case of discrepancies.
Residues are referenced by residue ID. This consists of the PDB residue
sequence number (up to 4 digits) and an optional PDB insertion code (an
ASCII alphabetic character, a-z, A-Z), e.g. "1", "10A", "1010b", "-1".
See "ASTRAL RAF Sequence Maps":http://astral.stanford.edu/raf.html
to_one_letter_code -- A mapping from the 3-letter amino acid codes found
in PDB files to 1-letter codes. The 3-letter codes
include chemically modified residues.
"""
from copy import copy
from types import *
from Residues import Residues
# This table is taken from the RAF release notes, and includes the
# undocumented mapping "UNK" -> "X"
to_one_letter_code= {
'ALA':'A', 'VAL':'V', 'PHE':'F', 'PRO':'P', 'MET':'M',
'ILE':'I', 'LEU':'L', 'ASP':'D', 'GLU':'E', 'LYS':'K',
'ARG':'R', 'SER':'S', 'THR':'T', 'TYR':'Y', 'HIS':'H',
'CYS':'C', 'ASN':'N', 'GLN':'Q', 'TRP':'W', 'GLY':'G',
'2AS':'D', '3AH':'H', '5HP':'E', 'ACL':'R', 'AIB':'A',
'ALM':'A', 'ALO':'T', 'ALY':'K', 'ARM':'R', 'ASA':'D',
'ASB':'D', 'ASK':'D', 'ASL':'D', 'ASQ':'D', 'AYA':'A',
'BCS':'C', 'BHD':'D', 'BMT':'T', 'BNN':'A', 'BUC':'C',
'BUG':'L', 'C5C':'C', 'C6C':'C', 'CCS':'C', 'CEA':'C',
'CHG':'A', 'CLE':'L', 'CME':'C', 'CSD':'A', 'CSO':'C',
'CSP':'C', 'CSS':'C', 'CSW':'C', 'CXM':'M', 'CY1':'C',
'CY3':'C', 'CYG':'C', 'CYM':'C', 'CYQ':'C', 'DAH':'F',
'DAL':'A', 'DAR':'R', 'DAS':'D', 'DCY':'C', 'DGL':'E',
'DGN':'Q', 'DHA':'A', 'DHI':'H', 'DIL':'I', 'DIV':'V',
'DLE':'L', 'DLY':'K', 'DNP':'A', 'DPN':'F', 'DPR':'P',
'DSN':'S', 'DSP':'D', 'DTH':'T', 'DTR':'W', 'DTY':'Y',
'DVA':'V', 'EFC':'C', 'FLA':'A', 'FME':'M', 'GGL':'E',
'GLZ':'G', 'GMA':'E', 'GSC':'G', 'HAC':'A', 'HAR':'R',
'HIC':'H', 'HIP':'H', 'HMR':'R', 'HPQ':'F', 'HTR':'W',
'HYP':'P', 'IIL':'I', 'IYR':'Y', 'KCX':'K', 'LLP':'K',
'LLY':'K', 'LTR':'W', 'LYM':'K', 'LYZ':'K', 'MAA':'A',
'MEN':'N', 'MHS':'H', 'MIS':'S', 'MLE':'L', 'MPQ':'G',
'MSA':'G', 'MSE':'M', 'MVA':'V', 'NEM':'H', 'NEP':'H',
'NLE':'L', 'NLN':'L', 'NLP':'L', 'NMC':'G', 'OAS':'S',
'OCS':'C', 'OMT':'M', 'PAQ':'Y', 'PCA':'E', 'PEC':'C',
'PHI':'F', 'PHL':'F', 'PR3':'C', 'PRR':'A', 'PTR':'Y',
'SAC':'S', 'SAR':'G', 'SCH':'C', 'SCS':'C', 'SCY':'C',
'SEL':'S', 'SEP':'S', 'SET':'S', 'SHC':'C', 'SHR':'K',
'SOC':'C', 'STY':'Y', 'SVA':'S', 'TIH':'A', 'TPL':'W',
'TPO':'T', 'TPQ':'A', 'TRG':'K', 'TRO':'W', 'TYB':'Y',
'TYQ':'Y', 'TYS':'Y', 'TYY':'Y', 'AGM':'R', 'GL3':'G',
'SMC':'C', 'ASX':'B', 'CGU':'E', 'CSX':'C', 'GLX':'Z',
'PYX':'C',
'UNK':'X'
}
def normalize_letters(one_letter_code):
"""Convert RAF one-letter amino acid codes into IUPAC standard codes.
Letters are uppercased, and "." ("Unknown") is converted to "X".
"""
if one_letter_code == '.':
return 'X'
else:
return one_letter_code.upper()
class SeqMapIndex(dict):
"""An RAF file index.
The RAF file itself is about 50 MB. This index provides rapid, random
access of RAF records without having to load the entire file into memory.
The index key is a concatenation of the PDB ID and chain ID, e.g.
"2drcA", "155c_". RAF uses an underscore to indicate blank
chain IDs.
"""
def __init__(self, filename):
"""
Arguments:
filename -- The file to index
"""
dict.__init__(self)
self.filename = filename
f = open(self.filename)
try:
position = 0
while True:
line = f.readline()
if not line: break
key = line[0:5]
if key is not None:
self[key]=position
position = f.tell()
finally:
f.close()
def __getitem__(self, key):
""" Return an item from the indexed file. """
position = dict.__getitem__(self,key)
f = open(self.filename)
try:
f.seek(position)
line = f.readline()
record = SeqMap(line)
finally:
f.close()
return record
def getSeqMap(self, residues):
"""Get the sequence map for a collection of residues.
residues -- A Residues instance, or a string that can be converted into
a Residues instance.
"""
if type(residues) == StringType:
residues = Residues(residues)
pdbid = residues.pdbid
frags = residues.fragments
if not frags: frags =(('_','',''),) # All residues of unnamed chain
seqMap = None
for frag in frags:
chainid = frag[0]
if chainid=='' or chainid=='-' or chainid==' ' or chainid=='_':
chainid = '_'
id = pdbid + chainid
sm = self[id]
#Cut out fragment of interest
start = 0
end = len(sm.res)
if frag[1] : start = int(sm.index(frag[1], chainid))
if frag[2] : end = int(sm.index(frag[2], chainid)+1)
sm = sm[start:end]
if seqMap is None:
seqMap = sm
else:
seqMap += sm
return seqMap
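def _example_index_lookup(raf_filename):
    """Illustrative usage sketch (not part of the original module): build an
    index over a local copy of the ASTRAL RAF file and fetch one chain's
    sequence map. The filename and the "103lA" key are placeholders."""
    index = SeqMapIndex(raf_filename)
    seqmap = index["103lA"]  # 4-character PDB ID "103l" plus chain ID "A"
    return len(seqmap.res)   # number of residues mapped for that chain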
class SeqMap:
"""An ASTRAL RAF (Rapid Access Format) Sequence Map.
This is a list-like object; you can find the location of particular residues
with index(), slice this SeqMap into fragments, and glue fragments back
together with extend().
pdbid -- The PDB 4 character ID
pdb_datestamp -- From the PDB file
version -- The RAF format version. e.g. 0.01
flags -- RAF flags. (See release notes for more information.)
res -- A list of Res objects, one for each residue in this sequence map
"""
def __init__(self, line=None):
self.pdbid = ''
self.pdb_datestamp = ''
self.version = ''
self.flags = ''
self.res = []
if line:
self._process(line)
def _process(self, line):
"""Parses a RAF record into a SeqMap object.
"""
header_len = 38
line = line.rstrip() # no trailing whitespace
if len(line)<header_len:
raise ValueError("Incomplete header: "+line)
self.pdbid = line[0:4]
chainid = line[4:5]
self.version = line[6:10]
#Raf format versions 0.01 and 0.02 are identical for practical purposes
if(self.version != "0.01" and self.version !="0.02"):
raise ValueError("Incompatible RAF version: "+self.version)
self.pdb_datestamp = line[14:20]
self.flags = line[21:27]
for i in range(header_len, len(line), 7):
f = line[i : i+7]
if len(f)!=7:
raise ValueError("Corrupt Field: ("+f+")")
r = Res()
r.chainid = chainid
r.resid = f[0:5].strip()
r.atom = normalize_letters(f[5:6])
r.seqres = normalize_letters(f[6:7])
self.res.append(r)
def index(self, resid, chainid="_"):
for i in range(0, len(self.res)):
if self.res[i].resid == resid and self.res[i].chainid == chainid:
return i
raise KeyError("No such residue "+chainid+resid)
def __getslice__(self, i, j):
s = copy(self)
s.res = s.res[i:j]
return s
def append(self, res):
"""Append another Res object onto the list of residue mappings."""
self.res.append(res)
def extend(self, other):
"""Append another SeqMap onto the end of self.
Both SeqMaps must have the same PDB ID, PDB datestamp and
RAF version. The RAF flags are erased if they are inconsistent. This
may happen when fragments are taken from different chains.
"""
if not isinstance(other, SeqMap):
raise TypeError("Can only extend a SeqMap with a SeqMap.")
if self.pdbid != other.pdbid:
raise TypeError("Cannot add fragments from different proteins")
if self.version != other.version:
raise TypeError("Incompatible rafs")
if self.pdb_datestamp != other.pdb_datestamp:
raise TypeError("Different pdb dates!")
if self.flags != other.flags:
self.flags = ''
self.res += other.res
def __iadd__(self, other):
self.extend(other)
return self
def __add__(self, other):
s = copy(self)
s.extend(other)
return s
def getAtoms(self, pdb_handle, out_handle):
"""Extract all relevant ATOM and HETATOM records from a PDB file.
The PDB file is scanned for ATOM and HETATOM records. If the
chain ID, residue ID (seqNum and iCode), and residue type match
a residue in this sequence map, then the record is echoed to the
output handle.
This is typically used to find the coordinates of a domain, or other
residue subset.
pdb_handle -- A handle to the relevant PDB file.
out_handle -- All output is written to this file like object.
"""
#This code should be refactored when (if?) biopython gets a PDB parser
#The set of residues that I have to find records for.
resSet = {}
for r in self.res:
if r.atom=='X' : #Unknown residue type
continue
chainid = r.chainid
if chainid == '_':
chainid = ' '
resid = r.resid
resSet[(chainid,resid)] = r
resFound = {}
for line in pdb_handle.xreadlines():
if line.startswith("ATOM ") or line.startswith("HETATM"):
chainid = line[21:22]
resid = line[22:27].strip()
key = (chainid, resid)
if key in resSet:
res = resSet[key]
atom_aa = res.atom
resName = line[17:20]
if resName in to_one_letter_code:
if to_one_letter_code[resName] == atom_aa:
out_handle.write(line)
resFound[key] = res
if len(resSet) != len(resFound):
#for k in resFound.keys():
# del resSet[k]
#print resSet
raise RuntimeError('I could not find at least one ATOM or HETATM' \
+' record for each and every residue in this sequence map.')
class Res:
""" A single residue mapping from a RAF record.
chainid -- A single character chain ID.
resid -- The residue ID.
atom -- amino acid one-letter code from ATOM records.
seqres -- amino acid one-letter code from SEQRES records.
"""
def __init__(self):
self.chainid = ''
self.resid = ''
self.atom = ''
self.seqres = ''
def parse(handle):
"""Iterates over a RAF file, returning a SeqMap object for each line
in the file.
Arguments:
handle -- file-like object.
"""
for line in handle:
yield SeqMap(line)
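if __name__ == "__main__":
    # Minimal usage sketch, assuming a local copy of the ASTRAL RAF file;
    # the filename below is a placeholder and not part of the original module.
    raf_handle = open("astral-rapid-access.raf")
    try:
        for seq_map in parse(raf_handle):
            print seq_map.pdbid, len(seq_map.res)
    finally:
        raf_handle.close()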
|
NirBenTalLab/proorigami-cde-package
|
cde-root/usr/lib64/python2.4/site-packages/Bio/SCOP/Raf.py
|
Python
|
mit
| 12,025
|
[
"Biopython"
] |
01b66600128590e1337c6c10e844fb8ba126f3d5c23192066d6a4e812d5c4c4e
|
from __future__ import print_function, absolute_import, division
import os
import sys
import time
import signal
import traceback
from socket import gethostname
from getpass import getuser
from datetime import datetime
from six import iteritems
from six.moves import cStringIO
from sqlalchemy import func
from sklearn.base import clone, BaseEstimator
import numpy as np
from . import __version__
from .config import Config
from .trials import Trial
from .fit_estimator import fit_and_score_estimator
from .utils import Unbuffered, format_timedelta, current_pretty_time
from .utils import is_msmbuilder_estimator, num_samples
from .utils import is_json_serializable
class MaxParamSuggestionRetriesExceeded(Exception):
pass
def execute(args, parser):
start_time = datetime.now()
sys.stdout = Unbuffered(sys.stdout)
# Load the config file and extract the fields
print_header()
config = Config(args.config)
random_seed = args.seed if args.seed is not None else config.random_seed()
max_param_suggestion_retries = config.max_param_suggestion_retries()
estimator = config.estimator()
if 'random_state' in estimator.get_params().keys():
estimator.set_params(random_state=random_seed)
np.random.seed(random_seed)
searchspace = config.search_space()
strategy = config.strategy()
config_sha1 = config.sha1()
scoring = config.scoring()
project_name = config.project_name()
if is_msmbuilder_estimator(estimator):
print_msmbuilder_version()
print('\nLoading dataset...\n')
X, y = config.dataset()
print('Dataset contains %d element(s) with%s labels'
% (num_samples(X), 'out' if y is None else ''))
print('The elements have shape: [%s' %
', '.join([str(X[i].shape)
if isinstance(X[i], (np.ndarray, np.generic))
else '(%s,)' % num_samples(X[i])
for i in range(min(num_samples(X), 20))]), end='')
print(', ...]' if (num_samples(X) > 20) else ']')
print('Instantiated estimator:')
print(' %r' % estimator)
print(searchspace)
# set up cross-validation
cv = config.cv(X, y)
statuses = [None for _ in range(args.n_iters)]
# install a signal handler to print the footer before exiting
# from sigterm (e.g. PBS job kill)
def signal_handler(signum, frame):
print_footer(statuses, start_time, signum)
sys.exit(1)
signal.signal(signal.SIGTERM, signal_handler)
for i in range(args.n_iters):
print('\n' + '-'*70)
print('Beginning iteration %50s' % ('%d / %d' % (i+1, args.n_iters)))
print('-'*70)
try:
trial_id, params = initialize_trial(
strategy, searchspace, estimator, config_sha1=config_sha1,
project_name=project_name, sessionbuilder=config.trialscontext,
max_param_suggestion_retries=max_param_suggestion_retries)
except MaxParamSuggestionRetriesExceeded:
print('The search strategy failed to suggest a new set of params not already present in the database after {} attempts'.format(max_param_suggestion_retries))
break
s = run_single_trial(
estimator=estimator, params=params, trial_id=trial_id,
scoring=scoring, X=X, y=y, cv=cv, n_jobs=args.n_jobs,
sessionbuilder=config.trialscontext)
statuses[i] = s
print_footer(statuses, start_time)
def initialize_trial(strategy, searchspace, estimator, config_sha1,
project_name, sessionbuilder, max_param_suggestion_retries):
def build_full_params(xparams):
# make sure we get _all_ the parameters, including defaults on the
# estimator class, to save in the database
params = clone(estimator).set_params(**xparams).get_params()
params = dict((k, v) for k, v in iteritems(params)
if is_json_serializable(v) and
(k != 'steps'))
return params
with sessionbuilder() as session:
# requery the history every iteration, because another worker
# process may have written to it in the meantime
history = [[t.parameters, t.test_scores, t.status]
for t in session.query(Trial).all()
if t.project_name == project_name]
print('History contains: %d trials' % len(history))
print('Choosing next hyperparameters with %s...' % strategy.short_name)
start = time.time()
if max_param_suggestion_retries is None:
params = strategy.suggest(history, searchspace)
full_params = build_full_params(params)
else:
for num_retries in range(max_param_suggestion_retries):
params = strategy.suggest(history, searchspace)
full_params = build_full_params(params)
if not strategy.is_repeated_suggestion(full_params, history):
break
else:
raise MaxParamSuggestionRetriesExceeded
print(' %r' % params)
print('(%s took %.3f s)\n' % (strategy.short_name,
time.time() - start))
assert len(params) == searchspace.n_dims
t = Trial(status='PENDING', parameters=full_params, host=gethostname(),
user=getuser(), started=datetime.now(),
config_sha1=config_sha1)
session.add(t)
session.commit()
trial_id = t.id
return trial_id, params
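def _example_full_params():
    # Illustrative sketch only (the estimator below is a placeholder): the same
    # clone/set_params pattern used by build_full_params() above, so that the
    # suggested values are merged with the estimator's defaults in one dict.
    from sklearn.linear_model import LogisticRegression
    suggested = {'C': 10.0}
    params = clone(LogisticRegression()).set_params(**suggested).get_params()
    return dict((k, v) for k, v in iteritems(params) if is_json_serializable(v))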
def run_single_trial(estimator, params, trial_id, scoring, X, y, cv, n_jobs,
sessionbuilder):
status = None
try:
score = fit_and_score_estimator(
estimator, params, cv=cv, scoring=scoring, X=X, y=y, n_jobs=n_jobs,
verbose=1)
with sessionbuilder() as session:
trial = session.query(Trial).get(trial_id)
trial.mean_test_score = score['mean_test_score']
trial.mean_train_score = score['mean_train_score']
trial.test_scores = score['test_scores']
trial.train_scores = score['train_scores']
trial.n_test_samples = score['n_test_samples']
trial.n_train_samples = score['n_train_samples']
trial.status = 'SUCCEEDED'
best_so_far = session.query(
func.max(Trial.mean_test_score)).first()
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('Success! Model score = %f' % trial.mean_test_score)
print('(best score so far = %f)' %
max(trial.mean_test_score, best_so_far[0]))
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
trial.completed = datetime.now()
trial.elapsed = trial.completed - trial.started
session.commit()
status = trial.status
except Exception:
buf = cStringIO()
traceback.print_exc(file=buf)
with sessionbuilder() as session:
trial = session.query(Trial).get(trial_id)
trial.traceback = buf.getvalue()
trial.status = 'FAILED'
print('-'*78, file=sys.stderr)
print('Exception encountered while fitting model')
print('-'*78, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
print('-'*78, file=sys.stderr)
session.commit()
status = trial.status
except (KeyboardInterrupt, SystemExit):
with sessionbuilder() as session:
trial = session.query(Trial).get(trial_id)
trial.status = 'FAILED'
session.commit()
sys.exit(1)
return status
def print_header():
print('='*70)
print('= osprey is a tool for machine learning '
'hyperparameter optimization. =')
print('='*70)
print()
print('osprey version: %s' % __version__)
print('time: %s' % current_pretty_time())
print('hostname: %s' % gethostname())
print('cwd: %s' % os.path.abspath(os.curdir))
print('pid: %s' % os.getpid())
print()
def print_msmbuilder_version():
from msmbuilder.version import full_version as msmb_version
from mdtraj.version import full_version as mdtraj_version
print()
print('msmbuilder version: %s' % msmb_version)
print('mdtraj version: %s' % mdtraj_version)
print()
def print_footer(statuses, start_time, signum=None):
n_successes = sum(s == 'SUCCEEDED' for s in statuses)
elapsed = format_timedelta(datetime.now() - start_time)
print()
if signum is not None:
sigmap = dict((k, v) for v, k in iteritems(signal.__dict__)
if v.startswith('SIG'))
signame = sigmap.get(signum, 'Unknown')
print('== osprey worker received signal %s!' % signame,
file=sys.stderr)
print('== exiting immediately.', file=sys.stderr)
print('%d/%d models fit successfully.' % (n_successes, len(statuses)))
print('time: %s' % current_pretty_time())
print('elapsed: %s.' % elapsed)
print('osprey worker exiting.')
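def _example_local_run():
    # Minimal local sketch (this module is normally driven by the osprey CLI):
    # execute() only reads a few fields from an argparse-style namespace, so a
    # plain Namespace is enough. The config path and values are placeholders.
    from argparse import Namespace
    example_args = Namespace(config='config.yaml', n_iters=1, n_jobs=1, seed=42)
    return execute(example_args, parser=None)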
|
pandegroup/osprey
|
osprey/execute_worker.py
|
Python
|
apache-2.0
| 9,192
|
[
"MDTraj"
] |
535e83c1fd1c6472287b58ed5bd7022a9d8aeb3207dc7a34c9fb21d109d42345
|
import os
import shutil
import mkdocs
import MooseDocs
import logging
log = logging.getLogger(__name__)
def build_options(parser, subparser):
"""
Command-line options for build command.
"""
build_parser = subparser.add_parser('build', help='Generate and Build the documentation for serving.')
return build_parser
def update_extra():
"""
Loop through the js/css directories of MOOSE; if a file in the local build is older than the one in MOOSE,
then copy the newer one from MOOSE.
"""
for d in ['js', 'css']:
loc = os.path.join(MooseDocs.MOOSE_DIR, 'docs', d)
for root, dirs, files in os.walk(loc):
for filename in files:
src = os.path.join(loc, filename)
dst = os.path.join(d, filename)
if (not os.path.exists(dst)) or (os.path.getmtime(src) > os.path.getmtime(dst)):
dst_dir = os.path.dirname(dst)
if not os.path.exists(dst_dir):
log.debug('Creating {} directory.'.format(d))
os.makedirs(dst_dir)
log.debug('Copying file {} --> {}'.format(src, dst))
shutil.copy(src, dst)
def build(config_file='moosedocs.yml', live_server=False, pages='pages.yml', page_keys=[], clean_site_dir=False, **kwargs):
"""
Build the documentation using mkdocs build command.
Args:
config_file[str]: (Default: 'moosedocs.yml') The configuration file to pass to mkdocs.
"""
pages = MooseDocs.load_pages(pages, keys=page_keys)
config = mkdocs.config.load_config(config_file, pages=pages, **kwargs)
update_extra()
mkdocs.commands.build.build(config)
mkdocs.utils.copy_media_files(config['docs_dir'], config['site_dir'])
return config
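if __name__ == '__main__':
    # Illustrative sketch only: build the site from the default configuration
    # files named above. Whether 'moosedocs.yml' and 'pages.yml' exist depends
    # on the local checkout, so treat the paths as assumptions.
    logging.basicConfig(level=logging.INFO)
    build(config_file='moosedocs.yml', pages='pages.yml')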
|
vityurkiv/Ox
|
python/MooseDocs/commands/build.py
|
Python
|
lgpl-2.1
| 1,652
|
[
"MOOSE"
] |
46b6d25b564fc0722d4d8b6b481ebed2add8c1cd2229592d42e045d8c0f3b711
|
#!/usr/bin/env python
############################################################################
# Copyright (c) 2011-2014 Saint-Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import shutil
from site import addsitedir
from distutils import dir_util
import sys
import getopt
import logging
import platform
import errno
import spades_init
spades_init.init()
spades_home = spades_init.spades_home
bin_home = spades_init.bin_home
python_modules_home = spades_init.python_modules_home
ext_python_modules_home = spades_init.ext_python_modules_home
spades_version = spades_init.spades_version
import support
support.check_python_version()
from process_cfg import merge_configs, empty_config, load_config_from_file
import hammer_logic
import spades_logic
import options_storage
addsitedir(ext_python_modules_home)
if sys.version.startswith('2.'):
import pyyaml2 as pyyaml
elif sys.version.startswith('3.'):
import pyyaml3 as pyyaml
def print_used_values(cfg, log):
def print_value(cfg, section, param, pretty_param="", margin=" "):
if not pretty_param:
pretty_param = param.capitalize().replace('_', ' ')
line = margin + pretty_param
if param in cfg[section].__dict__:
line += ": " + str(cfg[section].__dict__[param])
else:
if param.find("offset") != -1:
line += " will be auto-detected"
log.info(line)
log.info("")
# system info
log.info("System information:")
try:
log.info(" rnaSPAdes version: " + str(spades_version).strip())
log.info(" Python version: " + ".".join(map(str, sys.version_info[0:3])))
# for more details: '[' + str(sys.version_info) + ']'
log.info(" OS: " + platform.platform())
# for more details: '[' + str(platform.uname()) + ']'
except Exception:
log.info(" Problem occurred when getting system information")
log.info("")
# main
print_value(cfg, "common", "output_dir", "", "")
if ("error_correction" in cfg) and (not "assembly" in cfg):
log.info("Mode: ONLY read error correction (without assembling)")
elif (not "error_correction" in cfg) and ("assembly" in cfg):
log.info("Mode: ONLY assembling (without read error correction)")
else:
log.info("Mode: read error correction and assembling")
if ("common" in cfg) and ("developer_mode" in cfg["common"].__dict__):
if cfg["common"].developer_mode:
log.info("Debug mode is turned ON")
else:
log.info("Debug mode is turned OFF")
log.info("")
# dataset
if "dataset" in cfg:
log.info("Dataset parameters:")
if cfg["dataset"].single_cell:
log.info(" Single-cell mode")
elif cfg["dataset"].rna:
log.info(" RNA mode")
else:
log.info(" Multi-cell mode (you should set '--sc' flag if input data"\
" was obtained with MDA (single-cell) technology")
if cfg["dataset"].iontorrent:
log.info(" IonTorrent data")
log.info(" Reads:")
dataset_data = pyyaml.load(open(cfg["dataset"].yaml_filename, 'r'))
dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(cfg["dataset"].yaml_filename))
support.pretty_print_reads(dataset_data, log)
# error correction
if "error_correction" in cfg:
log.info("Read error correction parameters:")
print_value(cfg, "error_correction", "max_iterations", "Iterations")
print_value(cfg, "error_correction", "qvoffset", "PHRED offset")
if cfg["error_correction"].gzip_output:
log.info(" Corrected reads will be compressed (with gzip)")
else:
log.info(" Corrected reads will NOT be compressed (with gzip)")
# assembly
if "assembly" in cfg:
log.info("Assembly parameters:")
if options_storage.auto_K_allowed():
log.info(" k: automatic selection based on read length")
else:
print_value(cfg, "assembly", "iterative_K", "k")
if cfg["assembly"].disable_rr:
log.info(" Repeat resolution is DISABLED")
else:
log.info(" Repeat resolution is enabled")
if "mismatch_corrector" in cfg:
log.info(" MismatchCorrector will be used")
else:
log.info(" MismatchCorrector will be SKIPPED")
if cfg["assembly"].cov_cutoff == 'off':
log.info(" Coverage cutoff is turned OFF")
elif cfg["assembly"].cov_cutoff == 'auto':
log.info(" Coverage cutoff is turned ON and threshold will be auto-detected")
else:
log.info(" Coverage cutoff is turned ON and threshold is " + str(cfg["assembly"].cov_cutoff))
log.info("Other parameters:")
print_value(cfg, "common", "tmp_dir", "Dir for temp files")
print_value(cfg, "common", "max_threads", "Threads")
print_value(cfg, "common", "max_memory", "Memory limit (in Gb)", " ")
log.info("")
def fill_cfg(options_to_parse, log):
try:
options, not_options = getopt.gnu_getopt(options_to_parse, options_storage.short_options, options_storage.long_options)
except getopt.GetoptError:
_, exc, _ = sys.exc_info()
sys.stderr.write(str(exc) + "\n")
sys.stderr.flush()
options_storage.usage(spades_version)
sys.exit(1)
if not options:
options_storage.usage(spades_version)
sys.exit(1)
if len(not_options) > 1:
for opt, arg in options:
if opt == "-k" and arg.strip().endswith(','):
support.error("Do not put spaces after commas in the list of k-mers sizes! Correct example: -k 21,33,55", log)
support.error("Please specify option (e.g. -1, -2, -s, etc) for the following paths: " + ", ".join(not_options[1:]) + "\n", log)
# all parameters are stored here
cfg = dict()
# dataset is stored here. We are prepared for up to MAX_LIBS_NUMBER libraries of each short-reads type
dataset_data = [{} for i in range(options_storage.MAX_LIBS_NUMBER *
len(options_storage.SHORT_READS_TYPES.keys()) +
len(options_storage.LONG_READS_TYPES))] # "[{}]*num" doesn't work here!
# for parsing options from "previous run command"
options_storage.continue_mode = False
options_storage.k_mers = None
options_storage.rna = True
for opt, arg in options:
if opt == '-o':
options_storage.output_dir = os.path.abspath(arg)
elif opt == "--tmp-dir":
options_storage.tmp_dir = os.path.abspath(arg)
elif opt == "--configs-dir":
options_storage.configs_dir = support.check_dir_existence(arg)
elif opt == "--reference":
options_storage.reference = support.check_file_existence(arg, 'reference', log)
elif opt == "--dataset":
options_storage.dataset_yaml_filename = support.check_file_existence(arg, 'dataset', log)
elif opt in options_storage.reads_options:
support.add_to_dataset(opt, arg, dataset_data)
elif opt == '-k':
if arg == 'auto':
options_storage.k_mers = arg
else:
options_storage.k_mers = list(map(int, arg.split(",")))
for k in options_storage.k_mers:
if k < options_storage.MIN_K or k > options_storage.MAX_K:
support.error('wrong k value ' + str(k) + ': all k values should be between %d and %d' %
(options_storage.MIN_K, options_storage.MAX_K), log)
if k % 2 == 0:
support.error('wrong k value ' + str(k) + ': all k values should be odd', log)
elif opt == "--sc":
options_storage.single_cell = True
elif opt == "--rna":
options_storage.rna = True
elif opt == "--iontorrent":
options_storage.iontorrent = True
elif opt == "--disable-gzip-output":
options_storage.disable_gzip_output = True
elif opt == "--disable-gzip-output:false":
options_storage.disable_gzip_output = False
elif opt == "--disable-rr":
options_storage.disable_rr = True
elif opt == "--disable-rr:false":
options_storage.disable_rr = False
elif opt == "--only-error-correction":
if options_storage.only_assembler:
support.error('you cannot specify --only-error-correction and --only-assembler simultaneously')
options_storage.only_error_correction = True
elif opt == "--only-assembler":
if options_storage.only_error_correction:
support.error('you cannot specify --only-error-correction and --only-assembler simultaneously')
options_storage.only_assembler = True
elif opt == "--read-buffer-size":
options_storage.read_buffer_size = int(arg)
elif opt == "--bh-heap-check":
options_storage.bh_heap_check = arg
elif opt == "--spades-heap-check":
options_storage.spades_heap_check = arg
elif opt == "--continue":
options_storage.continue_mode = True
elif opt == "--restart-from":
if arg not in ['ec', 'as', 'mc'] and not arg.startswith('k'):
support.error("wrong value for --restart-from option: " + arg +
" (should be 'ec', 'as', 'k<int>', or 'mc'", log)
options_storage.continue_mode = True
options_storage.restart_from = arg
elif opt == '-t' or opt == "--threads":
options_storage.threads = int(arg)
elif opt == '-m' or opt == "--memory":
options_storage.memory = int(arg)
elif opt == "--phred-offset":
if arg == 'auto':
options_storage.qvoffset = arg
elif arg in ['33', '64']:
options_storage.qvoffset = int(arg)
else:
support.error('wrong PHRED quality offset value: ' + arg +
' (should be either 33, 64, or \'auto\')', log)
elif opt == "--cov-cutoff":
if arg == 'auto' or arg == 'off':
options_storage.cov_cutoff = arg
elif support.is_float(arg) and float(arg) > 0.0:
options_storage.cov_cutoff = float(arg)
else:
support.error('wrong value for --cov-cutoff option: ' + arg +
' (should be a positive float number, or \'auto\', or \'off\')', log)
elif opt == '-i' or opt == "--iterations":
options_storage.iterations = int(arg)
elif opt == "--debug":
options_storage.developer_mode = True
elif opt == "--debug:false":
options_storage.developer_mode = False
#corrector
elif opt == "--mismatch-correction":
options_storage.mismatch_corrector = True
elif opt == "--mismatch-correction:false":
options_storage.mismatch_corrector = False
elif opt == "--careful":
support.error('Option --careful is deprecated in rnaSPAdes pipeline, please remove it from the command line!', log)
options_storage.mismatch_corrector = True
options_storage.careful = True
elif opt == "--careful:false":
options_storage.mismatch_corrector = False
options_storage.careful = False
elif opt == '-h' or opt == "--help":
options_storage.usage(spades_version)
sys.exit(0)
elif opt == "--help-hidden":
options_storage.usage(spades_version, True)
sys.exit(0)
elif opt == "--test":
options_storage.set_test_options()
support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
#break
elif opt == "--diploid":
options_storage.diploid_mode = True
elif opt == "--draft-assembly":
options_storage.draft_assembly = True
elif opt == "--draft-assembly:false":
options_storage.draft_assembly = False
elif opt == "--min-complete-transcript":
options_storage.min_complete_transcript = int(arg)
else:
raise ValueError
if not options_storage.output_dir:
support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log)
if not os.path.isdir(options_storage.output_dir):
if options_storage.continue_mode:
support.error("the output_dir should exist for --continue and for --restart-from!", log)
os.makedirs(options_storage.output_dir)
if options_storage.restart_from:
if options_storage.continue_mode: # saving parameters specified with --restart-from
if not support.dataset_is_empty(dataset_data):
support.error("you cannot specify reads with --restart-from option!", log)
options_storage.save_restart_options(log)
else: # overriding previous run parameters
options_storage.load_restart_options()
if options_storage.continue_mode:
return None, None
if options_storage.dataset_yaml_filename:
try:
dataset_data = pyyaml.load(open(options_storage.dataset_yaml_filename, 'r'))
except pyyaml.YAMLError:
_, exc, _ = sys.exc_info()
support.error('exception caught while parsing YAML file (' + options_storage.dataset_yaml_filename + '):\n' + str(exc))
dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(options_storage.dataset_yaml_filename))
else:
dataset_data = support.correct_dataset(dataset_data)
dataset_data = support.relative2abs_paths(dataset_data, os.getcwd())
options_storage.dataset_yaml_filename = os.path.join(options_storage.output_dir, "input_dataset.yaml")
pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
support.check_dataset_reads(dataset_data, options_storage.only_assembler, log)
if not support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_CONSTRUCTION):
support.error('you should specify at least one unpaired, paired-end, or high-quality mate-pairs library!')
options_storage.set_default_values()
### FILLING cfg
cfg["common"] = empty_config()
cfg["dataset"] = empty_config()
if not options_storage.only_assembler:
cfg["error_correction"] = empty_config()
if not options_storage.only_error_correction:
cfg["assembly"] = empty_config()
# common
cfg["common"].__dict__["output_dir"] = options_storage.output_dir
cfg["common"].__dict__["tmp_dir"] = options_storage.tmp_dir
cfg["common"].__dict__["max_threads"] = options_storage.threads
cfg["common"].__dict__["max_memory"] = options_storage.memory
cfg["common"].__dict__["developer_mode"] = options_storage.developer_mode
# dataset section
cfg["dataset"].__dict__["single_cell"] = options_storage.single_cell
cfg["dataset"].__dict__["rna"] = options_storage.rna
cfg["dataset"].__dict__["iontorrent"] = options_storage.iontorrent
cfg["dataset"].__dict__["yaml_filename"] = options_storage.dataset_yaml_filename
if options_storage.developer_mode and options_storage.reference:
cfg["dataset"].__dict__["reference"] = options_storage.reference
# error correction
if (not options_storage.only_assembler) and (options_storage.iterations > 0):
cfg["error_correction"].__dict__["output_dir"] = os.path.join(cfg["common"].output_dir, "corrected")
cfg["error_correction"].__dict__["max_iterations"] = options_storage.iterations
cfg["error_correction"].__dict__["gzip_output"] = not options_storage.disable_gzip_output
if options_storage.qvoffset:
cfg["error_correction"].__dict__["qvoffset"] = options_storage.qvoffset
if options_storage.bh_heap_check:
cfg["error_correction"].__dict__["heap_check"] = options_storage.bh_heap_check
cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent
# assembly
if not options_storage.only_error_correction:
if options_storage.k_mers:
cfg["assembly"].__dict__["iterative_K"] = options_storage.k_mers
elif options_storage.draft_assembly:
cfg["assembly"].__dict__["iterative_K"] = options_storage.K_MERS_DRAFT
else:
cfg["assembly"].__dict__["iterative_K"] = options_storage.K_MERS_SHORT
cfg["assembly"].__dict__["careful"] = options_storage.careful
cfg["assembly"].__dict__["disable_rr"] = options_storage.disable_rr
cfg["assembly"].__dict__["diploid_mode"] = options_storage.diploid_mode
cfg["assembly"].__dict__["cov_cutoff"] = options_storage.cov_cutoff
if options_storage.spades_heap_check:
cfg["assembly"].__dict__["heap_check"] = options_storage.spades_heap_check
if options_storage.read_buffer_size:
cfg["assembly"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
#corrector can work only if contigs exist (not only error correction)
if (not options_storage.only_error_correction) and options_storage.mismatch_corrector:
cfg["mismatch_corrector"] = empty_config()
cfg["mismatch_corrector"].__dict__["skip-masked"] = None
cfg["mismatch_corrector"].__dict__["bwa"] = os.path.join(bin_home, "bwa-spades")
cfg["mismatch_corrector"].__dict__["threads"] = options_storage.threads
cfg["mismatch_corrector"].__dict__["output-dir"] = options_storage.output_dir
#rna
if options_storage.min_complete_transcript:
cfg["assembly"].__dict__["min_complete_transcript"] = options_storage.min_complete_transcript
return cfg, dataset_data
def check_cfg_for_restart_from(cfg):
if options_storage.restart_from == 'ec' and ("error_correction" not in cfg):
support.error("failed to restart from read error correction because this stage was not specified!")
if options_storage.restart_from == 'mc' and ("mismatch_corrector" not in cfg):
support.error("failed to restart from mismatch correction because this stage was not specified!")
if options_storage.restart_from == 'as' or options_storage.restart_from.startswith('k'):
if "assembly" not in cfg:
support.error("failed to restart from assembling because this stage was not specified!")
if options_storage.restart_from.startswith('k'):
correct_k = False
k_to_check = options_storage.k_mers
if not k_to_check:
if options_storage.auto_K_allowed():
k_to_check = list(set(options_storage.K_MERS_SHORT + options_storage.K_MERS_150 + options_storage.K_MERS_250))
else:
k_to_check = options_storage.K_MERS_SHORT
for k in k_to_check:
if options_storage.restart_from == ("k%d" % k) or options_storage.restart_from.startswith("k%d:" % k):
correct_k = True
break
if not correct_k:
k_str = options_storage.restart_from[1:]
if k_str.find(":") != -1:
k_str = k_str[:k_str.find(":")]
support.error("failed to restart from K=%s because this K was not specified!" % k_str)
def get_options_from_params(params_filename, spades_py_name=None):
if not os.path.isfile(params_filename):
return None, None
params = open(params_filename, 'r')
cmd_line = params.readline().strip()
spades_prev_version = None
for line in params:
if line.find('rnaSPAdes version:') != -1:
spades_prev_version = line.split('rnaSPAdes version:')[1]
break
params.close()
if spades_prev_version is None:
support.error("failed to parse rnaSPAdes version of the previous run! "
"Please restart from the beginning or specify another output directory.")
if spades_prev_version.strip() != spades_version.strip():
support.error("rnaSPAdes version of the previous run (%s) is not equal to the current version of rnaSPAdes (%s)! "
"Please restart from the beginning or specify another output directory."
% (spades_prev_version.strip(), spades_version.strip()))
if spades_py_name is None or cmd_line.find(os.path.basename(spades_py_name)) == -1:
spades_py_name = 'spades.py' # try default name
else:
spades_py_name = os.path.basename(spades_py_name)
spades_py_pos = cmd_line.find(spades_py_name)
if spades_py_pos == -1:
return None, None
return cmd_line, cmd_line[spades_py_pos + len(spades_py_name):].split()
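def _example_params_roundtrip():
    """Illustrative sketch only (not part of the original pipeline): write a
    minimal params.txt in the format parsed above and read it back. The
    option values are placeholders."""
    params_filename = "example_params.txt"
    params = open(params_filename, 'w')
    params.write("spades.py -o out_dir --rna -1 left.fq -2 right.fq\n")
    params.write("rnaSPAdes version: " + spades_version + "\n")
    params.close()
    # returns the full command line and the option tokens after "spades.py"
    return get_options_from_params(params_filename, "spades.py")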
def main(args):
os.environ["LC_ALL"] = "C"
if len(args) == 1:
options_storage.usage(spades_version)
sys.exit(0)
log = logging.getLogger('spades')
log.setLevel(logging.DEBUG)
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter('%(message)s'))
console.setLevel(logging.DEBUG)
log.addHandler(console)
support.check_binaries(bin_home, log)
# parse options and save all parameters to cfg
options = args
cfg, dataset_data = fill_cfg(options, log)
if options_storage.continue_mode:
cmd_line, options = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
if not options:
support.error("failed to parse command line of the previous run! Please restart from the beginning or specify another output directory.")
cfg, dataset_data = fill_cfg(options, log)
if options_storage.restart_from:
check_cfg_for_restart_from(cfg)
options_storage.continue_mode = True
log_filename = os.path.join(cfg["common"].output_dir, "spades.log")
if options_storage.continue_mode:
log_handler = logging.FileHandler(log_filename, mode='a')
else:
log_handler = logging.FileHandler(log_filename, mode='w')
log.addHandler(log_handler)
if options_storage.continue_mode:
log.info("\n======= rnaSPAdes pipeline continued. Log can be found here: " + log_filename + "\n")
log.info("Restored from " + cmd_line)
if options_storage.restart_from:
updated_params = ""
flag = False
for v in args[1:]:
if v == '-o' or v == '--restart-from':
flag = True
continue
if flag:
flag = False
continue
updated_params += " " + v
updated_params = updated_params.strip()
log.info("with updated parameters: " + updated_params)
cmd_line += " " + updated_params
log.info("")
params_filename = os.path.join(cfg["common"].output_dir, "params.txt")
params_handler = logging.FileHandler(params_filename, mode='w')
log.addHandler(params_handler)
if options_storage.continue_mode:
log.info(cmd_line)
else:
command = "Command line:"
for v in args:
command += " " + v
log.info(command)
# special case
# if "mismatch_corrector" in cfg and not support.get_lib_ids_by_type(dataset_data, 'paired-end'):
# support.warning('cannot perform mismatch correction without at least one paired-end library! Skipping this step.', log)
# del cfg["mismatch_corrector"]
print_used_values(cfg, log)
log.removeHandler(params_handler)
support.check_single_reads_in_options(options, log)
if not options_storage.continue_mode:
log.info("\n======= rnaSPAdes pipeline started. Log can be found here: " + log_filename + "\n")
# splitting interlaced reads and processing Ns in additional contigs if needed
if support.dataset_has_interlaced_reads(dataset_data) or support.dataset_has_additional_contigs(dataset_data)\
or support.dataset_has_nxmate_reads(dataset_data):
dir_for_split_reads = os.path.join(options_storage.output_dir, 'split_input')
if support.dataset_has_interlaced_reads(dataset_data) or support.dataset_has_nxmate_reads(dataset_data):
if not os.path.isdir(dir_for_split_reads):
os.makedirs(dir_for_split_reads)
if support.dataset_has_interlaced_reads(dataset_data):
dataset_data = support.split_interlaced_reads(dataset_data, dir_for_split_reads, log)
if support.dataset_has_nxmate_reads(dataset_data):
dataset_data = support.process_nxmate_reads(dataset_data, dir_for_split_reads, log)
if support.dataset_has_additional_contigs(dataset_data):
dataset_data = support.process_Ns_in_additional_contigs(dataset_data, dir_for_split_reads, log)
options_storage.dataset_yaml_filename = os.path.join(options_storage.output_dir, "input_dataset.yaml")
pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'))
cfg["dataset"].yaml_filename = options_storage.dataset_yaml_filename
try:
# copying configs before all computations (to prevent its changing at run time)
tmp_configs_dir = os.path.join(cfg["common"].output_dir, "configs")
if os.path.isdir(tmp_configs_dir) and not options_storage.continue_mode:
shutil.rmtree(tmp_configs_dir)
if not os.path.isdir(tmp_configs_dir):
if options_storage.configs_dir:
dir_util.copy_tree(options_storage.configs_dir, tmp_configs_dir, preserve_times=False)
else:
dir_util.copy_tree(os.path.join(spades_home, "configs"), tmp_configs_dir, preserve_times=False)
corrected_dataset_yaml_filename = ''
if "error_correction" in cfg:
STAGE_NAME = "Read error correction"
bh_cfg = merge_configs(cfg["error_correction"], cfg["common"])
corrected_dataset_yaml_filename = os.path.join(bh_cfg.output_dir, "corrected.yaml")
if os.path.isfile(corrected_dataset_yaml_filename) and options_storage.continue_mode \
and not options_storage.restart_from == "ec":
log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
else:
support.continue_from_here(log)
if "HEAPCHECK" in os.environ:
del os.environ["HEAPCHECK"]
if "heap_check" in bh_cfg.__dict__:
os.environ["HEAPCHECK"] = bh_cfg.heap_check
if os.path.exists(bh_cfg.output_dir):
shutil.rmtree(bh_cfg.output_dir)
os.makedirs(bh_cfg.output_dir)
if support.get_lib_ids_by_type(dataset_data, options_storage.LONG_READS_TYPES):
not_used_dataset_data = support.get_libs_by_type(dataset_data, options_storage.LONG_READS_TYPES)
to_correct_dataset_data = support.rm_libs_by_type(dataset_data, options_storage.LONG_READS_TYPES)
to_correct_dataset_yaml_filename = os.path.join(bh_cfg.output_dir, "to_correct.yaml")
pyyaml.dump(to_correct_dataset_data, open(to_correct_dataset_yaml_filename, 'w'))
bh_cfg.__dict__["dataset_yaml_filename"] = to_correct_dataset_yaml_filename
else:
not_used_dataset_data = None
bh_cfg.__dict__["dataset_yaml_filename"] = cfg["dataset"].yaml_filename
log.info("\n===== %s started. \n" % STAGE_NAME)
hammer_logic.run_hammer(corrected_dataset_yaml_filename, tmp_configs_dir, bin_home, bh_cfg, not_used_dataset_data,
ext_python_modules_home, log)
log.info("\n===== %s finished. \n" % STAGE_NAME)
result_contigs_filename = os.path.join(cfg["common"].output_dir, "contigs.fasta")
result_scaffolds_filename = os.path.join(cfg["common"].output_dir, "scaffolds.fasta")
misc_dir = os.path.join(cfg["common"].output_dir, "misc")
### if mismatch correction is enabled then result contigs are copied to misc directory
assembled_contigs_filename = os.path.join(misc_dir, "assembled_contigs.fasta")
assembled_scaffolds_filename = os.path.join(misc_dir, "assembled_scaffolds.fasta")
if "assembly" in cfg:
STAGE_NAME = "Assembling"
spades_cfg = merge_configs(cfg["assembly"], cfg["common"])
spades_cfg.__dict__["result_contigs"] = result_contigs_filename
spades_cfg.__dict__["result_scaffolds"] = result_scaffolds_filename
if options_storage.continue_mode and (os.path.isfile(spades_cfg.result_contigs)
or ("mismatch_corrector" in cfg and
os.path.isfile(assembled_contigs_filename)))\
and not options_storage.restart_from == 'as' \
and not (options_storage.restart_from and options_storage.restart_from.startswith('k')):
log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
# calculating latest_dir for the next stages
latest_dir = support.get_latest_dir(os.path.join(spades_cfg.output_dir, "K*"))
if not latest_dir:
support.error("failed to continue the previous run! Please restart from previous stages or from the beginning.", log)
else:
old_result_files = [result_contigs_filename, result_scaffolds_filename,
assembled_contigs_filename, assembled_scaffolds_filename]
for format in [".fasta", ".fastg"]:
for old_result_file in old_result_files:
if os.path.isfile(old_result_file[:-6] + format):
os.remove(old_result_file[:-6] + format)
if options_storage.restart_from == 'as':
support.continue_from_here(log)
if os.path.isfile(corrected_dataset_yaml_filename):
dataset_data = pyyaml.load(open(corrected_dataset_yaml_filename, 'r'))
dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(corrected_dataset_yaml_filename))
if spades_cfg.disable_rr:
spades_cfg.__dict__["rr_enable"] = False
else:
spades_cfg.__dict__["rr_enable"] = True
if "HEAPCHECK" in os.environ:
del os.environ["HEAPCHECK"]
if "heap_check" in spades_cfg.__dict__:
os.environ["HEAPCHECK"] = spades_cfg.heap_check
log.info("\n===== %s started.\n" % STAGE_NAME)
# creating dataset
dataset_filename = os.path.join(spades_cfg.output_dir, "dataset.info")
if not os.path.isfile(dataset_filename) or not options_storage.continue_mode:
dataset_file = open(dataset_filename, 'w')
import process_cfg
dataset_file.write("single_cell" + '\t' + process_cfg.bool_to_str(cfg["dataset"].single_cell) + '\n')
dataset_file.write("rna" + '\t' + process_cfg.bool_to_str(cfg["dataset"].rna) + '\n')
if os.path.isfile(corrected_dataset_yaml_filename):
dataset_file.write("reads" + '\t' + process_cfg.process_spaces(corrected_dataset_yaml_filename) + '\n')
else:
dataset_file.write("reads" + '\t' + process_cfg.process_spaces(cfg["dataset"].yaml_filename) + '\n')
if spades_cfg.developer_mode and "reference" in cfg["dataset"].__dict__:
dataset_file.write("reference_genome" + '\t')
dataset_file.write(process_cfg.process_spaces(cfg["dataset"].reference) + '\n')
dataset_file.close()
spades_cfg.__dict__["dataset"] = dataset_filename
latest_dir = spades_logic.run_spades(tmp_configs_dir, bin_home, spades_cfg, dataset_data, ext_python_modules_home, log)
if os.path.isdir(misc_dir) and not options_storage.continue_mode:
shutil.rmtree(misc_dir)
if not os.path.isdir(misc_dir):
os.makedirs(misc_dir)
if options_storage.continue_mode and options_storage.restart_from and options_storage.restart_from.startswith('k'):
k_str = options_storage.restart_from[1:]
if k_str.find(":") != -1:
k_str = k_str[:k_str.find(":")]
support.error("failed to continue from K=%s because this K was not processed in the original run!" % k_str, log)
log.info("\n===== %s finished. \n" % STAGE_NAME)
#corrector
if "mismatch_corrector" in cfg and (os.path.isfile(result_contigs_filename) or
(options_storage.continue_mode and os.path.isfile(assembled_contigs_filename))):
STAGE_NAME = "Mismatch correction"
to_correct = dict()
to_correct["contigs"] = (result_contigs_filename, assembled_contigs_filename)
if os.path.isfile(result_scaffolds_filename) or (options_storage.continue_mode and
os.path.isfile(assembled_scaffolds_filename)):
to_correct["scaffolds"] = (result_scaffolds_filename, assembled_scaffolds_filename)
# moving assembled contigs (scaffolds) to misc dir
for assembly_type, (old, new) in to_correct.items():
if options_storage.continue_mode and os.path.isfile(new):
continue
for format in [".fasta", ".fastg"]:
if os.path.isfile(old[:-6] + format):
shutil.move(old[:-6] + format, new[:-6] + format)
if options_storage.continue_mode and os.path.isfile(result_contigs_filename) and \
(os.path.isfile(result_scaffolds_filename) or not os.path.isfile(assembled_scaffolds_filename)) \
and not options_storage.restart_from == 'mc':
log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
else:
if options_storage.restart_from == 'mc':
support.continue_from_here(log)
log.info("\n===== %s started." % STAGE_NAME)
# detecting paired-end library with the largest insert size
cfg["mismatch_corrector"].__dict__["dataset"] = cfg["dataset"].yaml_filename
#TODO: add reads orientation
import corrector_logic
corrector_cfg = cfg["mismatch_corrector"]
# processing contigs and scaffolds (or only contigs)
for assembly_type, (corrected, assembled) in to_correct.items():
if options_storage.continue_mode and os.path.isfile(corrected):
log.info("\n== Skipping processing of " + assembly_type + " (already processed)\n")
continue
support.continue_from_here(log)
log.info("\n== Processing of " + assembly_type + "\n")
tmp_dir_for_corrector = os.path.join(cfg["common"].output_dir, "mismatch_corrector", assembly_type)
cfg["mismatch_corrector"].__dict__["output_dir"] = tmp_dir_for_corrector
# correcting
corr_cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"])
result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta")
corrector_logic.run_corrector( tmp_configs_dir, bin_home, corr_cfg,
ext_python_modules_home, log, assembled, result_corrected_filename)
if os.path.isfile(result_corrected_filename):
shutil.copyfile(result_corrected_filename, corrected)
tmp_d = os.path.join(tmp_dir_for_corrector, "tmp")
if os.path.isdir(tmp_d) and not cfg["common"].developer_mode:
shutil.rmtree(tmp_d)
assembled_fastg = assembled[:-6] + ".fastg"
if os.path.isfile(assembled_fastg):
support.create_fastg_from_fasta(corrected, assembled_fastg, log)
log.info("\n===== %s finished.\n" % STAGE_NAME)
if not cfg["common"].developer_mode and os.path.isdir(tmp_configs_dir):
shutil.rmtree(tmp_configs_dir)
#log.info("")
if "error_correction" in cfg and os.path.isdir(os.path.dirname(corrected_dataset_yaml_filename)):
log.info(" * Corrected reads are in " + support.process_spaces(os.path.dirname(corrected_dataset_yaml_filename) + "/"))
if "assembly" in cfg and os.path.isfile(result_contigs_filename):
message = " * Assembled contigs are in " + support.process_spaces(result_contigs_filename)
if os.path.isfile(result_contigs_filename[:-6] + ".fastg"):
message += " (" + os.path.basename(result_contigs_filename[:-6] + ".fastg") + ")"
log.info(message)
if "assembly" in cfg and os.path.isfile(result_scaffolds_filename):
message = " * Assembled scaffolds are in " + support.process_spaces(result_scaffolds_filename)
if os.path.isfile(result_scaffolds_filename[:-6] + ".fastg"):
message += " (" + os.path.basename(result_scaffolds_filename[:-6] + ".fastg") + ")"
log.info(message)
#log.info("")
#breaking scaffolds
if os.path.isfile(result_scaffolds_filename):
if not os.path.isdir(misc_dir):
os.makedirs(misc_dir)
result_broken_scaffolds = os.path.join(misc_dir, "broken_scaffolds.fasta")
if not os.path.isfile(result_broken_scaffolds) or not options_storage.continue_mode:
modified, broken_scaffolds = support.break_scaffolds(result_scaffolds_filename,
options_storage.THRESHOLD_FOR_BREAKING_SCAFFOLDS)
if modified:
support.write_fasta(result_broken_scaffolds, broken_scaffolds)
#log.info(" * Scaffolds broken by " + str(options_storage.THRESHOLD_FOR_BREAKING_SCAFFOLDS) +
# " Ns are in " + result_broken_scaffolds)
### printing WARNINGS SUMMARY
if not support.log_warnings(log):
log.info("\n======= rnaSPAdes pipeline finished.") # otherwise it finished WITH WARNINGS
if options_storage.test_mode:
for result_filename in [result_contigs_filename, result_scaffolds_filename]:
if os.path.isfile(result_filename):
result_fasta = list(support.read_fasta(result_filename))
# correctness check: should be one contig of length 1000 bp
correct_number = 1
correct_length = 1000
if not len(result_fasta):
support.error("TEST FAILED: %s does not contain contigs!" % result_filename)
elif len(result_fasta) > correct_number:
support.error("TEST FAILED: %s contains more than %d contig (%d)!" %
(result_filename, correct_number, len(result_fasta)))
elif len(result_fasta[0][1]) != correct_length:
if len(result_fasta[0][1]) > correct_length:
relation = "more"
else:
relation = "less"
support.error("TEST FAILED: %s contains %s than %d bp (%d bp)!" %
(result_filename, relation, correct_length, len(result_fasta[0][1])))
else:
support.error("TEST FAILED: " + result_filename + " does not exist!")
log.info("\n========= TEST PASSED CORRECTLY.")
log.info("\nrnaSPAdes log can be found here: " + log_filename)
log.info("")
log.info("Thank you for using rnaSPAdes!")
log.removeHandler(log_handler)
except Exception:
exc_type, exc_value, _ = sys.exc_info()
if exc_type == SystemExit:
sys.exit(exc_value)
else:
if exc_type == OSError and exc_value.errno == errno.ENOEXEC: # Exec format error
support.error("It looks like you are using rnaSPAdes binaries for another platform.\n" +
support.get_spades_binaries_info_message())
else:
log.exception(exc_value)
support.error("exception caught: %s" % exc_type, log)
except BaseException: # since python 2.5 system-exiting exceptions (e.g. KeyboardInterrupt) are derived from BaseException
exc_type, exc_value, _ = sys.exc_info()
if exc_type == SystemExit:
sys.exit(exc_value)
else:
log.exception(exc_value)
support.error("exception caught: %s" % exc_type, log)
if __name__ == '__main__':
main(sys.argv)
|
fmaguire/BayeHem
|
bayehem/assembler/rnaSPAdes-0.1.1-Linux/bin/rnaspades.py
|
Python
|
apache-2.0
| 42,177
|
[
"BWA"
] |
8042d3db643abbe93898cf12ae1181f3edb12a0efc6c750da6a39b8861cadc56
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Authors: Salim Fadhley <salimfadhley@gmail.com>
# Matteo Dell'Amico <matteodellamico@gmail.com>
"""Shortest paths and path lengths using the A* ("A star") algorithm.
"""
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx import NetworkXError
from networkx.utils import not_implemented_for
__all__ = ['astar_path', 'astar_path_length']
@not_implemented_for('multigraph')
def astar_path(G, source, target, heuristic=None, weight='weight'):
"""Return a list of nodes in a shortest path between source and target
using the A* ("A-star") algorithm.
There may be more than one shortest path. This returns only one.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
from a node to the target. The function takes
two node arguments and must return a number.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.astar_path(G,0,4))
[0, 1, 2, 3, 4]
>>> G=nx.grid_graph(dim=[3,3]) # nodes are two-tuples (x,y)
>>> def dist(a, b):
... (x1, y1) = a
... (x2, y2) = b
... return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
>>> print(nx.astar_path(G,(0,0),(2,2),dist))
[(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]
See Also
--------
shortest_path, dijkstra_path
"""
if heuristic is None:
# The default heuristic is h=0 - same as Dijkstra's algorithm
def heuristic(u, v):
return 0
push = heappush
pop = heappop
# The queue stores priority, node, cost to reach, and parent.
# Uses Python heapq to keep in priority order.
# Add a counter to the queue to prevent the underlying heap from
# attempting to compare the nodes themselves. The counter breaks ties in the
# priority and is guaranteed unique for all entries pushed onto the queue.
c = count()
queue = [(0, next(c), source, 0, None)]
# Maps enqueued nodes to distance of discovered paths and the
# computed heuristics to target. We avoid computing the heuristics
# more than once and inserting the node into the queue too many times.
enqueued = {}
# Maps explored nodes to parent closest to the source.
explored = {}
while queue:
# Pop the smallest item from queue.
_, __, curnode, dist, parent = pop(queue)
if curnode == target:
path = [curnode]
node = parent
while node is not None:
path.append(node)
node = explored[node]
path.reverse()
return path
if curnode in explored:
continue
explored[curnode] = parent
for neighbor, w in G[curnode].items():
if neighbor in explored:
continue
ncost = dist + w.get(weight, 1)
if neighbor in enqueued:
qcost, h = enqueued[neighbor]
# If qcost <= ncost, the path already enqueued is at least as good,
# so this neighbor is skipped. Otherwise a cheaper entry is pushed
# below and the longer path to neighbor remains enqueued; removing it
# would need to filter the whole queue, so it's better just to leave
# it there and ignore it when we visit the node a second time.
if qcost <= ncost:
continue
else:
h = heuristic(neighbor, target)
enqueued[neighbor] = ncost, h
push(queue, (ncost + h, next(c), neighbor, ncost, curnode))
raise nx.NetworkXNoPath("Node %s not reachable from %s" % (target, source))
def astar_path_length(G, source, target, heuristic=None, weight='weight'):
"""Return the length of the shortest path between source and target using
the A* ("A-star") algorithm.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
from a node to the target. The function takes
two node arguments and must return a number.
Raises
------
NetworkXNoPath
If no path exists between source and target.
See Also
--------
astar_path
"""
path = astar_path(G, source, target, heuristic, weight)
return sum(G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
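# Minimal usage sketch (assumes NetworkX is installed): run A* on a small
# weighted graph with the default zero heuristic, in which case it behaves
# like Dijkstra's algorithm. The graph below is made up for the example.
if __name__ == '__main__':
    G = nx.Graph()
    G.add_edge('a', 'b', weight=2)
    G.add_edge('b', 'c', weight=2)
    G.add_edge('a', 'c', weight=5)
    print(astar_path(G, 'a', 'c'))         # ['a', 'b', 'c']
    print(astar_path_length(G, 'a', 'c'))  # 4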
|
NvanAdrichem/networkx
|
networkx/algorithms/shortest_paths/astar.py
|
Python
|
bsd-3-clause
| 4,896
|
[
"VisIt"
] |
17b9c0ff12f81c406ffd1c5431d6e53aae8919122b3d82985ef9b9daadb07253
|
from rewriter import *
from os.path import basename
import os
import copy
from lan_parser import *
from cgen import *
from transformation import *
from analysis import *
fileprefix = "../test/C/"
SetNoReadBack = True
DoOptimizations = True
def LexAndParse(name, createTemp):
import ply.yacc as yacc
cparser = yacc.yacc()
lex.lex()
run = 1
while run:
filename = fileprefix + name + '/' + name + 'For.cpp'
funcname = basename(os.path.splitext(filename)[0])
try:
f = open(filename, 'r')
s = f.read()
f.close()
## print s
except EOFError:
break
lex.input(s)
while 1:
tok = lex.token()
if not tok: break
## print tok
ast = cparser.parse(s)
## ast.show()
## print ast
## print slist
cprint = CGenerator()
## printres = cprint.visit(ast)
## print printres
rw = Rewriter()
rw.initOriginal(ast)
tempfilename = fileprefix + name + '/'+'temp' +name.lower() + '.cpp'
if createTemp:
rw.rewrite(ast, funcname, changeAST = True)
cprint.createTemp(ast, filename = tempfilename)
run = 0
filename = tempfilename
## funcname = basename(os.path.splitext(filename)[0])
try:
f = open(filename, 'r')
s = f.read()
f.close()
except EOFError:
break
ast = cparser.parse(s)
## ## ast.show()
tempast = copy.deepcopy(ast)
tempast2 = copy.deepcopy(ast)
return (rw, ast, tempast, tempast2, funcname)
def CGen(name, funcname, an, tempast2, ast, kernelstringname = ''):
cprint = CGenerator()
rw = an.rw
an.generate_kernels(tempast2, name, fileprefix)
## rw.InSourceKernel(tempast2, filename = fileprefix + name + '/'+funcname + '.cl', kernelstringname = kernelstringname)
boilerast = rw.generateBoilerplateCode(ast)
cprint.createTemp(boilerast, filename = fileprefix + name + '/'+'boilerplate.cpp')
def matmul():
name = 'MatMul'
(rw, ast, tempast, tempast2, funcname) = LexAndParse(name, True)
rw.initNewRepr(tempast, dev='CPU')
tf = Transformation(rw)
an = Analysis(rw, tf)
if DoOptimizations:
an.Transpose()
an.DefineArguments()
an.PlaceInReg()
an.PlaceInLocalMemory()
if SetNoReadBack:
tf.SetNoReadBack()
## rw.DataStructures()
CGen(name, funcname, an, tempast2, ast)
def jacobi():
name = 'Jacobi'
(rw, ast, tempast, tempast2, funcname) = LexAndParse(name, True)
rw.initNewRepr(tempast, dev='CPU')
tf = Transformation(rw)
an = Analysis(rw, tf)
if DoOptimizations:
an.Transpose()
an.DefineArguments()
an.PlaceInReg()
tf.localMemory(['X1'], west = 1, north = 1, east = 1, south = 1, middle = 0)
an.PlaceInLocalMemory()
if SetNoReadBack:
tf.SetNoReadBack()
CGen(name, funcname, an, tempast2, ast)
def knearest():
name = 'KNearest'
(rw, ast, tempast, tempast2, funcname) = LexAndParse(name, True)
tf = Transformation(rw)
tf.SetParDim(1)
rw.initNewRepr(tempast, dev='CPU')
an = Analysis(rw, tf)
if DoOptimizations:
an.Transpose()
an.DefineArguments()
an.PlaceInReg()
an.PlaceInLocalMemory()
if SetNoReadBack:
tf.SetNoReadBack()
## rw.DataStructures()
## rw.Unroll2({'k' : 0})
CGen(name, funcname, an, tempast2, ast)
def nbody():
name = 'NBody'
(rw, ast, tempast, tempast2, funcname) = LexAndParse(name, True)
rw.initNewRepr(tempast, dev='CPU')
tf = Transformation(rw)
an = Analysis(rw, tf)
if DoOptimizations:
an.Transpose()
an.DefineArguments()
an.PlaceInReg()
an.PlaceInLocalMemory()
if SetNoReadBack:
tf.SetNoReadBack()
## rw.Unroll2({'j': 32})
CGen(name, funcname, an, tempast2, ast)
def laplace():
name = 'Laplace'
(rw, ast, tempast, tempast2, funcname) = LexAndParse(name, True)
tf = Transformation(rw)
tf.SetParDim(1)
rw.initNewRepr(tempast, dev='CPU')
an = Analysis(rw, tf)
if DoOptimizations:
an.Transpose()
an.DefineArguments()
an.PlaceInReg()
an.PlaceInLocalMemory()
else:
tf.SetDefine(['dim'])
if SetNoReadBack:
tf.SetNoReadBack()
## rw.DataStructures()
## tf.Unroll2({'d' : 0, 'd_outer' : 0, 'd_inner' : 0})
CGen(name, funcname, an, tempast2, ast)
def gaussian():
name = 'GaussianDerivates'
(rw, ast, tempast, tempast2, funcname) = LexAndParse(name, True)
## rw.SetParDim(1)
rw.initNewRepr(tempast, dev='CPU')
tf = Transformation(rw)
an = Analysis(rw, tf)
if DoOptimizations:
an.Transpose()
an.DefineArguments()
an.PlaceInReg()
an.PlaceInLocalMemory()
## tf.Unroll2({'k' : 0, 'd' : 0, 'g' : 0, 'b' : 0})
## rw.DataStructures()
if SetNoReadBack:
tf.SetNoReadBack()
CGen(name, funcname, an, tempast2, ast)
if __name__ == "__main__":
matmul()
jacobi()
knearest()
nbody()
laplace()
gaussian()
|
dikujepsen/OpenTran
|
v2.0/framework/main.py
|
Python
|
mit
| 5,346
|
[
"Gaussian",
"VisIt"
] |
badeef735dd86698174c8c7fee618de1ae3e47d0e4d87fa6f367dc96bd5c50f8
|
from datetime import datetime,timedelta
import itertools
def get_1mo_dates(inyr,inmo,indate,byear,eyear):
"""
Used with netCDF4 files and py-netCDF.
Searches the given month in each year from byear to eyear (skipping
the initial year inyr) and returns the applicable dates in a list of
datetime objects.
In other words, returns a list of file name dates for us to
search for analogs/use in logistic regression/whatever.
indate - Initial date (1,31)
inyr - Initial year, YYYY (1985 - )
inmo - Initial month, (1,12)
byear - earliest year for potential dates (usually 1985)
eyear - latest year for potential dates (usually 2011)
Returns:
outdates - List of dates meeting the criteria
"""
fnlist = []
#print inmo,indate
try:
xdate = datetime(byear,inmo,indate)
except ValueError:
xdate = datetime(byear,inmo,indate-1)
else:
xdate = datetime(byear,inmo,indate)
while xdate < datetime(eyear+1,1,1):
#print xdate
if xdate.year == inyr:
try:
xdate = datetime((xdate.year + 1),inmo,indate)
except ValueError:
xdate = datetime((xdate.year + 1),inmo,indate-1)
continue
for datechange in xrange(0,35):
tdelta = timedelta(days=datechange)
analogdate = xdate + tdelta
#print analogdate,xdate
if analogdate.year > eyear:
continue
if analogdate.year == inyr:
continue
if analogdate.month != xdate.month:
continue
fnlist.append(analogdate)
try:
xdate = datetime((xdate.year + 1),inmo,indate)
except ValueError:
xdate = datetime(xdate.year+1,inmo,indate-1)
return fnlist
def get_analog_dates(inyr,inmo,indate,byear,eyear,**kwargs):
"""
Used with netCDF4 files and py-netCDF.
Searches a window of days centered on the given date in each year
from byear to eyear (skipping the initial year inyr) and returns
the applicable dates in a list of datetime objects.
In other words, returns a list of file name dates for us to
search for analogs/use in logistic regression/whatever.
indate - Initial date (1,31)
inyr - Initial year, YYYY (1985 - )
inmo - Initial month, (1,12)
bias_corr - optional keyword; if True, dates from additional months are included (for bias-corrected data)
byear - earliest year for potential dates (usually 1985)
eyear - latest year for potential dates (usually 2011)
Returns:
outdates - List of dates meeting the criteria
"""
bias_corr = kwargs.get('bias_corr',False)
fnlist = []
date_list = []
#print inmo,indate
try:
xdate = datetime(byear,inmo,indate)
except ValueError:
xdate = datetime(byear,inmo,indate-1)
else:
xdate = datetime(byear,inmo,indate)
while xdate < datetime(eyear+1,1,1):
#print xdate
if xdate.year == inyr:
try:
xdate = datetime((xdate.year + 1),inmo,indate)
except ValueError:
xdate = datetime((xdate.year + 1),inmo,indate-1)
continue
for datechange in reversed(xrange(0,100)):
if xdate.month > 1:
tdelta = timedelta(days=datechange)
analogdate = xdate - tdelta
if analogdate.year < byear:
continue
if analogdate.year == inyr:
continue
if analogdate < datetime(xdate.year,xdate.month-1,1):
continue
fnlist.append(analogdate)
elif xdate.month == 1:
tdelta = timedelta(days=datechange)
analogdate = xdate - tdelta
if analogdate.year < byear:
continue
if analogdate.year == inyr:
continue
if analogdate < datetime(xdate.year-1,12,1):
continue
fnlist.append(analogdate)
for datechange in xrange(1,101):
if xdate.month < 12:
tdelta = timedelta(days=datechange)
analogdate = xdate + tdelta
if analogdate.year > eyear:
continue
if analogdate.year == inyr:
continue
try:
datetime(xdate.year,xdate.month+2,1)
except ValueError: # --- xdate.month == 11
if analogdate >= datetime(xdate.year+1,1,1):
continue
else:
if analogdate >= datetime(xdate.year,xdate.month+2,1):
continue
fnlist.append(analogdate)
elif xdate.month == 12:
tdelta = timedelta(days=datechange)
analogdate = xdate + tdelta
if analogdate.year > eyear:
continue
if analogdate.year == inyr:
continue
if analogdate >= datetime(xdate.year+1,2,1):
continue
fnlist.append(analogdate)
try:
xdate = datetime((xdate.year + 1),inmo,indate)
except ValueError:
xdate = datetime(xdate.year+1,inmo,indate-1)
# --- Here, since we are now using bias-corrected data, we can get additional potential analog dates!
if bias_corr:
date_list.append(fnlist)
#for n_mo in xrange(1,13,1):
# if (n_mo >= inmo-1) and (n_mo <= inmo+1):
# continue
# else:
# date_list.append(get_1mo_dates(int(inyr),n_mo,1,byear,eyear))
if (inmo < 2) or (inmo > 9):
date_list.append(get_1mo_dates(int(inyr),3,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),4,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),5,1,byear,eyear))
if (inmo == 2):
date_list.append(get_1mo_dates(int(inyr),4,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),5,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),10,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),11,1,byear,eyear))
if (inmo == 3):
date_list.append(get_1mo_dates(int(inyr),5,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),10,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),11,1,byear,eyear))
if (inmo == 4):
date_list.append(get_1mo_dates(int(inyr),9,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),10,1,byear,eyear))
date_list.append(get_1mo_dates(int(inyr),11,1,byear,eyear))
# --- Now flatten and return the list
date_list = list(itertools.chain.from_iterable(date_list))
return date_list
else:
return fnlist
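# Minimal usage sketch (this module targets Python 2, since it uses xrange):
# collect candidate analog dates around 15 June 2000, drawing from 1985-2011.
# The specific date below is made up for the example.
if __name__ == '__main__':
    analog_dates = get_analog_dates(2000, 6, 15, 1985, 2011)
    print(len(analog_dates))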
|
mogismog/retorcast
|
retorcast/utils/get_dates.py
|
Python
|
apache-2.0
| 6,926
|
[
"NetCDF"
] |
14578948b5f6e664a383c2d2eb380e4a6b54e6595c45c2259c671e84aa1d7e61
|
r"""
Composition Statistics (:mod:`skbio.stats.composition`)
=======================================================
.. currentmodule:: skbio.stats.composition
This module provides functions for compositional data analysis.
Many 'omics datasets are inherently compositional - meaning that they
are best interpreted as proportions or percentages rather than
absolute counts.
Formally, :math:`x` is a composition if :math:`\sum_{i=0}^D x_{i} = c`
and :math:`x_{i} > 0`, :math:`1 \leq i \leq D` and :math:`c` is a real
valued constant and there are :math:`D` components for each
composition. In this module :math:`c=1`. Compositional data can be
analyzed using Aitchison geometry. [1]_
However, in this framework, standard real Euclidean operations such as
addition and multiplication no longer apply. Only operations such as
perturbation and power can be used to manipulate this data. [1]_
This module allows two styles of manipulation of compositional data.
Compositional data can be analyzed using perturbation and power
operations, which can be useful for simulation studies. The
alternative strategy is to transform compositional data into the real
space. Right now, the centre log ratio transform (clr) [1]_ can be
used to accomplish this. This transform can be useful for performing
standard statistical tools such as parametric hypothesis testing,
regressions and more.
The major caveat of using this framework is dealing with zeros. In
the Aitchison geometry, only compositions with nonzero components can
be considered. The multiplicative replacement technique [2]_ can be
used to substitute these zeros with small pseudocounts without
introducing major distortions to the data.
Functions
---------
.. autosummary::
:toctree: generated/
closure
multiplicative_replacement
perturb
perturb_inv
power
clr
centralize
References
----------
.. [1] V. Pawlowsky-Glahn. "Lecture Notes on Compositional Data Analysis"
.. [2] J. A. Martin-Fernandez. "Dealing With Zeros and Missing Values in
Compositional Data Sets Using Nonparametric Imputation"
Examples
--------
>>> import numpy as np
Consider a very simple environment with only 3 species. The species
in the environment are equally distributed and their proportions are
equivalent:
>>> otus = np.array([1./3, 1./3., 1./3])
Suppose that an antibiotic kills off half of the population for the
first two species, but doesn't harm the third species. Then the
perturbation vector would be as follows
>>> antibiotic = np.array([1./2, 1./2, 1])
And the resulting perturbation would be
>>> from skbio.stats.composition import perturb
>>> perturb(otus, antibiotic)
array([ 0.25, 0.25, 0.5 ])
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.stats as ss
def closure(mat):
"""
Performs closure to ensure that all elements add up to 1.
Parameters
----------
mat : array_like
a matrix of proportions where
rows = compositions
columns = components
Returns
-------
array_like, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import closure
>>> X = np.array([[2, 2, 6], [4, 4, 2]])
>>> closure(X)
array([[ 0.2, 0.2, 0.6],
[ 0.4, 0.4, 0.2]])
"""
mat = np.atleast_2d(mat)
if np.any(mat < 0):
raise ValueError("Cannot have negative proportions")
if mat.ndim > 2:
raise ValueError("Input matrix can only have two dimensions or less")
mat = mat / mat.sum(axis=1, keepdims=True)
return mat.squeeze()
def multiplicative_replacement(mat, delta=None):
r"""Replace all zeros with small non-zero values
It uses the multiplicative replacement strategy [1]_ ,
replacing zeros with a small positive :math:`\delta`
and ensuring that the compositions still add up to 1.
Parameters
----------
mat: array_like
a matrix of proportions where
rows = compositions and
columns = components
delta: float, optional
a small number to be used to replace zeros
If delta is not specified, then the default delta is
:math:`\delta = \frac{1}{N^2}` where :math:`N`
is the number of components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
References
----------
.. [1] J. A. Martin-Fernandez. "Dealing With Zeros and Missing Values in
Compositional Data Sets Using Nonparametric Imputation"
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import multiplicative_replacement
>>> X = np.array([[.2,.4,.4, 0],[0,.5,.5,0]])
>>> multiplicative_replacement(X)
array([[ 0.1875, 0.375 , 0.375 , 0.0625],
[ 0.0625, 0.4375, 0.4375, 0.0625]])
"""
mat = closure(mat)
z_mat = (mat == 0)
num_feats = mat.shape[-1]
tot = z_mat.sum(axis=-1, keepdims=True)
if delta is None:
delta = (1. / num_feats)**2
zcnts = 1 - tot * delta
mat = np.where(z_mat, delta, zcnts * mat)
return mat.squeeze()
def perturb(x, y):
r"""
Performs the perturbation operation.
This operation is defined as
:math:`x \oplus y = C[x_1 y_1, ..., x_D y_D]`
:math:`C[x]` is the closure operation defined as
:math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
y : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import perturb
>>> x = np.array([.1,.3,.4, .2])
>>> y = np.array([1./6,1./6,1./3,1./3])
>>> perturb(x,y)
array([ 0.0625, 0.1875, 0.5 , 0.25 ])
"""
x, y = closure(x), closure(y)
return closure(x * y)
def perturb_inv(x, y):
r"""
Performs the inverse perturbation operation.
This operation is defined as
:math:`x \ominus y = C[x_1 y_1^{-1}, ..., x_D y_D^{-1}]`
:math:`C[x]` is the closure operation defined as
:math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like
a matrix of proportions where
rows = compositions and
columns = components
y : array_like
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import perturb_inv
>>> x = np.array([.1,.3,.4, .2])
>>> y = np.array([1./6,1./6,1./3,1./3])
>>> perturb_inv(x,y)
array([ 0.14285714, 0.42857143, 0.28571429, 0.14285714])
"""
x, y = closure(x), closure(y)
return closure(x / y)
def power(x, a):
r"""
Performs the power operation.
This operation is defined as follows
:math:`x \odot a = C[x_1^a, ..., x_D^a]`
:math:`C[x]` is the closure operation defined as
:math:`C[x] = [\frac{x_1}{\sum x},...,\frac{x_D}{\sum x}]`
for some :math:`D` dimensional real vector :math:`x` and
:math:`D` is the number of components for every composition.
Parameters
----------
x : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
a : float
a scalar float
Returns
-------
numpy.ndarray, np.float64
A matrix of proportions where all of the values
are nonzero and each composition (row) adds up to 1
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import power
>>> x = np.array([.1,.3,.4, .2])
>>> power(x, .1)
array([ 0.23059566, 0.25737316, 0.26488486, 0.24714631])
"""
x = closure(x)
return closure(x**a).squeeze()
def clr(mat):
r"""
Performs centre log ratio transformation.
This function transforms compositions from Aitchison geometry to
the real space. This transformation is an isometry, but not an
isomorphism. It is defined for a composition :math:`x` as follows:
:math:`clr(x) = ln[\frac{x_1}{g_m(x)}, ..., \frac{x_D}{g_m(x)}]`
where :math:`g_m(x) = (\prod_{i=1}^{D} x_i)^{1/D}` is the geometric
mean of :math:`x`.
Parameters
----------
mat : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
clr transformed matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import clr
>>> x = np.array([.1,.3,.4, .2])
>>> clr(x)
array([-0.79451346, 0.30409883, 0.5917809 , -0.10136628])
"""
mat = closure(mat)
lmat = np.log(mat)
gm = lmat.mean(axis=-1, keepdims=True)
return (lmat - gm).squeeze()
def centralize(mat):
"""Center data around its geometric average.
Parameters
----------
mat : array_like, float
a matrix of proportions where
rows = compositions and
columns = components
Returns
-------
numpy.ndarray
centered composition matrix
Examples
--------
>>> import numpy as np
>>> from skbio.stats.composition import centralize
>>> X = np.array([[.1,.3,.4, .2],[.2,.2,.2,.4]])
>>> centralize(X)
array([[ 0.17445763, 0.30216948, 0.34891526, 0.17445763],
[ 0.32495488, 0.18761279, 0.16247744, 0.32495488]])
"""
mat = closure(mat)
cen = ss.gmean(mat, axis=0)
return perturb_inv(mat, cen)
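# Minimal usage sketch tying the functions above together: close raw counts to
# proportions, replace zeros with small pseudocounts, then move to real space
# with the centre log ratio transform. The counts below are made up.
if __name__ == '__main__':
    counts = np.array([[10, 0, 5], [3, 7, 0]])
    composition = multiplicative_replacement(closure(counts))
    print(clr(composition))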
|
jensreeder/scikit-bio
|
skbio/stats/composition.py
|
Python
|
bsd-3-clause
| 10,803
|
[
"scikit-bio"
] |
8e8f8b85be1ad1b75930c6951dc936da516554715d8b7c3ed45550fbcee581d4
|
""" Determine calibration values for data
This module can be used to determine calibration values from data.
Determine timing offsets for detectors and stations to correct arrival times.
Determine the PMT response curve to correct the detected number of MIPs.
"""
from __future__ import division
from datetime import datetime, timedelta
from itertools import tee, combinations, chain
from six.moves import zip
from numpy import (arange, histogram, percentile, linspace, std, nan, isnan,
sqrt, abs, sum)
from scipy.optimize import curve_fit
from ..clusters import HiSPARCStations, HiSPARCNetwork
from ..utils import gauss, round_in_base, memoize, get_active_index, pbar, c
from ..api import Station
from ..transformations.clock import datetime_to_gps, gps_to_datetime
def determine_detector_timing_offsets(events, station=None):
"""Determine the timing offsets between station detectors.
:param events: events table of processed events.
:param station: :class:`sapphire.clusters.Station` object, to determine
number of detectors and relative altitudes.
:return: list of detector offsets.
"""
offsets = [nan, nan, nan, nan]
if not events.nrows:
return offsets
t = []
filters = []
if station is not None:
n_detectors = len(station.detectors)
station.cluster.set_timestamp(events[0]['timestamp'])
z = [d.get_coordinates()[2] for d in station.detectors]
else:
n_detectors = 4
z = [0., 0., 0., 0.]
for id in range(n_detectors):
t.append(events.col('t%d' % (id + 1)))
filters.append((events.col('n%d' % (id + 1)) > 0.3) & (t[id] >= 0.))
if n_detectors == 2:
ref_id = 1
else:
ref_id = determine_best_reference(filters)
for id in range(n_detectors):
if id == ref_id:
offsets[id] = 0.
continue
dt = (t[id] - t[ref_id]).compress(filters[id] & filters[ref_id])
dz = z[id] - z[ref_id]
offsets[id], _ = determine_detector_timing_offset(dt, dz)
# If all except reference are nan, make reference nan.
if sum(isnan(offsets)) == 3:
offsets = [nan, nan, nan, nan]
# Try to make detector 2 the reference point, if it is not nan.
if not isnan(offsets[1]):
ref = offsets[1]
offsets = [o - ref for o in offsets]
return offsets
def determine_detector_timing_offset(dt, dz=0):
"""Determine the timing offset between station detectors.
:param dt: array of time differences between detectors (t - t_ref).
:param dz: height difference between the detectors (z - z_ref).
:return: mean of a gaussian fit to the data corrected for height, and
the error of the mean.
"""
dt_filter = abs(dt + dz / c) < 100
if not sum(dt_filter):
return nan, nan
p = round_in_base(percentile(dt.compress(dt_filter), [0.5, 99.5]), 2.5)
bins = arange(p[0] + 1.25, p[1], 2.5)
if not len(bins):
return nan, nan
detector_offset, detector_offset_error = fit_timing_offset(dt, bins)
detector_offset += dz / c
if abs(detector_offset) > 100:
return nan, nan
return detector_offset, detector_offset_error
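# Illustrative sketch (not part of the public API): with synthetic time
# differences drawn from a normal distribution around a known offset and
# dz = 0, determine_detector_timing_offset should recover roughly that
# offset. Defined for illustration only; it is never called on import.
def _example_detector_offset(simulated_offset=5., n_events=10000):
    from numpy.random import normal
    dt = normal(loc=simulated_offset, scale=3., size=n_events)
    return determine_detector_timing_offset(dt, dz=0.)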
class DetermineStationTimingOffsets(object):
"""Determine the timing offsets between stations"""
# Maximum distance between station pairs that are included in analysis
MAX_DISTANCE = 1000 # m
# Minimum number of timedeltas required to attempt a fit
MIN_LEN_DT = 200
def __init__(self, stations=None, data=None, progress=False,
force_stale=False,
time_deltas_group='/coincidences/time_deltas'):
"""Initialize the class
:param stations: list of stations for which to determine offsets.
:param data: the PyTables datafile with timedelta tables.
:param progress: if True show progressbar when determining offsets.
:param force_stale: if true: do not get network information from API.
:param time_deltas_group: path to the time deltas group.
"""
self.data = data
self.progress = progress
self.force_stale = force_stale
self.time_deltas_group = time_deltas_group
if stations is not None:
self.cluster = HiSPARCStations(stations, skip_missing=True,
force_stale=self.force_stale)
else:
self.cluster = HiSPARCNetwork(force_stale=self.force_stale)
def read_dt(self, station, ref_station, start, end):
"""Read timedeltas from HDF5 file"""
pair = (ref_station, station)
table_path = self.time_deltas_group + '/station_%d/station_%d' % pair
table = self.data.get_node(table_path, 'time_deltas')
ts0 = datetime_to_gps(start) # noqa
ts1 = datetime_to_gps(end) # noqa
return table.read_where('(timestamp >= ts0) & (timestamp < ts1)',
field='delta')
@memoize
def _get_gps_timestamps(self, station):
"""Get timestamps of station gps changes"""
return Station(station,
force_stale=self.force_stale).gps_locations['timestamp']
@memoize
def _get_electronics_timestamps(self, station):
"""Get timestamps of station electronics (hardware) changes"""
return Station(station,
force_stale=self.force_stale).electronics['timestamp']
def _get_cuts(self, station, ref_station):
"""Get cuts for determination of offsets
Get a list of events (new gps location, new electronics)
that (may) cause a large shift in station timing offset
:param station: station number
:param ref_station: reference station number
:return: list of datetime objects
"""
cuts = {self._datetime(gps_to_datetime(ts))
for ts in chain(self._get_gps_timestamps(station),
self._get_gps_timestamps(ref_station),
self._get_electronics_timestamps(station),
self._get_electronics_timestamps(ref_station))}
today = self._datetime(datetime.now())
cuts = sorted(list(cuts) + [today])
return cuts
@memoize
def _get_r_dz(self, date, station, ref_station):
"""Determine r and dz at date
:param date: date for which to get the distances.
:param station,ref_station: station numbers of the station pair.
:return: tuple containing the horizontal and vertical distances.
"""
self.cluster.set_timestamp(datetime_to_gps(date))
r, _, dz = self.cluster.calc_rphiz_for_stations(
self.cluster.get_station(ref_station).station_id,
self.cluster.get_station(station).station_id)
return r, dz
def _determine_interval(self, r):
"""Determine interval (number of days) in which to fit timedelta's
:param r: distrance between stations (m).
:return: number of days in interval.
"""
return max(int(r ** 1.2 / 10), 7)
def _get_left_and_right_bounds(self, cuts, date, days):
"""Determine left and right bounds between cuts
Offsets are determined per day, so intervals are based on days.
Cuts are excluded. Start date (left side bound) is the day
after a cut, end date (right side bound) is the day before a cut.
The last cut (today) is always *included* in the interval,
as this is not a cut that influences the timing offset.
Returns datetime objects with hours, min, sec, msec = 0.
:param cuts: list of datetime objects.
:param date: datetime (middle of interval).
:param days: number of days.
:return: tuple of datetime objects (left bound, right bound).
"""
left = get_active_index(cuts, self._datetime(date))
if left == len(cuts) - 1:
lower_bound = cuts[left - 1]
upper_bound = cuts[-1] # include last day (today) in interval
else:
right = min(left + 1, len(cuts) - 1)
lower_bound = cuts[left]
upper_bound = cuts[right] - timedelta(1)
step = timedelta(round(days / 2))
if days >= (upper_bound - lower_bound).days:
return lower_bound, upper_bound
elif date + step > upper_bound:
return upper_bound - 2 * step, upper_bound
elif date - step < lower_bound:
return lower_bound, lower_bound + 2 * step
else:
return date - step, date + step
def determine_first_and_last_date(self, date, station, ref_station):
"""
Determine first and last date to include in determination of
station offset around date
:param date: date around which the bounds are to be determined.
:param station: station number.
:param ref_station: reference station number.
:return: start and end date bounds.
"""
date = self._datetime(date)
cuts = self._get_cuts(station, ref_station)
r, dz = self._get_r_dz(date, station, ref_station)
interval = self._determine_interval(r)
return self._get_left_and_right_bounds(cuts, date, interval)
def _datetime(self, date):
"""Ensure date is a datetime object
:return: a datetime object with h, m, s, ms = 0.
"""
return datetime(date.year, date.month, date.day)
def determine_station_timing_offset(self, date, station, ref_station):
"""Determine the timing offset between a station pair at certain date
:param date: date for which to determine offset as datetime.date.
:param station: station number.
:param ref_station: reference station number.
:return: station offset and error.
"""
date = self._datetime(date)
left, right = self.determine_first_and_last_date(date, station,
ref_station)
r, dz = self._get_r_dz(date, station, ref_station)
dt = self.read_dt(station, ref_station, left, right)
if len(dt) < self.MIN_LEN_DT:
s_off, error = nan, nan
else:
s_off, error = determine_station_timing_offset(dt, dz)
return s_off, error
def determine_station_timing_offsets(self, station, ref_station,
start=None, end=None):
"""Determine the timing offsets between a station pair
:param station: station number.
:param ref_station: reference station number.
:param start: datetime.date object.
:param end: datetime.date object.
:return: list of station offsets as tuple (timestamp, offset, error).
"""
if start is None:
cuts = self._get_cuts(station, ref_station)
start = self._datetime(cuts[0])
if end is None:
end = self._datetime(datetime.now())
offsets = []
length = (end - start).days
for date, _ in pbar(datetime_range(start, end), show=self.progress,
length=length):
ts0 = datetime_to_gps(date)
s_off, error = self.determine_station_timing_offset(date, station,
ref_station)
offsets.append((ts0, s_off, error))
return offsets
def determine_station_timing_offsets_for_date(self, date):
"""Determine the timing offsets between a station pair
:param date: date for which to determine offsets as datetime.date.
:return: list of station offsets as tuple
(station, ref_station, offset, error).
"""
station_pairs = self.get_station_pairs_within_max_distance(date)
offsets = []
for station, ref_station in station_pairs:
s_off, error = self.determine_station_timing_offset(date, station,
ref_station)
offsets.append((station, ref_station, s_off, error))
return offsets
def get_station_pairs_within_max_distance(self, date=None):
"""Iterator that yields stations pairs that are close to each other"""
if date is not None:
self.cluster.set_timestamp(datetime_to_gps(date))
for so1, so2 in combinations(self.cluster.stations, 2):
s1, s2 = so1.number, so2.number
r = self.cluster.calc_distance_between_stations(s1, s2)
if r <= self.MAX_DISTANCE:
if s1 < s2:
yield s1, s2
else:
yield s2, s1
def determine_station_timing_offset(dt, dz=0):
"""Determine the timing offset between stations.
:param dt: a list of time differences between stations (t - t_ref).
:param dz: height difference between the stations (z - z_ref).
:return: mean of a gaussian fit to the data corrected for height, and
the error of the mean.
"""
if not len(dt):
return nan, nan
p = percentile(dt, [0.5, 99.5])
# Bins should at least be 1 ns wide, on average at least 4 counts per bin
# and at most 200 bins.
bins = linspace(p[0], p[1], min(int(p[1] - p[0]), len(dt) / 4, 200))
station_offset, station_offset_error = fit_timing_offset(dt, bins)
station_offset += dz / c
if abs(station_offset) > 1000:
return nan, nan
return station_offset, station_offset_error
def fit_timing_offset(dt, bins):
"""Fit the time difference distribution.
:param dt: a list of time differences between stations (t - t_ref).
:param bins: bins edges to use for the histogram.
:return: mean of a gaussian fit to the data and the error of the mean.
"""
y, bins = histogram(dt, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
sigma = sqrt(y + 1)
try:
popt, pcov = curve_fit(gauss, x, y, p0=(len(dt), 0., std(dt)),
sigma=sigma, absolute_sigma=False)
offset = popt[1]
width = popt[2]
offset_error = width / sqrt(sum(y))
except (RuntimeError, TypeError):
offset, offset_error = nan, nan
return offset, offset_error
def determine_best_reference(filters):
"""Find which detector has most events in common with the others
:param filters: list of filters for each detector, selecting rows
where that detector has data.
:return: index for the detector that has most rows in common with
the other detectors.
"""
lengths = []
ids = range(len(filters))
for id in ids:
idx = [j for j in ids if j != id]
lengths.append(sum(filters[id] & (filters[idx[0]] |
filters[idx[1]] | filters[idx[2]])))
return lengths.index(max(lengths))
def datetime_range(start, end, step=1):
"""Generator that splits a date range in (almost) equal intervals
The yielded interval lengths are integer days
Spreads remaining days over first intervals
:param start: date instance
:param end: date instance
:param step: the integer number of days in each interval
:return: a tuple of datetime instances for each interval
"""
interval = (end - start).days
number_of_steps = interval // step
if number_of_steps == 0:
yield start, end
return
remainder = interval % step
chunk_start = start
for _ in range(number_of_steps):
chunk_end = chunk_start + timedelta(step + min(1, remainder))
yield chunk_start, chunk_end
chunk_start = chunk_end
remainder = max(0, remainder - 1)
def pairwise(iterable):
"""s -> (s0, s1), (s1, s2), (s2, s3), ..."""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
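# Minimal usage sketch for the interval helpers above (dates are made up for
# the example; the module itself is normally used via the sapphire package
# because of its relative imports). Defined only, never called on import.
def _example_intervals():
    start = datetime(2017, 1, 1)
    end = datetime(2017, 2, 1)
    return list(datetime_range(start, end, step=7))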
|
tomkooij/sapphire
|
sapphire/analysis/calibration.py
|
Python
|
gpl-3.0
| 15,927
|
[
"Gaussian"
] |
117a40670fdb8cead41d656228d9dc8c5153c8cc97c3ae6a3a656bc222cb018b
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
slogdet = gen_linalg_ops.log_matrix_determinant
tf_export('linalg.slogdet')(slogdet)
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
logm = gen_linalg_ops.matrix_logarithm
lu = gen_linalg_ops.lu
tf_export('linalg.logm')(logm)
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
@dispatch.add_dispatch_support
def logdet(matrix, name=None):
"""Computes log of the determinant of a hermitian positive definite matrix.
```python
# Compute the determinant of a matrix while reducing the chance of over- or
# underflow:
A = ... # shape 10 x 10
det = tf.exp(tf.linalg.logdet(A)) # scalar
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op`. Defaults to `logdet`.
Returns:
The natural log of the determinant of `matrix`.
@compatibility(numpy)
Equivalent to numpy.linalg.slogdet, although no sign is returned since only
hermitian positive definite matrices are supported.
@end_compatibility
"""
# This uses the property that the log det(A) = 2*sum(log(real(diag(C))))
# where C is the cholesky decomposition of A.
with ops.name_scope(name, 'logdet', [matrix]):
chol = gen_linalg_ops.cholesky(matrix)
return 2.0 * math_ops.reduce_sum(
math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
axis=[-1])
@tf_export('linalg.adjoint')
@dispatch.add_dispatch_support
def adjoint(matrix, name=None):
"""Transposes the last two dimensions of and conjugates tensor `matrix`.
For example:
```python
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
matrix.
"""
with ops.name_scope(name, 'adjoint', [matrix]):
matrix = ops.convert_to_tensor(matrix, name='matrix')
return array_ops.matrix_transpose(matrix, conjugate=True)
# This section is ported nearly verbatim from Eigen's implementation:
# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
def _matrix_exp_pade3(matrix):
"""3rd-order Pade approximant for matrix exponential."""
b = [120.0, 60.0, 12.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
tmp = matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade5(matrix):
"""5th-order Pade approximant for matrix exponential."""
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade7(matrix):
"""7th-order Pade approximant for matrix exponential."""
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade9(matrix):
"""9th-order Pade approximant for matrix exponential."""
b = [
17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0,
2162160.0, 110880.0, 3960.0, 90.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
matrix_8 = math_ops.matmul(matrix_6, matrix_2)
tmp = (
matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 +
b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = (
b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 +
b[0] * ident)
return matrix_u, matrix_v
def _matrix_exp_pade13(matrix):
"""13th-order Pade approximant for matrix exponential."""
b = [
64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0,
33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp_u = (
math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) +
b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp_u)
tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2
matrix_v = (
math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 +
b[2] * matrix_2 + b[0] * ident)
return matrix_u, matrix_v
@tf_export('linalg.expm')
def matrix_exponential(input, name=None): # pylint: disable=redefined-builtin
r"""Computes the matrix exponential of one or more square matrices.
exp(A) = \sum_{n=0}^\infty A^n/n!
The exponential is computed using a combination of the scaling and squaring
method and the Pade approximation. Details can be found in:
Nicholas J. Higham, "The scaling and squaring method for the matrix
exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. The output is a tensor of the same shape as the input
containing the exponential for all input submatrices `[..., :, :]`.
Args:
input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or
`complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
the matrix exponential of the input.
Raises:
ValueError: An unsupported type is provided as input.
@compatibility(scipy)
Equivalent to scipy.linalg.expm
@end_compatibility
"""
with ops.name_scope(name, 'matrix_exponential', [input]):
matrix = ops.convert_to_tensor(input, name='input')
if matrix.shape[-2:] == [0, 0]:
return matrix
batch_shape = matrix.shape[:-2]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(matrix)[:-2]
# reshaping the batch makes the where statements work better
matrix = array_ops.reshape(
matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
l1_norm = math_ops.reduce_max(
math_ops.reduce_sum(
math_ops.abs(matrix),
axis=array_ops.size(array_ops.shape(matrix)) - 2),
axis=-1)[..., array_ops.newaxis, array_ops.newaxis]
const = lambda x: constant_op.constant(x, l1_norm.dtype)
def _nest_where(vals, cases):
assert len(vals) == len(cases) - 1
if len(vals) == 1:
return array_ops.where_v2(
math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
else:
return array_ops.where_v2(
math_ops.less(l1_norm, const(vals[0])), cases[0],
_nest_where(vals[1:], cases[1:]))
if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
maxnorm = const(3.925724783138660)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(
matrix /
math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
conds = (4.258730016922831e-001, 1.880152677804762e+000)
u = _nest_where(conds, (u3, u5, u7))
v = _nest_where(conds, (v3, v5, v7))
elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
maxnorm = const(5.371920351148152)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(matrix)
u9, v9 = _matrix_exp_pade9(matrix)
u13, v13 = _matrix_exp_pade13(
matrix /
math_ops.cast(math_ops.pow(const(2.0), squarings), matrix.dtype))
conds = (1.495585217958292e-002, 2.539398330063230e-001,
9.504178996162932e-001, 2.097847961257068e+000)
u = _nest_where(conds, (u3, u5, u7, u9, u13))
v = _nest_where(conds, (v3, v5, v7, v9, v13))
else:
raise ValueError('tf.linalg.expm does not support matrices of type %s' %
matrix.dtype)
numer = u + v
denom = -u + v
result = linalg_ops.matrix_solve(denom, numer)
max_squarings = math_ops.reduce_max(squarings)
i = const(0.0)
c = lambda i, r: math_ops.less(i, max_squarings)
def b(i, r):
return i + 1, array_ops.where_v2(
math_ops.less(i, squarings), math_ops.matmul(r, r), r)
_, result = control_flow_ops.while_loop(c, b, [i, result])
if not matrix.shape.is_fully_defined():
return array_ops.reshape(
result,
array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
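# Illustrative sketch (not exported): the matrix exponential of the rotation
# generator [[0, theta], [-theta, 0]] equals the 2x2 rotation matrix
# [[cos(theta), sin(theta)], [-sin(theta), cos(theta)]], a handy sanity check
# for matrix_exponential. Defined for illustration only; never called here.
def _matrix_exp_rotation_sketch(theta=0.5):
  generator = constant_op.constant([[0., theta], [-theta, 0.]],
                                   dtype=dtypes.float32)
  return matrix_exponential(generator)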
@tf_export('linalg.tridiagonal_solve')
def tridiagonal_solve(diagonals,
rhs,
diagonals_format='compact',
transpose_rhs=False,
conjugate_rhs=False,
name=None,
partial_pivoting=True):
r"""Solves tridiagonal systems of equations.
The input can be supplied in various formats: `matrix`, `sequence` and
`compact`, specified by the `diagonals_format` arg.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
In `sequence` format, `diagonals` are supplied as a tuple or list of three
tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
`M-1` or `M`; in the latter case, the last element of superdiagonal and the
first element of subdiagonal will be ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `compact` format is recommended as the one with best performance. In case
you need to cast a tensor into a compact format manually, use `tf.gather_nd`.
An example for a tensor of shape [m, m]:
```python
rhs = tf.constant([...])
matrix = tf.constant([[...]])
m = matrix.shape[0]
dummy_idx = [0, 0] # An arbitrary element to use as a dummy
indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal
[[i, i] for i in range(m)], # Diagonal
[dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal
diagonals=tf.gather_nd(matrix, indices)
x = tf.linalg.tridiagonal_solve(diagonals, rhs)
```
Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
`[..., M, K]`. The latter allows to simultaneously solve K systems with the
same left-hand sides and K different right-hand sides. If `transpose_rhs`
is set to `True` the expected shape is `[..., M]` or `[..., K, M]`.
The batch dimensions, denoted as `...`, must be the same in `diagonals` and
`rhs`.
The output is a tensor of the same shape as `rhs`: either `[..., M]` or
`[..., M, K]`.
The op isn't guaranteed to raise an error if the input matrix is not
invertible. `tf.debugging.check_numerics` can be applied to the output to
detect invertibility problems.
**Note**: with large batch sizes, the computation on the GPU may be slow, if
either `partial_pivoting=True` or there are multiple right-hand sides
(`K > 1`). If this issue arises, consider if it's possible to disable pivoting
and have `K = 1`, or, alternatively, consider using CPU.
On CPU, solution is computed via Gaussian elimination with or without partial
pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE
library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
shape depends of `diagonals_format`, see description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
`diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known
statically, `rhs` will be treated as a matrix rather than a vector.
diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
`compact`.
transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
if the shape of rhs is [..., M]).
conjugate_rhs: If `True`, `rhs` is conjugated before solving.
name: A name to give this `Op` (optional).
partial_pivoting: whether to perform partial pivoting. `True` by default.
Partial pivoting makes the procedure more stable, but slower. Partial
pivoting is unnecessary in some cases, including diagonally dominant and
symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).
Returns:
A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.
Raises:
ValueError: An unsupported type is provided as input, or when the input
tensors have incorrect shapes.
UnimplementedError: Whenever `partial_pivoting` is true and the backend is
XLA.
[1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:
Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7.
"""
if diagonals_format == 'compact':
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
if diagonals_format == 'sequence':
if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
raise ValueError('Expected diagonals to be a sequence of length 3.')
superdiag, maindiag, subdiag = diagonals
if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
raise ValueError(
'Tensors representing the three diagonals must have the same shape,'
'except for the last dimension, got {}, {}, {}'.format(
subdiag.shape, maindiag.shape, superdiag.shape))
m = tensor_shape.dimension_value(maindiag.shape[-1])
def pad_if_necessary(t, name, last_dim_padding):
n = tensor_shape.dimension_value(t.shape[-1])
if not n or n == m:
return t
if n == m - 1:
paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +
[last_dim_padding])
return array_ops.pad(t, paddings)
raise ValueError('Expected {} to have length {} or {}, got {}.'.format(
name, m, m - 1, n))
subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])
diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
if diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if m1 and m2 and m1 != m2:
raise ValueError(
'Expected last two dimensions of diagonals to be same, got {} and {}'
.format(m1, m2))
m = m1 or m2
diagonals = array_ops.matrix_diag_part(
diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
return _tridiagonal_solve_compact_format(
diagonals, rhs, transpose_rhs, conjugate_rhs, partial_pivoting, name)
raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting, name):
"""Helper function used after the input has been cast to compact form."""
diags_rank, rhs_rank = diagonals.shape.rank, rhs.shape.rank
# If we know the rank of the diagonal tensor, do some static checking.
if diags_rank:
if diags_rank < 2:
raise ValueError(
'Expected diagonals to have rank at least 2, got {}'.format(
diags_rank))
if rhs_rank and rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
diags_rank - 1, diags_rank, rhs_rank))
if (rhs_rank and not diagonals.shape[:-2].is_compatible_with(
rhs.shape[:diags_rank - 2])):
raise ValueError('Batch shapes {} and {} are incompatible'.format(
diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))
if diagonals.shape[-2] and diagonals.shape[-2] != 3:
raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))
def check_num_lhs_matches_num_rhs():
if (diagonals.shape[-1] and rhs.shape[-2] and
diagonals.shape[-1] != rhs.shape[-2]):
raise ValueError('Expected number of left-hand sides and right-hand '
'sides to be equal, got {} and {}'.format(
diagonals.shape[-1], rhs.shape[-2]))
if rhs_rank and diags_rank and rhs_rank == diags_rank - 1:
# Rhs provided as a vector, ignoring transpose_rhs
if conjugate_rhs:
rhs = math_ops.conj(rhs)
rhs = array_ops.expand_dims(rhs, -1)
check_num_lhs_matches_num_rhs()
return array_ops.squeeze(
linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name),
-1)
if transpose_rhs:
rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
elif conjugate_rhs:
rhs = math_ops.conj(rhs)
check_num_lhs_matches_num_rhs()
return linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name)
@tf_export('linalg.tridiagonal_matmul')
def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):
r"""Multiplies tridiagonal matrix by matrix.
`diagonals` is representation of 3-diagonal NxN matrix, which depends on
`diagonals_format`.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
If `sequence` format, `diagonals` is list or tuple of three tensors:
`[superdiag, maindiag, subdiag]`, each having shape [..., M]. Last element
of `superdiag` and first element of `subdiag` are ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `sequence` format is recommended as the one with the best performance.
`rhs` is matrix to the right of multiplication. It has shape `[..., M, N]`.
Example:
```python
superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)
maindiag = tf.constant([2, 2, 2], dtype=tf.float64)
subdiag = tf.constant([0, -1, -1], dtype=tf.float64)
diagonals = [superdiag, maindiag, subdiag]
rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')
```
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
shape depends of `diagonals_format`, see description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.
diagonals_format: one of `sequence`, or `compact`. Default is `compact`.
name: A name to give this `Op` (optional).
Returns:
A `Tensor` of shape [..., M, N] containing the result of multiplication.
Raises:
ValueError: An unsupported type is provided as input, or when the input
tensors have incorrect shapes.
"""
if diagonals_format == 'compact':
superdiag = diagonals[..., 0, :]
maindiag = diagonals[..., 1, :]
subdiag = diagonals[..., 2, :]
elif diagonals_format == 'sequence':
superdiag, maindiag, subdiag = diagonals
elif diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if m1 and m2 and m1 != m2:
raise ValueError(
'Expected last two dimensions of diagonals to be same, got {} and {}'
.format(m1, m2))
diags = array_ops.matrix_diag_part(
diagonals, k=(-1, 1), padding_value=0., align='LEFT_RIGHT')
superdiag = diags[..., 0, :]
maindiag = diags[..., 1, :]
subdiag = diags[..., 2, :]
else:
raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)
# C++ backend requires matrices.
# Converting 1-dimensional vectors to matrices with 1 row.
superdiag = array_ops.expand_dims(superdiag, -2)
maindiag = array_ops.expand_dims(maindiag, -2)
subdiag = array_ops.expand_dims(subdiag, -2)
return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)
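# Example (hedged sketch): the same product as the docstring example above, but
# using the default 'compact' diagonals format; values are illustrative only.
#
#   diagonals = tf.constant([[-1, -1, 0],    # superdiagonal (last entry ignored)
#                            [2, 2, 2],      # main diagonal
#                            [0, -1, -1]],   # subdiagonal (first entry ignored)
#                           dtype=tf.float64)
#   rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
#   x = tf.linalg.tridiagonal_matmul(diagonals, rhs)  # diagonals_format='compact'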
def _maybe_validate_matrix(a, validate_args):
"""Checks that input is a `float` matrix."""
assertions = []
if not a.dtype.is_floating:
raise TypeError('Input `a` must have `float`-like `dtype` '
'(saw {}).'.format(a.dtype.name))
if a.shape is not None and a.shape.rank is not None:
if a.shape.rank < 2:
raise ValueError('Input `a` must have at least 2 dimensions '
'(saw: {}).'.format(a.shape.rank))
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least(
a, rank=2, message='Input `a` must have at least 2 dimensions.'))
return assertions
@tf_export('linalg.matrix_rank')
def matrix_rank(a, tol=None, validate_args=False, name=None):
"""Compute the matrix rank of one or more matrices.
Arguments:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) whose rank is to be
computed.
tol: Threshold below which a singular value is counted as 'zero'.
Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: 'matrix_rank'.
Returns:
matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
singular values.
"""
with ops.name_scope(name or 'matrix_rank'):
a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with ops.control_dependencies(assertions):
a = array_ops.identity(a)
s = svd(a, compute_uv=False)
if tol is None:
if (a.shape[-2:]).is_fully_defined():
m = np.max(a.shape[-2:].as_list())
else:
m = math_ops.reduce_max(array_ops.shape(a)[-2:])
eps = np.finfo(a.dtype.as_numpy_dtype).eps
tol = (
eps * math_ops.cast(m, a.dtype) *
math_ops.reduce_max(s, axis=-1, keepdims=True))
return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1)
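# Example (hedged sketch): computing ranks with the default tolerance, assuming
# TensorFlow 2.x eager execution; values are illustrative only.
#
#   tf.linalg.matrix_rank(tf.eye(3))                          # ==> 3
#   tf.linalg.matrix_rank(tf.constant([[1., 2.], [2., 4.]]))  # ==> 1
#   # (the second matrix has proportional rows, hence a single non-zero
#   #  singular value)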
@tf_export('linalg.pinv')
def pinv(a, rcond=None, validate_args=False, name=None):
"""Compute the Moore-Penrose pseudo-inverse of one or more matrices.
Calculate the [generalized inverse of a matrix](
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
singular-value decomposition (SVD) and including all large singular values.
The pseudo-inverse of a matrix `A` is defined as 'the matrix that "solves"
[the least-squares problem] `A @ x = b`', i.e., if `x_hat` is a solution, then
`A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if
`U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then
`A_pinv = V @ inv(Sigma) @ U.T`. [(Strang, 1980)][1]
This function is analogous to [`numpy.linalg.pinv`](
https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
default `rcond` is `1e-15`. Here the default is
`10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.
Args:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
pseudo-inverted.
rcond: `Tensor` of small singular value cutoffs. Singular values smaller
(in modulus) than `rcond` * largest_singular_value (again, in modulus) are
set to zero. Must broadcast against `tf.shape(a)[:-2]`.
Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: 'pinv'.
Returns:
a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except
rightmost two dimensions are transposed.
Raises:
TypeError: if input `a` does not have `float`-like `dtype`.
ValueError: if input `a` has fewer than 2 dimensions.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
a = tf.constant([[1., 0.4, 0.5],
[0.4, 0.2, 0.25],
[0.5, 0.25, 0.35]])
tf.matmul(tf.linalg.pinv(a), a)
# ==> array([[1., 0., 0.],
#            [0., 1., 0.],
#            [0., 0., 1.]], dtype=float32)
a = tf.constant([[1., 0.4, 0.5, 1.],
[0.4, 0.2, 0.25, 2.],
[0.5, 0.25, 0.35, 3.]])
tf.matmul(tf.linalg.pinv(a), a)
# ==> array([[ 0.76,  0.37,  0.21, -0.02],
#            [ 0.37,  0.43, -0.33,  0.02],
#            [ 0.21, -0.33,  0.81,  0.01],
#            [-0.02,  0.02,  0.01,  1.  ]], dtype=float32)
```
#### References
[1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press,
Inc., 1980, pp. 139-142.
"""
with ops.name_scope(name or 'pinv'):
a = ops.convert_to_tensor(a, name='a')
assertions = _maybe_validate_matrix(a, validate_args)
if assertions:
with ops.control_dependencies(assertions):
a = array_ops.identity(a)
dtype = a.dtype.as_numpy_dtype
if rcond is None:
def get_dim_size(dim):
dim_val = tensor_shape.dimension_value(a.shape[dim])
if dim_val is not None:
return dim_val
return array_ops.shape(a)[dim]
num_rows = get_dim_size(-2)
num_cols = get_dim_size(-1)
if isinstance(num_rows, int) and isinstance(num_cols, int):
max_rows_cols = float(max(num_rows, num_cols))
else:
max_rows_cols = math_ops.cast(
math_ops.maximum(num_rows, num_cols), dtype)
rcond = 10. * max_rows_cols * np.finfo(dtype).eps
rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')
# Calculate pseudo inverse via SVD.
# Note: if a is Hermitian then u == v. (We might observe additional
# performance by explicitly setting `v = u` in such cases.)
[
singular_values, # Sigma
left_singular_vectors, # U
right_singular_vectors, # V
] = svd(
a, full_matrices=False, compute_uv=True)
# Saturate small singular values to inf. This has the effect of making
# `1. / s = 0.` while not resulting in `NaN` gradients.
cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1)
singular_values = array_ops.where_v2(
singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values,
np.array(np.inf, dtype))
# By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse
# is defined as `pinv(a) == v @ inv(s) @ u^H`.
a_pinv = math_ops.matmul(
right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2),
left_singular_vectors,
adjoint_b=True)
if a.shape is not None and a.shape.rank is not None:
a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]]))
return a_pinv
@tf_export('linalg.lu_solve')
def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):
"""Solves systems of linear eqns `A X = RHS`, given LU factorizations.
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
rhs: Matrix-shaped float `Tensor` representing targets for which to solve;
`A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,
tf.newaxis])[..., 0]`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_solve').
Returns:
x: The `X` in `A @ X = RHS`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[1., 2],
[3, 4]],
[[7, 8],
[3, 4]]]
inv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))
tf.assert_near(tf.matrix_inverse(x), inv_x)
# ==> True
```
"""
with ops.name_scope(name or 'lu_solve'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')
assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
rhs = array_ops.identity(rhs)
if (rhs.shape.rank == 2 and perm.shape.rank == 1):
# Both rhs and perm have scalar batch_shape.
permuted_rhs = array_ops.gather(rhs, perm, axis=-2)
else:
# Either rhs or perm have non-scalar batch_shape or we can't determine
# this information statically.
rhs_shape = array_ops.shape(rhs)
broadcast_batch_shape = array_ops.broadcast_dynamic_shape(
rhs_shape[:-2],
array_ops.shape(perm)[:-1])
d, m = rhs_shape[-2], rhs_shape[-1]
rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]],
axis=0)
# Tile out rhs.
broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)
broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])
# Tile out perm and add batch indices.
broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])
broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])
broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)
broadcast_batch_indices = array_ops.broadcast_to(
math_ops.range(broadcast_batch_size)[:, array_ops.newaxis],
[broadcast_batch_size, d])
broadcast_perm = array_ops.stack(
[broadcast_batch_indices, broadcast_perm], axis=-1)
permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)
permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)
lower = set_diag(
band_part(lower_upper, num_lower=-1, num_upper=0),
array_ops.ones(
array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))
return triangular_solve(
lower_upper, # Only upper is accessed.
triangular_solve(lower, permuted_rhs),
lower=False)
@tf_export('linalg.lu_matrix_inverse')
def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):
"""Computes the inverse given the LU decomposition(s) of one or more matrices.
This op is conceptually identical to,
```python
inv_X = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(X))
tf.assert_near(tf.matrix_inverse(X), inv_X)
# ==> True
```
Note: this function does not verify the implied matrix is actually invertible
nor is this condition checked even when `validate_args=True`.
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness. Note: this function does not verify the implied matrix is
actually invertible, even when `validate_args=True`.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_matrix_inverse').
Returns:
inv_x: The matrix_inv, i.e.,
`tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[3., 4], [1, 2]],
[[7., 8], [3, 4]]]
inv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))
tf.assert_near(tf.matrix_inverse(x), inv_x)
# ==> True
```
"""
with ops.name_scope(name or 'lu_matrix_inverse'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
shape = array_ops.shape(lower_upper)
return lu_solve(
lower_upper,
perm,
rhs=eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype),
validate_args=False)
@tf_export('linalg.lu_reconstruct')
def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):
"""The reconstruct one or more matrices from their LU decomposition(s).
Args:
lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,
matmul(L, U)) = X` then `lower_upper = L + U - eye`.
perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =
X` then `perm = argmax(P)`.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
Default value: `False` (i.e., don't validate arguments).
name: Python `str` name given to ops managed by this object.
Default value: `None` (i.e., 'lu_reconstruct').
Returns:
x: The original input to `tf.linalg.lu`, i.e., `x` as in,
`lu_reconstruct(*tf.linalg.lu(x))`.
#### Examples
```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
x = [[[3., 4], [1, 2]],
[[7., 8], [3, 4]]]
x_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))
tf.assert_near(x, x_reconstructed)
# ==> True
```
"""
with ops.name_scope(name or 'lu_reconstruct'):
lower_upper = ops.convert_to_tensor(
lower_upper, dtype_hint=dtypes.float32, name='lower_upper')
perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
if assertions:
with ops.control_dependencies(assertions):
lower_upper = array_ops.identity(lower_upper)
perm = array_ops.identity(perm)
shape = array_ops.shape(lower_upper)
lower = set_diag(
band_part(lower_upper, num_lower=-1, num_upper=0),
array_ops.ones(shape[:-1], dtype=lower_upper.dtype))
upper = band_part(lower_upper, num_lower=0, num_upper=-1)
x = math_ops.matmul(lower, upper)
if (lower_upper.shape is None or lower_upper.shape.rank is None or
lower_upper.shape.rank != 2):
# We either don't know the batch rank or there are >0 batch dims.
batch_size = math_ops.reduce_prod(shape[:-2])
d = shape[-1]
x = array_ops.reshape(x, [batch_size, d, d])
perm = array_ops.reshape(perm, [batch_size, d])
perm = map_fn.map_fn(array_ops.invert_permutation, perm)
batch_indices = array_ops.broadcast_to(
math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])
x = array_ops.gather_nd(x, array_ops.stack([batch_indices, perm],
axis=-1))
x = array_ops.reshape(x, shape)
else:
x = array_ops.gather(x, array_ops.invert_permutation(perm))
x.set_shape(lower_upper.shape)
return x
def lu_reconstruct_assertions(lower_upper, perm, validate_args):
"""Returns list of assertions related to `lu_reconstruct` assumptions."""
assertions = []
message = 'Input `lower_upper` must have at least 2 dimensions.'
if lower_upper.shape.rank is not None and lower_upper.shape.rank < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least_v2(lower_upper, rank=2, message=message))
message = '`rank(lower_upper)` must equal `rank(perm) + 1`'
if lower_upper.shape.rank is not None and perm.shape.rank is not None:
if lower_upper.shape.rank != perm.shape.rank + 1:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank(
lower_upper, rank=array_ops.rank(perm) + 1, message=message))
message = '`lower_upper` must be square.'
if lower_upper.shape[-2:].is_fully_defined():
if lower_upper.shape[-2] != lower_upper.shape[-1]:
raise ValueError(message)
elif validate_args:
m, n = array_ops.split(
array_ops.shape(lower_upper)[-2:], num_or_size_splits=2)
assertions.append(check_ops.assert_equal(m, n, message=message))
return assertions
def _lu_solve_assertions(lower_upper, perm, rhs, validate_args):
"""Returns list of assertions related to `lu_solve` assumptions."""
assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)
message = 'Input `rhs` must have at least 2 dimensions.'
if rhs.shape.ndims is not None:
if rhs.shape.ndims < 2:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_rank_at_least(rhs, rank=2, message=message))
message = '`lower_upper.shape[-1]` must equal `rhs.shape[-1]`.'
if (lower_upper.shape[-1] is not None and rhs.shape[-2] is not None):
if lower_upper.shape[-1] != rhs.shape[-2]:
raise ValueError(message)
elif validate_args:
assertions.append(
check_ops.assert_equal(
array_ops.shape(lower_upper)[-1],
array_ops.shape(rhs)[-2],
message=message))
return assertions
|
gunan/tensorflow
|
tensorflow/python/ops/linalg/linalg_impl.py
|
Python
|
apache-2.0
| 43,513
|
[
"Gaussian"
] |
636e378eb50982b9efd58473b26999eddc638053f5e9315b618e1af504f9d4d6
|
#!/usr/bin/env python
import time
import numpy
#from pyscf.pbc import df as pdf
from mpi4pyscf.pbc import df as pdf
from pyscf.pbc import scf as pbchf
from pyscf.pbc import gto as pbcgto
nk = 2
kpts = [nk,nk,1]
Lz = 25 # Smallest Lz value for ~1e-6 convergence in absolute energy
a = 1.42 # bond length in graphene
e = []
t = []
pseudo = 'gth-pade'
##################################################
#
# 2D PBC with AFT
#
##################################################
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = [[4.6298286730500005, 0.0, 0.0], [-2.3149143365249993, 4.009549246030899, 0.0], [0.0, 0.0, Lz]],
atom = 'C 0 0 0; C 0 2.67303283 0',
dimension=2,
low_dim_ft_type = 'inf_vacuum',
pseudo = pseudo,
verbose = 4,
precision = 1e-6,
basis='gth-szv')
t0 = time.time()
mf = pbchf.KRHF(cell)
mf.with_df = pdf.AFTDF(cell)
mf.kpts = cell.make_kpts(kpts)
mf.conv_tol = 1e-6
e.append(mf.kernel())
t.append(time.time() - t0)
##################################################
#
# 2D PBC with FFT
#
##################################################
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = [[4.6298286730500005, 0.0, 0.0], [-2.3149143365249993, 4.009549246030899, 0.0], [0.0, 0.0, Lz]],
atom = 'C 0 0 0; C 0 2.67303283 0',
dimension=2,
pseudo = pseudo,
verbose = 4,
precision = 1e-6,
low_dim_ft_type='analytic_2d_1',
basis='gth-szv')
t0 = time.time()
mf = pbchf.KRHF(cell)
mf.with_df = pdf.FFTDF(cell)
mf.kpts = cell.make_kpts(kpts)
mf.conv_tol = 1e-6
e.append(mf.kernel())
t.append(time.time() - t0)
##################################################
#
# 2D PBC with GDF
#
##################################################
t0 = time.time()
mf = pbchf.KRHF(cell)
mf.with_df = pdf.GDF(cell)
mf.kpts = cell.make_kpts(kpts)
mf.conv_tol = 1e-6
e.append(mf.kernel())
t.append(time.time() - t0)
##################################################
#
# 2D PBC with MDF
#
##################################################
t0 = time.time()
mf = pbchf.KRHF(cell)
mf.with_df = pdf.MDF(cell)
mf.kpts = cell.make_kpts(kpts)
mf.conv_tol = 1e-6
e.append(mf.kernel())
t.append(time.time() - t0)
print('Energy (AFTDF) (FFTDF) (GDF) (MDF)')
print(e)
print('Timing (AFTDF) (FFTDF) (GDF) (MDF)')
print(t)
|
sunqm/mpi4pyscf
|
examples/32-graphene.py
|
Python
|
gpl-3.0
| 2,386
|
[
"PySCF"
] |
0bbbcf8a4c7003c4bc7ad78001069c7dc1a1f86ed5a44526649261745c43fb45
|
# #############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Matthew Harrigan
# Contributors: Robert T. McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
# #############################################################################
from __future__ import print_function
import re
import ast
import sys
from copy import deepcopy
from collections import namedtuple
from mdtraj.utils.six import PY2
from mdtraj.utils.external.pyparsing import (Word, ParserElement, MatchFirst,
Keyword, opAssoc, quotedString, alphas, alphanums, infixNotation, Group,
Optional, ParseException)
from mdtraj.utils.external.astor import codegen
ParserElement.enablePackrat()
__all__ = ['parse_selection']
# ############################################################################
# Globals
# ############################################################################
NUMS = '.0123456789'
THIS_ATOM = ast.Name(id='atom', ctx=ast.Load(), SINGLETON=True)
RE_MODULE = ast.Name(id='re', ctx=ast.Load(), SINGLETON=True)
SELECTION_GLOBALS = {'re': re}
_ParsedSelection = namedtuple('_ParsedSelection', ['expr', 'source', 'astnode'])
# ############################################################################
# Utils
# ############################################################################
class _RewriteNames(ast.NodeTransformer):
def visit_Name(self, node):
if hasattr(node, 'SINGLETON'):
return node
_safe_names = {'None': None, 'True': True, 'False': False}
if node.id in _safe_names:
if sys.version_info >= (3, 4):
return ast.NameConstant(value=_safe_names[node.id])
return node
# all other bare names are taken to be string literals. Thus something
# like parse_selection('name CA') properly resolves CA as a string
# literal, not a barename to be loaded from the global scope!
return ast.Str(s=node.id)
def _chain(*attrs):
"""This transforms, for example, ('residue', 'is_protein'), into
Attribute(value=Attribute(value=THIS_ATOM,
attr='residue', ctx=Load()), attr='is_protein', ctx=Load())
"""
left = THIS_ATOM
for attr in attrs:
left = ast.Attribute(value=left, attr=attr, ctx=ast.Load())
return left
def _kw(*tuples):
"""Create a many-to-one dictionary.
_kw((['one', '1'], 'one'))
gives {'one': 'one', '1': 'one'}
"""
dic = dict()
for keys, val in tuples:
for key in keys:
dic[key] = val
return dic
def _check_n_tokens(tokens, n_tokens, name):
if not len(tokens) == n_tokens:
err = "{} take {} values. You gave {}"
err = err.format(name, n_tokens, len(tokens))
raise ParseException(err)
class SelectionKeyword(object):
keyword_aliases = _kw(
# Atom.<attribute>
(('all', 'everything'), ast.Name(id='True', ctx=ast.Load())),
(('none', 'nothing'), ast.Name(id='False', ctx=ast.Load())),
(('backbone', 'is_backbone'), _chain('is_backbone')),
(('sidechain', 'is_sidechain'), _chain('is_sidechain')),
# Atom.residue.<attribute>
(('protein', 'is_protein'), _chain('residue', 'is_protein')),
# (('nucleic', 'is_nucleic'), _chain('residue', 'is_nucleic')),
(('water', 'waters', 'is_water'), _chain('residue', 'is_water')),
(('name',), _chain('name')),
(('index',), _chain('index')),
(('n_bonds',), _chain('n_bonds')),
(('residue', 'resSeq'), _chain('residue', 'resSeq')),
(('resname', 'resn'), _chain('residue', 'name')),
(('resid', 'resi'), _chain('residue', 'index')),
# Atom.residue.chain.<attribute>
(('chainid',), _chain('residue', 'chain', 'index')),
# Atom.element.<attribute>
(('type', 'element', 'symbol'), _chain('element', 'symbol')),
# (('radius',), _chain('element', 'radius')),
(('mass',), _chain('element', 'mass')),
)
def __init__(self, tokens):
# pyparsing constructs the instance while building the parse tree,
# and gives us the matched tokens; in this case, a single selection keyword.
self._tokens = tokens
_check_n_tokens(tokens, 1, 'Unary selectors')
assert tokens[0] in self.keyword_aliases
def ast(self):
return self.keyword_aliases[self._tokens[0]]
class Literal(object):
def __init__(self, tokens):
self.token = tokens[0]
_check_n_tokens(tokens, 1, 'literal')
def ast(self):
return ast.parse(self.token, mode='eval').body
class UnaryInfixOperand(object):
n_terms = 1
assoc = 'RIGHT'
keyword_aliases = _kw(
(['not ', '!'], ast.Not()),
)
def __init__(self, tokens):
tokens = tokens[0]
_check_n_tokens(tokens, 2, 'Unary infix operators')
self.op_token, self.value_token = tokens
assert self.op_token in self.keyword_aliases
if isinstance(self.value_token, Literal):
raise ValueError("Cannot use literals as booleans.")
def ast(self):
return ast.UnaryOp(op=self.keyword_aliases[self.op_token],
operand=self.value_token.ast())
class RegexInfixOperand(object):
n_terms = 2
assoc = 'LEFT'
keyword_aliases = {'=~': '=~'}
def __init__(self, tokens):
self.tokens = tokens[0]
_check_n_tokens(self.tokens, 3, 'regex operator')
self.string, op, self.pattern = self.tokens
assert op == '=~'
if isinstance(self.string, Literal):
raise ValueError("Cannot do regex comparison on literal")
def ast(self):
pattern = self.tokens[2].ast()
string = self.tokens[0].ast()
return ast.Compare(
left=ast.Call(func=ast.Attribute(value=RE_MODULE, attr='match',
ctx=ast.Load()),
args=[pattern, string], keywords=[], starargs=None,
kwargs=None),
ops=[ast.IsNot()], comparators=[ast.Name(id='None', ctx=ast.Load())]
)
class BinaryInfixOperand(object):
n_terms = 2
assoc = 'LEFT'
keyword_aliases = _kw(
(['and', '&&'], ast.And()),
(['or', '||'], ast.Or()),
(['<', 'lt'], ast.Lt()),
(['==', 'eq'], ast.Eq()),
(['<=', 'le'], ast.LtE()),
(['!=', 'ne'], ast.NotEq()),
(['>=', 'ge'], ast.GtE()),
(['>', 'gt'], ast.Gt()),
)
def __init__(self, tokens):
tokens = tokens[0]
if len(tokens) % 2 == 1:
self.op_token = tokens[1]
self.comparators = tokens[::2]
else:
err = "Invalid number of infix expressions: {}"
err = err.format(len(tokens))
raise ParseException(err)
assert self.op_token in self.keyword_aliases
# Check for too many literals and not enough keywords
op = self.keyword_aliases[self.op_token]
if isinstance(op, ast.boolop):
if any(isinstance(c, Literal) for c in self.comparators):
raise ValueError("Cannot use literals as truth")
else:
if all(isinstance(c, Literal) for c in self.comparators):
raise ValueError("Cannot compare literals.")
def ast(self):
op = self.keyword_aliases[self.op_token]
if isinstance(op, ast.boolop):
# and and or use one type of AST node
value = ast.BoolOp(op=op, values=[e.ast() for e in self.comparators])
else:
# remaining operators use another
value = ast.Compare(left=self.comparators[0].ast(), ops=[op],
comparators=[e.ast() for e in self.comparators[1:]])
return value
class RangeCondition(object):
def __init__(self, tokens):
tokens = tokens[0]
_check_n_tokens(tokens, 4, 'range condition')
assert tokens[2] == 'to'
self._from, self._center, self._to = tokens[0], tokens[1], tokens[3]
if isinstance(self._from, Literal):
raise ValueError("Can't test literal in range.")
def ast(self):
return ast.Compare(left=self._center.ast(), ops=[ast.LtE(), ast.LtE()],
comparators=[self._from.ast(), self._to.ast()])
class parse_selection(object):
"""Parse an atom selection expression
Parameters
----------
selection_string : str
Selection string, a string in the MDTraj atom selection grammar.
Returns
-------
expr : callable (atom -> bool)
A callable object which accepts an MDTraj.core.topology.Atom object and
returns a boolean value giving whether or not that particular atom
satisfies the selection string.
source : str
Python source code corresponding to the expression ``expr``.
astnode : ast.AST
Python abstract syntax tree node containing the parsed expression
Examples
--------
>>> expr, source, astnode = parse_selection('protein and type CA')
>>> expr
<function __main__.<lambda>>
>>> source
'(atom.residue.is_protein and (atom.element.symbol == CA))'
>>> astnode
<_ast.BoolOp at 0x103969d50>
"""
def __init__(self):
self.is_initialized = False
self.expression = None
def _initialize(self):
def keywords(klass):
kws = sorted(klass.keyword_aliases.keys())
return MatchFirst([Keyword(kw) for kw in kws])
def infix(klass):
kws = sorted(klass.keyword_aliases.keys())
return [(kw, klass.n_terms, getattr(opAssoc, klass.assoc), klass)
for kw in kws]
# literals include words made of alphanumerics, numbers,
# or quoted strings but we exclude any of the logical
# operands (e.g. 'or') from being parsed literals
literal = (
~(keywords(BinaryInfixOperand) | keywords(UnaryInfixOperand)) +
(Word(NUMS) | quotedString | Word(alphas, alphanums))
)
literal.setParseAction(Literal)
# These are the other 'root' expressions,
# the selection keywords (resname, resid, mass, etc)
selection_keyword = keywords(SelectionKeyword)
selection_keyword.setParseAction(SelectionKeyword)
base_expression = MatchFirst([selection_keyword, literal])
# the grammar includes implicit equality comparisons
# between adjacent expressions:
# i.e. 'name CA' --> 'name == CA'
implicit_equality = Group(
base_expression + Optional(Keyword('=='), '==') + base_expression
)
implicit_equality.setParseAction(BinaryInfixOperand)
# range condition matches expressions such as 'mass 1 to 20'
range_condition = Group(
base_expression + literal + Keyword('to') + literal
)
range_condition.setParseAction(RangeCondition)
expression = range_condition | implicit_equality | base_expression
logical_expr = infixNotation(
expression,
infix(UnaryInfixOperand) +
infix(BinaryInfixOperand) +
infix(RegexInfixOperand)
)
self.expression = logical_expr
self.is_initialized = True
self.transformer = _RewriteNames()
def __call__(self, selection):
if not self.is_initialized:
self._initialize()
try:
parse_result = self.expression.parseString(selection, parseAll=True)
except ParseException as e:
msg = str(e)
lines = ["%s: %s" % (msg, selection),
" " * (12 + len("%s: " % msg) + e.loc) + "^^^"]
raise ValueError('\n'.join(lines))
# Change __ATOM__ in function bodies. It must bind to the arg
# name specified below (i.e. 'atom')
astnode = self.transformer.visit(deepcopy(parse_result[0].ast()))
# Special check for a single literal
if isinstance(astnode, ast.Num) or isinstance(astnode, ast.Str):
raise ValueError("Cannot use a single literal as a boolean.")
if PY2:
args = [ast.Name(id='atom', ctx=ast.Param())]
signature = ast.arguments(args=args, vararg=None, kwarg=None,
defaults=[])
else:
args = [ast.arg(arg='atom', annotation=None)]
signature = ast.arguments(args=args, vararg=None, kwarg=None,
kwonlyargs=[], defaults=[],
kw_defaults=[])
func = ast.Expression(body=ast.Lambda(signature, astnode))
source = codegen.to_source(astnode)
expr = eval(
compile(ast.fix_missing_locations(func), '<string>', mode='eval'),
SELECTION_GLOBALS)
return _ParsedSelection(expr, source, astnode)
# Create the callable, and use it to overshadow the class. This way there's
# basically just one global instance of the "function", even though it's
# a callable class.
parse_selection = parse_selection()
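# Example (hedged sketch): applying the parsed selection to an MDTraj topology;
# `top` is a hypothetical mdtraj.Topology instance, not defined here.
#
#   sel = parse_selection('water and name O')
#   indices = [a.index for a in top.atoms if sel.expr(a)]
#   print(sel.source)   # the generated Python expression, for inspection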
if __name__ == '__main__':
import sys
exp = parse_selection(sys.argv[1])
print(exp.source)
print(ast.dump(exp.astnode))
|
kyleabeauchamp/mdtraj
|
mdtraj/core/selection.py
|
Python
|
lgpl-2.1
| 13,995
|
[
"MDTraj",
"VisIt"
] |
296ed1d0d84cfe9b9581fb72d9d495ea8f5c3505dfb92072a64ff16c34238d71
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RJaspar2018(RPackage):
"""Data package for JASPAR 2018. To search this databases,
please use the package TFBSTools (>= 1.15.6)."""
homepage = "http://jaspar.genereg.net/"
url = "https://git.bioconductor.org/packages/JASPAR2018"
version('1.0.0', git='https://git.bioconductor.org/packages/JASPAR2018', commit='4c84092b3737bb1c57ab56f4321f2f5e4b0efeaa')
depends_on('r@3.4.0:')
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-jaspar2018/package.py
|
Python
|
lgpl-2.1
| 1,670
|
[
"Bioconductor"
] |
85521f98e301360e4efd7290ee8fff8956d130601f5b926646067af6b0193120
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 4 19:53:51 2014
Phase correlation drift correction.
Inspired by the paper "Cross-correlation image tracking for drift correction
and adsorbate analysis" by B. A. Mantooth, Z. J. Donhauser, K. F. Kelly,
and P. S. Weiss.
@author: Monika Kauer
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy import ndimage
import os, sys
import scipy.misc as misc
import pump_writer
##===================================================#
# image generator in/output
## ==================================================#
def read_sequentially(params, intrvl = 1):
"""Function that reads sequentially with every call."""
filenames = os.listdir(params['directory'])
filenames = [f for f in filenames if ".%s"% params["type"] in f]
filenames = pump_writer.natural_sort(filenames)
filenames2 = filenames[params["start"]:params["end"]:intrvl]
#deal with non-divisor chunk lengths
if (params["end"]-params["start"])%intrvl != 0:
try:
filenames2.append(filenames[params["end"]])
params['nof'] = (params["end"]-params["start"])
except IndexError:
filenames2.append(filenames[-1])
params['nof'] = (len(filenames)-params["start"])
elif intrvl > 1 and (params["end"]-params["start"])%intrvl == 0:
try:
filenames2.append(filenames[params["end"]])
params['nof'] = (params["end"]-params["start"])
except IndexError:
filenames2.append(filenames[-1])
params['nof'] = (len(filenames)-params["start"])
for filename in filenames2:
yield read_img(params['directory']+filename, params)
def read_img(fname, params):
"""reads an image file as array."""
try:
image = misc.imread(fname, flatten=True)
data = np.asarray(image, dtype = np.int64)
if params['rotate']:
data = np.transpose(data)
if params['cropx']:
data = data[:,params['cropx'][0]:params['cropx'][1]]
return data
except IOError:
print fname
pass
##===================================================#
# image registration
## ==================================================#
def reg(im1, im2, params):
"""Find image-image correlation and translation vector using FFTs."""
# use hanning window. Reduces the edge effect from finite size
shape= np.array(im1.shape)
fft_im1 = np.fft.fft2(im1)
fft_im2 = np.conj(np.fft.fft2(im2))
corr = np.fft.ifft2(fft_im1*fft_im2).real
corr = ndimage.gaussian_filter(corr, .5) - ndimage.gaussian_filter(corr, 30)
t0, t1 = np.unravel_index(np.argmax(corr), shape)
if t0 > shape[0] // 2:
t0 -= shape[0]
if t1 > shape[1] // 2:
t1 -= shape[1]
return corr, [t0, 0]
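# Example (hedged sketch): sanity-checking `reg` on synthetic data. The dict
# passed as `params` is unused by `reg`, and only the axis-0 component of the
# shift is reported because `reg` returns [t0, 0].
#
#   im1 = np.zeros((64, 64)); im1[20:30, 20:30] = 1.0
#   im2 = np.roll(im1, 5, axis=0)            # shift the feature by 5 rows
#   corr, drift = reg(im1, im2, {})          # expect drift close to [5, 0]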
def find_roi(params):
"""calculates image drift using registration via correlation."""
im = read_sequentially(params, intrvl = params["chunk"])
roi = [[0,0]]
im_new = im.next()
height, width = im_new.shape
try:
while True: #go through all image chunks from start to end
im_old = im_new
im_new = im.next()
im1 = np.where(im_old>np.median(im_old), 1,0)
im2 = np.where(im_new>np.median(im_new), 1,0)
_,drift = reg(im1, im2, params)
roi.append(drift)
except StopIteration:
pass
finally:
del im
return np.array(roi)
def interpol_drift(drift, params):
"""Returns linearly interpolated ROI.
This uses drift calculation where drift comes from adjacent reference frame."""
x = np.cumsum(drift[:,1])
y = np.cumsum(drift[:,0])
r = np.zeros((params['nof'],2))
dr = params['nof']%params['chunk']
for cnt in xrange(1,len(r)-dr):
index = float(cnt)/(params["chunk"])
i = int(index)+1
vy, vx = y[i]-y[i-1], x[i]-x[i-1]
#r[cnt] = (index%1*vy)+y[i-1],(index%1*vx)+x[i-1]
r[cnt] = y[i-1]+(index%1)*vy,x[i-1]+(index%1)*vx
if cnt == len(r)-dr-1:
#this deals with leftover interval if images%chunk!=0
for rest in xrange(1,dr+1):
index = float(rest)/dr+1
vy, vx = y[i]-y[i-1], x[i]-x[i-1]
r[cnt+rest] = y[i-1]+(index%1*vy),x[i-1]+(index%1*vx)
return r
##===================================================#
# feature detection for neuron physiological imaging
## ==================================================#
def fluorescence(params, roi):
"""finds a neuron from images using thresholding
in a region of interest."""
images = read_sequentially(params)
values,locations = [],[]
try:
cnt = 0
imgs = ndimage.shift(images.next(), roi[cnt], mode="wrap")
cms_old = [params['y0'],params['x0']]
#print "cms_old is ",cms_old
val_old = []
y0, x0 = cms_old
height, width = imgs.shape
cnt += 1
while True:
y1, x1, fluor, bg = similarity3(imgs, cms_old,[y0,x0], params)
#implement a short memory of neuron position
val_old.append([y1, x1])
y0 = np.average([v[0] for v in val_old[-10:]])
x0 = np.average([v[1] for v in val_old[-10:]])
values.append([fluor, bg])
locations.append([y1-roi[cnt-1][0],x1+params["cropx"][0]-roi[cnt-1][1]])
imgs = ndimage.shift(images.next(), roi[cnt], mode="wrap")
cnt += 1
except StopIteration:
pass
finally:
del images
return np.array(values), np.array(locations)
def similarity3(im1, cms,old_coor, params):
"""Calculates fluorescence of neuron by thresholding."""
bgsize = params["bgsize"]
part1 = im1[max(0,cms[0]-bgsize):cms[0]+bgsize, max(0,cms[1]-bgsize):cms[1]+bgsize]
offsety, offsetx = max(0,cms[0]-bgsize), max(0,cms[1]-bgsize)
height, width = part1.shape
y0,x0 = old_coor #previous coords
thresh = np.sort(part1, axis=None)[-int(params["thresh_pump"]*height*width)]
#print "threshold is", thresh
mask = np.where(part1 > thresh, 1, 0)
mask = ndimage.binary_opening(mask,structure = np.ones((2,2)))
mask = ndimage.binary_closing(mask)
label_im, nb_labels = ndimage.label(mask)
centroids = ndimage.measurements.center_of_mass(part1, label_im, xrange(1,nb_labels+1))
dist = []
for index, coord in enumerate(centroids):
y,x= coord
dist.append((y-y0+offsety)**2 + (x-x0+offsetx)**2)
if min(dist)>2*params["max_movement"]**2:
print dist, y0,x0, offsety, offsetx,
y,x = y0-offsety,x0-offsetx
radius = params["roisize"]
neuron = part1[max(0,y-radius):y+radius,max(0,x-radius):x+radius,]
value = np.ma.average(np.sort(neuron, axis=None)[-20:])
else:
loc = np.argmin(dist)
y,x = centroids[loc]
remove_pixel = np.where(label_im ==loc+1,0,1)
neuron = np.ma.masked_array(part1, remove_pixel)
value = np.ma.average(neuron)
try:
radius = params["roisize"]
mask1 = np.zeros(part1.shape, dtype=bool)
mask1[max(0,y-radius):y+radius,max(0,x-radius):x+radius,] = True
bg_mask = np.ma.mask_or(mask,mask1)
bg = np.ma.masked_array(part1, bg_mask)
bg_level = np.ma.average(bg)
except IndexError:
y,x=y0,x0
value=0
bg_level=0
return y+offsety, x+offsetx, value, bg_level
##===================================================#
# Main
## ==================================================#
def warp_detector(params):
##===================================================#
# Translation correction
## ==================================================#
drift = find_roi(params)
drift = interpol_drift(drift, params)
print "done with drift"
sys.stdout.flush()
##===================================================#
# detect pumping
## ==================================================#
coords = fluorescence(params, drift)
time = np.arange(params["start"], params["start"]+len(coords),1)
out_data = zip(time,coords[:,0], coords[:,1],coords[:,2])
print "Analysis of: ",params["start"], params["end"]
##===================================================#
# write results and movie
## ==================================================#
if len(coords) > 0:
outputstring = "%s_%i_%i"%(params["basename"],params["start"],params["end"]-1)
pump_writer.write_data(params["outdir"], outputstring+"_kymo", out_data, 4)
images = read_sequentially(params)
fig = plt.figure(params["start"]+1) #make unique figures needed for parallelization
ax1 = fig.add_subplot(211)
pump_writer.make_kymograph(images, params, diff=False, roi=drift)
ax1.plot(cms[:,0]+drift[:,0])
ax1.set_xlim([0,len(cms)])
ax2 = fig.add_subplot(212)
ax2.plot(coords[:,0])
ax2.plot(coords[:,1])
ax2.set_xlim([0,len(coords)])
ax2.set_ylim(ax2.get_ylim()[::-1])
fig.savefig(params["outdir"]+"/"+outputstring+"_kym.png")
|
monikascholz/pWARP
|
fluowarp.py
|
Python
|
gpl-2.0
| 9,385
|
[
"NEURON"
] |
7c13f1dbfeb7e4dbecf43e216b46ea61df793557efdf9f2da2f5b08b1a6fce44
|
"""Functions that expose information about templates that might be
interesting for introspection.
"""
import typing as t
from . import nodes
from .compiler import CodeGenerator
from .compiler import Frame
if t.TYPE_CHECKING:
from .environment import Environment
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment: "Environment") -> None:
super().__init__(environment, "<introspection>", "<introspection>")
self.undeclared_identifiers: t.Set[str] = set()
def write(self, x: str) -> None:
"""Don't write."""
def enter_frame(self, frame: Frame) -> None:
"""Remember all undeclared identifiers."""
super().enter_frame(frame)
for _, (action, param) in frame.symbols.loads.items():
if action == "resolve" and param not in self.environment.globals:
self.undeclared_identifiers.add(param)
def find_undeclared_variables(ast: nodes.Template) -> t.Set[str]:
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == {'bar'}
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment) # type: ignore
codegen.visit(ast)
return codegen.undeclared_identifiers
_ref_types = (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
_RefType = t.Union[nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include]
def find_referenced_templates(ast: nodes.Template) -> t.Iterator[t.Optional[str]]:
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
template_name: t.Any
for node in ast.find_all(_ref_types):
template: nodes.Expr = node.template # type: ignore
if not isinstance(template, nodes.Const):
# a tuple with some non consts in there
if isinstance(template, (nodes.Tuple, nodes.List)):
for template_name in template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, str):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(template.value, str):
yield template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and isinstance(
template.value, (tuple, list)
):
for template_name in template.value:
if isinstance(template_name, str):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
|
pallets/jinja
|
src/jinja2/meta.py
|
Python
|
bsd-3-clause
| 4,396
|
[
"VisIt"
] |
18d3c4be27e649a53708cc656e17813998de676efb1d384e3d44dfd51929a4a4
|
# setuptools installation of GromacsWrapper
# Copyright (c) 2008-2011 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
#
# See the files INSTALL and README for details or visit
# https://github.com/Becksteinlab/GromacsWrapper
from __future__ import with_statement
from setuptools import setup, find_packages
import imp, os
with open("README.rst") as readme:
long_description = readme.read()
# Dynamically calculate the version based on gromacs.VERSION.
# (but requires that we can actually import the package BEFORE it is
# properly installed!)
version_file = os.path.join(os.path.dirname(__file__), 'gromacs', 'version.py')
version = imp.load_source('gromacs.version', version_file).get_version()
setup(name="GromacsWrapper",
version=version,
description="A python wrapper around the gromacs tools.",
long_description=long_description,
author="Oliver Beckstein",
author_email="orbeckst@gmail.com",
license="GPLv3",
url="https://github.com/Becksteinlab/GromacsWrapper",
download_url="https://github.com/Becksteinlab/GromacsWrapper/downloads",
keywords="science Gromacs analysis 'molecular dynamics'",
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(exclude=['tests','scripts','extras','doc/examples']),
scripts = ['scripts/gw-fit_strip_trajectories.py',
'scripts/gw-join_parts.py',
'scripts/gw-merge_topologies.py',
'scripts/gw-forcefield.py',
'scripts/gw-partial_tempering.py',
],
package_data={'gromacs': ['templates/*.sge', 'templates/*.pbs', # template files
'templates/*.ll', 'templates/*.sh',
'templates/*.mdp', 'templates/*.cfg',
'tests/data/fileformats/top/*.mdp', # test data
'tests/data/fileformats/top/*/*.top',
'tests/data/fileformats/top/*/*.gro',
'tests/data/*.log',
],
'vmd': ['*.tcl'], # server start in VMD
},
install_requires = ['numpy>=1.0',
'scipy', # numkit needs it
'six', # towards py 3 compatibility
], # basic package (w/o analysis)
extras_require = {
'analysis': ['matplotlib>=0.91.3',
'RecSQL>=0.7',
'pandas',
],
'numkit': ['scipy'],
},
tests_require = ['numpy', 'pandas'],
zip_safe = True,
)
|
jandom/GromacsWrapper
|
setup.py
|
Python
|
gpl-3.0
| 3,430
|
[
"Gromacs",
"VMD",
"VisIt"
] |
1605fc325eb30dbf4adc43b91a177568cbf88a7482d10f2d184dbb16ee33b027
|
from collections import deque
class SimpleDAG(object):
''' A simple implementation of a directed acyclic graph '''
def __init__(self):
self.nodes = []
self.root_nodes = set([])
r'''
Track node_obj->node index
dict where key is a full workflow node object or whatever we are
storing in ['node_object'] and value is an index to be used into
self.nodes
'''
self.node_obj_to_node_index = dict()
r'''
Track per-node from->to edges
i.e.
{
'success': {
1: [2, 3],
4: [2, 3],
},
'failed': {
1: [5],
}
}
'''
self.node_from_edges_by_label = dict()
r'''
Track per-node reverse relationship (child to parent)
i.e.
{
'success': {
2: [1, 4],
3: [1, 4],
},
'failed': {
5: [1],
}
}
'''
self.node_to_edges_by_label = dict()
def __contains__(self, obj):
if obj in self.node_obj_to_node_index:
return True
return False
def __len__(self):
return len(self.nodes)
def __iter__(self):
return self.nodes.__iter__()
def generate_graphviz_plot(self, file_name="/awx_devel/graph.gv"):
def run_status(obj):
dnr = "RUN"
status = "NA"
if hasattr(obj, 'job') and obj.job and hasattr(obj.job, 'status'):
status = obj.job.status
if hasattr(obj, 'do_not_run') and obj.do_not_run is True:
dnr = "DNR"
return "{}_{}_{}".format(dnr, status, obj.id)
doc = """
digraph g {
rankdir = LR
"""
for n in self.nodes:
obj = n['node_object']
status = "NA"
if hasattr(obj, 'job') and obj.job:
status = obj.job.status
color = 'black'
if status == 'successful':
color = 'green'
elif status == 'failed':
color = 'red'
elif obj.do_not_run is True:
color = 'gray'
doc += "%s [color = %s]\n" % (
run_status(n['node_object']),
color
)
for label, edges in self.node_from_edges_by_label.items():
for from_node, to_nodes in edges.items():
for to_node in to_nodes:
doc += "%s -> %s [ label=\"%s\" ];\n" % (
run_status(self.nodes[from_node]['node_object']),
run_status(self.nodes[to_node]['node_object']),
label,
)
doc += "}\n"
gv_file = open(file_name, 'w')
gv_file.write(doc)
gv_file.close()
def add_node(self, obj, metadata=None):
if self.find_ord(obj) is None:
'''
Assume node is a root node until a child is added
'''
node_index = len(self.nodes)
self.root_nodes.add(node_index)
self.node_obj_to_node_index[obj] = node_index
entry = dict(node_object=obj, metadata=metadata)
self.nodes.append(entry)
def add_edge(self, from_obj, to_obj, label):
from_obj_ord = self.find_ord(from_obj)
to_obj_ord = self.find_ord(to_obj)
'''
To node is no longer a root node
'''
self.root_nodes.discard(to_obj_ord)
if from_obj_ord is None and to_obj_ord is None:
raise LookupError("From object {} and to object {} not found".format(from_obj, to_obj))
elif from_obj_ord is None:
raise LookupError("From object not found {}".format(from_obj))
elif to_obj_ord is None:
raise LookupError("To object not found {}".format(to_obj))
self.node_from_edges_by_label.setdefault(label, dict()) \
.setdefault(from_obj_ord, [])
self.node_to_edges_by_label.setdefault(label, dict()) \
.setdefault(to_obj_ord, [])
self.node_from_edges_by_label[label][from_obj_ord].append(to_obj_ord)
self.node_to_edges_by_label[label][to_obj_ord].append(from_obj_ord)
def find_ord(self, obj):
return self.node_obj_to_node_index.get(obj, None)
def _get_children_by_label(self, node_index, label):
return [self.nodes[index] for index in
self.node_from_edges_by_label.get(label, {})
.get(node_index, [])]
def get_children(self, obj, label=None):
this_ord = self.find_ord(obj)
nodes = []
if label:
return self._get_children_by_label(this_ord, label)
else:
nodes = []
for label_obj in self.node_from_edges_by_label.keys():
nodes.extend(self._get_children_by_label(this_ord, label_obj))
return nodes
def _get_parents_by_label(self, node_index, label):
return [self.nodes[index] for index in
self.node_to_edges_by_label.get(label, {})
.get(node_index, [])]
def get_parents(self, obj, label=None):
this_ord = self.find_ord(obj)
nodes = []
if label:
return self._get_parents_by_label(this_ord, label)
else:
nodes = []
for label_obj in self.node_to_edges_by_label.keys():
nodes.extend(self._get_parents_by_label(this_ord, label_obj))
return nodes
def get_root_nodes(self):
return [self.nodes[index] for index in self.root_nodes]
def has_cycle(self):
node_objs = [node['node_object'] for node in self.get_root_nodes()]
node_objs_visited = set([])
path = set([])
stack = node_objs
res = False
if len(self.nodes) != 0 and len(node_objs) == 0:
return True
while stack:
node_obj = stack.pop()
children = [node['node_object'] for node in self.get_children(node_obj)]
children_to_add = list(filter(lambda node_obj: node_obj not in node_objs_visited, children))
if children_to_add:
if node_obj in path:
res = True
break
path.add(node_obj)
stack.append(node_obj)
stack.extend(children_to_add)
else:
node_objs_visited.add(node_obj)
path.discard(node_obj)
return res
def sort_nodes_topological(self):
nodes_sorted = deque()
obj_ids_processed = set([])
def visit(node):
obj = node['node_object']
if obj.id in obj_ids_processed:
return
for child in self.get_children(obj):
visit(child)
obj_ids_processed.add(obj.id)
nodes_sorted.appendleft(node)
for node in self.nodes:
obj = node['node_object']
if obj.id in obj_ids_processed:
continue
visit(node)
return nodes_sorted
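# Example (hedged sketch): exercising SimpleDAG with hypothetical stand-in node
# objects (real callers pass workflow node model instances carrying an `id`).
#
#   from collections import namedtuple
#   Node = namedtuple('Node', ['id'])
#   a, b, c = Node(1), Node(2), Node(3)
#   dag = SimpleDAG()
#   for n in (a, b, c):
#       dag.add_node(n)
#   dag.add_edge(a, b, 'success')
#   dag.add_edge(a, c, 'failure')
#   assert not dag.has_cycle()
#   roots = [n['node_object'] for n in dag.get_root_nodes()]   # [Node(id=1)]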
|
GoogleCloudPlatform/sap-deployment-automation
|
third_party/github.com/ansible/awx/awx/main/scheduler/dag_simple.py
|
Python
|
apache-2.0
| 7,319
|
[
"VisIt"
] |
68ab8bffbd8511470a73217b201e77bdd937a332e43e8ce76c71993eb352b849
|
"""Test check utilities."""
# Authors: MNE Developers
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import os
import os.path as op
import shutil
import sys
import numpy as np
import pytest
from pathlib import Path
import mne
from mne.datasets import testing
from mne.io.pick import pick_channels_cov
from mne.utils import (check_random_state, _check_fname, check_fname,
_check_subject, requires_mayavi, traits_test,
_check_mayavi_version, _check_info_inv, _check_option,
check_version, _check_path_like, _validate_type,
_suggest, _on_missing)
data_path = testing.data_path(download=False)
base_dir = op.join(data_path, 'MEG', 'sample')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_event = op.join(base_dir, 'sample_audvis_trunc_raw-eve.fif')
fname_fwd = op.join(base_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_mgz = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
reject = dict(grad=4000e-13, mag=4e-12)
@testing.requires_testing_data
def test_check(tmpdir):
"""Test checking functions."""
pytest.raises(ValueError, check_random_state, 'foo')
pytest.raises(TypeError, _check_fname, 1)
_check_fname(Path('./'))
fname = str(tmpdir.join('foo'))
with open(fname, 'wb'):
pass
assert op.isfile(fname)
_check_fname(fname, overwrite='read', must_exist=True)
orig_perms = os.stat(fname).st_mode
os.chmod(fname, 0)
if not sys.platform.startswith('win'):
with pytest.raises(PermissionError, match='read permissions'):
_check_fname(fname, overwrite='read', must_exist=True)
os.chmod(fname, orig_perms)
os.remove(fname)
assert not op.isfile(fname)
pytest.raises(IOError, check_fname, 'foo', 'tets-dip.x', (), ('.fif',))
pytest.raises(ValueError, _check_subject, None, None)
pytest.raises(TypeError, _check_subject, None, 1)
pytest.raises(TypeError, _check_subject, 1, None)
# smoke tests for permitted types
check_random_state(None).choice(1)
check_random_state(0).choice(1)
check_random_state(np.random.RandomState(0)).choice(1)
if check_version('numpy', '1.17'):
check_random_state(np.random.default_rng(0)).choice(1)
# _meg.fif is a valid ending and should not raise an error
new_fname = str(
tmpdir.join(op.basename(fname_raw).replace('_raw.', '_meg.')))
shutil.copyfile(fname_raw, new_fname)
mne.io.read_raw_fif(new_fname)
@requires_mayavi
@traits_test
def test_check_mayavi():
"""Test mayavi version check."""
pytest.raises(RuntimeError, _check_mayavi_version, '100.0.0')
def _get_data():
"""Read in data used in tests."""
# read forward model
forward = mne.read_forward_solution(fname_fwd)
# read data
raw = mne.io.read_raw_fif(fname_raw, preload=True)
events = mne.read_events(fname_event)
event_id, tmin, tmax = 1, -0.1, 0.15
# decimate for speed
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True,
selection=left_temporal_channels)
picks = picks[::2]
raw.pick_channels([raw.ch_names[ii] for ii in picks])
del picks
raw.info.normalize_proj() # avoid projection warnings
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
baseline=(None, 0.), preload=True, reject=reject)
noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)
return epochs, data_cov, noise_cov, forward
@testing.requires_testing_data
def test_check_info_inv():
"""Test checks for common channels across fwd model and cov matrices."""
epochs, data_cov, noise_cov, forward = _get_data()
# make sure same channel lists exist in data to make testing life easier
assert epochs.info['ch_names'] == data_cov.ch_names
assert epochs.info['ch_names'] == noise_cov.ch_names
# check whether bad channels get excluded from the channel selection
# info
info_bads = epochs.info.copy()
info_bads['bads'] = info_bads['ch_names'][1:3] # include two bad channels
picks = _check_info_inv(info_bads, forward, noise_cov=noise_cov)
assert [1, 2] not in picks
# covariance matrix
data_cov_bads = data_cov.copy()
data_cov_bads['bads'] = data_cov_bads.ch_names[0]
picks = _check_info_inv(epochs.info, forward, data_cov=data_cov_bads)
assert 0 not in picks
# noise covariance matrix
noise_cov_bads = noise_cov.copy()
noise_cov_bads['bads'] = noise_cov_bads.ch_names[1]
picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov_bads)
assert 1 not in picks
# test whether reference channels get deleted
info_ref = epochs.info.copy()
info_ref['chs'][0]['kind'] = 301 # pretend to have a ref channel
picks = _check_info_inv(info_ref, forward, noise_cov=noise_cov)
assert 0 not in picks
# pick channels in all inputs and make sure common set is returned
epochs.pick_channels([epochs.ch_names[ii] for ii in range(10)])
data_cov = pick_channels_cov(data_cov, include=[data_cov.ch_names[ii]
for ii in range(5, 20)])
noise_cov = pick_channels_cov(noise_cov, include=[noise_cov.ch_names[ii]
for ii in range(7, 12)])
picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov,
data_cov=data_cov)
assert list(range(7, 10)) == picks
def test_check_option():
"""Test checking the value of a parameter against a list of options."""
allowed_values = ['valid', 'good', 'ok']
# Value is allowed
assert _check_option('option', 'valid', allowed_values)
assert _check_option('option', 'good', allowed_values)
assert _check_option('option', 'ok', allowed_values)
assert _check_option('option', 'valid', ['valid'])
# Check error message for invalid value
msg = ("Invalid value for the 'option' parameter. Allowed values are "
"'valid', 'good' and 'ok', but got 'bad' instead.")
with pytest.raises(ValueError, match=msg):
assert _check_option('option', 'bad', allowed_values)
# Special error message if only one value is allowed
msg = ("Invalid value for the 'option' parameter. The only allowed value "
"is 'valid', but got 'bad' instead.")
with pytest.raises(ValueError, match=msg):
assert _check_option('option', 'bad', ['valid'])
def test_check_path_like():
"""Test _check_path_like()."""
str_path = str(base_dir)
pathlib_path = Path(base_dir)
no_path = dict(foo='bar')
assert _check_path_like(str_path) is True
assert _check_path_like(pathlib_path) is True
assert _check_path_like(no_path) is False
def test_validate_type():
"""Test _validate_type."""
_validate_type(1, 'int-like')
with pytest.raises(TypeError, match='int-like'):
_validate_type(False, 'int-like')
@testing.requires_testing_data
def test_suggest():
"""Test suggestions."""
names = mne.get_volume_labels_from_aseg(fname_mgz)
sug = _suggest('', names)
assert sug == '' # nothing
sug = _suggest('Left-cerebellum', names)
assert sug == " Did you mean 'Left-Cerebellum-Cortex'?"
sug = _suggest('Cerebellum-Cortex', names)
assert sug == " Did you mean one of ['Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex', 'Left-Cerebral-Cortex']?" # noqa: E501
def test_on_missing():
"""Test _on_missing."""
msg = 'test'
with pytest.raises(ValueError, match=msg):
_on_missing('raise', msg)
with pytest.warns(RuntimeWarning, match=msg):
_on_missing('warn', msg)
_on_missing('ignore', msg)
with pytest.raises(ValueError,
match='Invalid value for the \'on_missing\' parameter'):
_on_missing('foo', msg)
|
cjayb/mne-python
|
mne/utils/tests/test_check.py
|
Python
|
bsd-3-clause
| 8,044
|
[
"Mayavi"
] |
ddac3a791a5acc766f9c1d6e1e47fc745b721efff953e3cb66e1c1e18c743842
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=wildcard-import
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=unused-wildcard-import
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=unused-argument
from logging import getLogger
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import step, world
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from .course_helpers import *
from .ui_helpers import *
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('div.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
if 'COURSE' in world.scenario_dict:
path = path.format(world.scenario_dict['COURSE'].id)
assert world.url_equals(path), (
"path should be {!r} but is {!r}".format(path, world.browser.url)
)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert title in world.browser.title
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
world.register_by_course_key(course_key, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = '.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
if 'COURSE' in world.scenario_dict:
url = url.format(world.scenario_dict['COURSE'].id)
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
@step(u'(I am viewing|s?he views) the course team settings$')
def view_course_team_settings(_step, whom):
""" navigates to course team settings page """
world.click_course_settings()
link_css = 'li.nav-course-settings-team a'
world.css_click(link_css)
|
fintech-circle/edx-platform
|
common/djangoapps/terrain/steps.py
|
Python
|
agpl-3.0
| 7,452
|
[
"VisIt"
] |
34e76bf317e2ec56ca1bf3e2844f53e41454b5d6d1f7bb6c0c315390700b64eb
|
# - Coding UTF8 -
#
# Networked Decision Making
# Development Sites (source code):
# http://code.google.com/p/global-decision-making-system/
# http://github.com/NewGlobalStrategy/NetDecisionMaking
#
# Demo Sites (Google App Engine)
# http://netdecisionmaking.appspot.com
# http://globaldecisionmaking.appspot.com
#
# License Code: MIT
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King (newglobalstrategy@gmail.com)
# Russ also blogs occasionally to pass the time at:
# http://proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
# With thanks to Guido, Massimo and many other that make this sort of thing
# much easier than it used to be
# This controller has 3 functions:
# my_questions for reviewing progress on questions you have asked
# my_answers for reviewing your answers
# resolved for reviewing resolved questions
"""This controller has 3 functiosns:
index is the new function for reviewing
questload - for loading into frontpage or elsewhere
actionload - for loading into frontpage or elsewhere
my_questions - for reviewing questions you have submitted
my_answers - for questions you have answered - this should move to action
resolved - for reviewing resolved questions -lets test if this is covered by action and should disappear
"""
def index():
#this is a new file aiming to replace action index and review resolved and finally review my answers
#Plan is to have up to 5 arguments for this, which I think will be
#1 View - v
#2 Query - q
#3 Sort Order - s
#4 Page
#5 Items Per Page
#Valid values for view are:
#quest, action
#Valid values for query are:
#resolved, agreed, proposed and my - my is only valid if logged in
#Valid values for sort order are dependent on the view but may be
#priority, resolvedate, duedate, submitdate or responsible for actions
#
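# Illustrative mapping (editor's sketch; the example URL args below are
# hypothetical and not part of the original code):
#   request.args = ['quest', 'resolved', 'resolved']      -> v, q, s (page defaults to 0)
#   request.args = ['action', 'agreed', 'priority', '2']  -> v, q, s, page=2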
heading = 'Resolved Questions'
v = 'quest'
q = 'resolved'
s = 'resolved'
message = ''
fields = ['qsortorder', 'showscope', 'scope', 'continent', 'country', 'subdivision',
'showcat', 'category']
page = 0
if len(request.args):
v = request.args[0]
if len(request.args) > 1:
q = request.args[1]
if len(request.args) > 2:
s = request.args[2]
if len(request.args) > 3:
page = int(request.args[3])
if v == 'action':
fields = ['sortorder', 'showscope', 'scope', 'continent', 'country',
'subdivision', 'showcat', 'category']
heading = 'Agreed Actions'
#action index
#if q == 'quest':
# q = 'action'
#if s == 'resolved':
# s = 'priority'
#, formstyle = SQLFORM.formstyles.bootstrap3
form = SQLFORM(db.viewscope, fields=fields, formstyle='table3cols')
if session.showscope is None:
form.vars.showscope = False
form.vars.showcat = False
else:
form.vars.showscope = session.showscope
form.vars.showcat = session.showcat
form.vars.category = session.category
form.vars.scope = session.scope
form.vars.continent = session.vwcontinent
form.vars.country = session.vwcountry
form.vars.subdivision = session.vwsubdivision
items_per_page = 7
limitby = (page * items_per_page, (page + 1) * items_per_page + 1)
if v == 'action':
if session.sortorder is not None:
form.vars.sortorder = session.sortorder
else:
if session.qsortorder is not None:
form.vars.qsortorder = session.qsortorder
if form.validate():
session.showcat = form.vars.showcat
session.showscope = form.vars.showscope
session.scope = form.vars.scope
session.category = form.vars.category
session.vwcontinent = form.vars.continent
session.vwcountry = form.vars.country
session.vwsubdivision = form.vars.subdivision
if v == 'action':
session.sortorder = form.vars.sortorder
if session.sortorder == '1 Priority':
s = 'priority'
elif session.sortorder == '2 Due Date':
s = 'due'
elif session.sortorder == '3 Resolved Date':
s = 'resolved'
elif session.sortorder == '4 Submit Date':
s = 'submit'
elif session.sortorder == '5 Responsible':
s = 'responsible'
else:
session.qsortorder = form.vars.qsortorder
if session.qsortorder == '1 Priority':
s = 'priority'
elif session.qsortorder == '2 Resolved Date':
s = 'resolved'
elif session.qsortorder == '3 Submit Date':
s = 'submit'
page = 0
#from resolved
redirect(URL('index', args=[v, q, s], vars=request.vars))
if v == 'action':
###From Action
# Actions can be selected for all or status of Agreed, In Progress or Disagreed
# Rejected actions cannot be reviewed
query = (db.question.qtype == 'action')
if q == 'agreed':
query = (db.question.qtype == 'action') & (db.question.status == 'Agreed')
heading = 'Agreed actions'
elif q == 'proposed':
query = (db.question.qtype == 'action') & (db.question.status == 'In Progress')
heading = 'Proposed actions'
elif q == 'disagreed':
query = (db.question.qtype == 'action') & (db.question.status == 'Disagreed')
heading = 'Disagreed actions'
elif q == 'my' and auth.user is not None:
query = (db.question.qtype == 'action') & (db.question.auth_userid == auth.user.id)
heading = 'My actions'
elif q == 'my':
message = 'You are not logged in so default view shown'
if session.showcat is True:
query &= db.question.category == session.category
if session.showscope:
if session.scope == "1 Global":
query &= db.question.activescope == session.scope
elif session.scope == "2 Continental":
query = query & (db.question.activescope == session.scope) & (
db.question.continent == session.vwcontinent)
elif session.scope == "3 National":
query = query & (db.question.activescope == session.scope) & (
db.question.country == session.vwcountry)
elif session.scope == "4 Local":
query = query & (db.question.activescope == session.scope) & (
db.question.subdivision == session.vwsubdivision)
# And they can be sorted by create date, priority and due date
# not got a control for this yet probably part of request.args
sortby = ~db.question.priority
if s == 'due':
sortby = db.question.duedate
elif s == 'create':
sortby = ~db.question.createdate
elif s == 'resolved':
sortby = ~db.question.resolvedate
elif s == 'responsible':
sortby = db.question.responsible
quests = db(query).select(
db.question.id, db.question.status, db.question.questiontext, db.question.duedate,
db.question.responsible, db.question.priority, db.question.achieved, db.question.level,
db.question.correctanstext, db.question.numagree, db.question.numdisagree,
db.question.activescope, db.question.category, db.question.continent,
db.question.country, db.question.subdivision, db.question.scopetext,
orderby=sortby, limitby=limitby)
else:
# Actions can be selected for all or status of Agreed, In Progress or Disagreed
# Rejected actions cannot be reviewed
query = (db.question.qtype == 'quest') & (db.question.status == 'Resolved')
if q == 'Que':
query = (db.question.qtype == 'quest') & (db.question.status == 'Resolved')
heading = 'Resolved Questions'
if q == 'reject': # we might show this and perhaps even allow challenges
query = (db.question.qtype == 'quest') & (db.question.status == 'Rejected')
heading = 'Rejected Questions'
elif q == 'inprog': # we are not showing this for philosophical reasons at the moment
query = (db.question.qtype == 'quest') & (db.question.status == 'Resolved')
heading = 'Questions in Progress'
elif q == 'my' and auth.user is not None:
query = (db.question.auth_userid == auth.user.id) & (db.question.qtype == 'quest')
heading = 'My Questions'
if session.showcat is True:
query &= db.question.category == session.category
if session.showscope is True:
query &= db.question.activescope == session.scope
if session.scope == '1 Global':
query &= db.question.activescope == session.scope
elif session.scope == '2 Continental':
query = query & (db.question.activescope == session.scope) & (
db.question.continent == session.vwcontinent)
elif session.scope == '3 National':
query = query & (db.question.activescope == session.scope) & (
db.question.country == session.vwcountry)
elif session.scope == '4 Local':
query = query & (db.question.activescope == session.scope) & (
db.question.subdivision == session.vwsubdivision)
# And they can be sorted by create date, priority and due date
sortby = ~db.question.priority
if s == 'resolved':
sortby = ~db.question.resolvedate
elif s == 'submit':
sortby = ~db.question.createdate
quests = db(query).select(
db.question.id, db.question.status, db.question.questiontext, db.question.duedate,
db.question.responsible, db.question.priority, db.question.achieved, db.question.level,
db.question.correctanstext, db.question.numagree, db.question.numdisagree,
db.question.activescope, db.question.category, db.question.continent,
db.question.country, db.question.subdivision, db.question.scopetext,
orderby=sortby, limitby=limitby)
session.networklist = [x.id for x in quests]
return dict(form=form, quests=quests, page=page, items_per_page=items_per_page, v=v, q=q,
s=s, query=query, heading=heading, message=message)
@auth.requires_login()
def my_answers():
fields = ['asortorder', 'showscope', 'scope', 'continent', 'country', 'subdivision',
'showcat', 'category']
form = SQLFORM(db.viewscope, fields=fields, formstyle='table3cols')
page = 0
q = 'Que'
s = 'Resolved'
if session.showscope is None:
form.vars.showscope = False
form.vars.showcat = False
else:
form.vars.showscope = session.showscope
form.vars.showcat = session.showcat
form.vars.category = session.category
form.vars.scope = session.scope
form.vars.continent = session.vwcontinent
form.vars.country = session.vwcountry
form.vars.subdivision = session.vwsubdivision
if session.sortorder is not None:
form.vars.asortorder = session.sortorder
if len(request.args):
page = int(request.args[0])
if len(request.args) > 1:
q = request.args[1]
if len(request.args) > 2:
s = request.args[2]
items_per_page = 10
limitby = (page * items_per_page, (page + 1) * items_per_page + 1)
if session.sortorder is not None:
if session.sortorder == '1 Answer Date':
s = 'Answer'
elif session.sortorder == '2 Resolved Date':
s = 'Resolved'
elif session.sortorder == '3 Category':
s = 'Category'
if form.validate():
session.showcat = form.vars.showcat
session.showscope = form.vars.showscope
session.scope = form.vars.scope
session.category = form.vars.category
session.vwcontinent = form.vars.continent
session.vwcountry = form.vars.country
session.vwsubdivision = form.vars.subdivision
session.sortorder = form.vars.asortorder
if session.sortorder == '1 Answer Date':
s = 'Answer'
elif session.sortorder == '2 Resolved Date':
s = 'Resolved'
elif session.sortorder == '3 Category':
s = 'Category'
page = 0
redirect(URL('my_answers', args=[page, q, s]))
# Actions can be selected for all or status of Agreed, In Progress or Disagreed
# Rejected actions cannot be reviewed
query = (db.userquestion.auth_userid == auth.user.id)
if q == 'Resolved':
query &= db.userquestion.status == 'Resolved'
elif q == 'InProg': # we are not showing this for philosophical reasons at the moment
query &= db.userquestion.status == 'In Progress'
if session.showcat is True:
query &= db.userquestion.category == session.category
if session.showscope is True:
query &= db.userquestion.activescope == session.scope
if session.scope == '1 Global':
query &= db.userquestion.activescope == session.scope
elif session.scope == '2 Continental':
query = query & (db.userquestion.activescope == session.scope) & (
db.userquestion.continent == session.vwcontinent)
elif session.scope == '3 National':
query = query & (db.userquestion.activescope == session.scope) & (
db.userquestion.country == session.vwcountry)
elif session.scope == '4 Local':
query = query & (db.userquestion.activescope == session.scope) & (
db.userquestion.subdivision == session.vwsubdivision)
# And they can be sorted by create date, priority and due date
sortby = ~db.userquestion.ansdate
if s == 'Resolved':
sortby = ~db.userquestion.resolvedate
elif s == 'Category':
sortby = db.userquestion.category
quests = db(query).select(db.userquestion.id, db.userquestion.status,
db.userquestion.questionid, db.userquestion.answer, db.userquestion.score,
orderby=[sortby], limitby=limitby)
return dict(form=form, quests=quests, page=page, items_per_page=items_per_page, q=q, s=s, query=query)
|
NewGlobalStrategy/NetDecisionMaking
|
controllers/review.py
|
Python
|
mit
| 14,815
|
[
"VisIt"
] |
ae86587cb9f00ad3f9981e88c5e77719b78faea0045b82cca1509cf7667dcff6
|
# USBMtp.py
#
# Contains class definitions to implement a USB MTP device.
import struct
from umap2.core.usb_device import USBDevice
from umap2.core.usb_configuration import USBConfiguration
from umap2.core.usb_interface import USBInterface
from umap2.core.usb_endpoint import USBEndpoint
from umap2.core.usb_vendor import USBVendor
from umap2.core.usb_class import USBClass
from umap2.fuzz.helpers import mutable
try:
from mtpdevice.mtp_device import MtpDevice, MtpDeviceInfo
from mtpdevice.mtp_object import MtpObject
from mtpdevice.mtp_storage import MtpStorage, MtpStorageInfo
from mtpdevice.mtp_api import MtpApi
from mtpdevice.mtp_property import MtpDeviceProperty, MtpDevicePropertyCode
from mtpdevice.mtp_data_types import MStr, UInt8
mtpdeviceloaded = True
except:
print('Failed to load mtpdevice. please install pymtpdevice (https://github.com/BinyaminSharet/Mtp)')
mtpdeviceloaded = False
class USBMtpInterface(USBInterface):
name = 'MtpInterface'
def __init__(self, app, phy):
if not mtpdeviceloaded:
raise Exception('You cannot use USBMtp until you install pymtpdevice')
# TODO: un-hardcode string index (last arg before 'verbose')
super(USBMtpInterface, self).__init__(
app=app,
phy=phy,
interface_number=0,
interface_alternate=0,
interface_class=USBClass.VendorSpecific,
interface_subclass=0xff,
interface_protocol=0,
interface_string_index=0,
endpoints=[
USBEndpoint(
app=app,
phy=phy,
number=1,
direction=USBEndpoint.direction_out,
transfer_type=USBEndpoint.transfer_type_bulk,
sync_type=USBEndpoint.sync_type_none,
usage_type=USBEndpoint.usage_type_data,
max_packet_size=64,
interval=0,
handler=self.handle_ep1_data_available
),
USBEndpoint(
app=app,
phy=phy,
number=2,
direction=USBEndpoint.direction_in,
transfer_type=USBEndpoint.transfer_type_bulk,
sync_type=USBEndpoint.sync_type_none,
usage_type=USBEndpoint.usage_type_data,
max_packet_size=64,
interval=0,
handler=None
),
USBEndpoint(
app=app,
phy=phy,
number=3,
direction=USBEndpoint.direction_in,
transfer_type=USBEndpoint.transfer_type_interrupt,
sync_type=USBEndpoint.sync_type_none,
usage_type=USBEndpoint.usage_type_data,
max_packet_size=64,
interval=32,
handler=None
),
],
)
self.object = MtpObject.from_fs_recursive('mtp_fs')
# self.object = MtpObject.from_fs_recursive('mtp_fs/eits.mp3')
self.storage_info = MtpStorageInfo(
st_type=1,
fs_type=2,
access=0,
max_cap=150000,
free_bytes=0,
free_objs=0,
desc='MyStorage',
vol_id='Python MTP Device Stack',
)
self.storage = MtpStorage(self.storage_info)
self.storage.add_object(self.object)
self.dev_info = MtpDeviceInfo(
std_version=0x0064,
mtp_vendor_ext_id=0x00000006,
mtp_version=0x0064,
mtp_extensions='microsoft.com: 1.0;',
functional_mode=0x0000,
capture_formats=[],
playback_formats=[],
manufacturer='UMAP2',
model='Role',
device_version='1.2',
serial_number='3031323334353637',
)
properties = [
MtpDeviceProperty(MtpDevicePropertyCode.MTP_DeviceFriendlyName, 0, MStr('UmapMtpDevice'), MStr('')),
MtpDeviceProperty(MtpDevicePropertyCode.BatteryLevel, 0, UInt8(100), UInt8(0))
]
self.dev = MtpDevice(self.dev_info, properties, self.logger)
self.dev.add_storage(self.storage)
self.dev.set_fuzzer(app.fuzzer)
self.api = MtpApi(self.dev)
# OS String descriptor
# self.add_string_with_id(50, 'MTP'.encode('utf-16') + b'\x00\x00')
self.add_string_with_id(0xee, 'MSFT100'.encode('utf-16') + b'\x00\x00')
def handle_ep1_data_available(self, data):
resps = self.api.handle_payload(data)
if resps:
for resp in resps:
self.send_on_endpoint(2, resp)
class USBMsosVendor(USBVendor):
def setup_local_handlers(self):
self.local_handlers = {
0x00: self.handle_msos_vendor_extended_config_descriptor,
}
@mutable('msos_vendor_extended_config_descriptor')
def handle_msos_vendor_extended_config_descriptor(self, req):
'''
Taken from OS_Desc_CompatID
https://msdn.microsoft.com/en-us/windows/hardware/gg463179
'''
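# Descriptor layout sketch (editor's summary of the struct.pack calls below;
# see the MSDN link above for the authoritative spec):
#   header:  dwLength(4) | bcdVersion(2) | wIndex(2) | bCount(1) | reserved(7)
#   then bCount function sections, each:
#   bFirstInterfaceNumber(1) | reserved(1) | compatibleID(8) | subCompatibleID(8) | reserved(6)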
def pad(data, pad_len=8):
to_pad = pad_len - len(data)
return data + (b'\x00' * to_pad)
self.property_sections = [
[0x00, 0x01, pad(b'MTP'), pad(b''), pad(b'', 6)]
]
bcdVersion = 0x0100
wIndex = 0x00
bCount = len(self.property_sections)
reserved = pad(b'\x00', 7)
properties = b''
for prop in self.property_sections:
properties += struct.pack('BB', prop[0], prop[1]) + prop[2] + prop[3] + prop[4]
payload = struct.pack('<HHB', bcdVersion, wIndex, bCount) + reserved + properties
dwLength = len(payload) + 4
payload = struct.pack('<I', dwLength) + payload
return payload
class USBMtpDevice(USBDevice):
name = 'MtpDevice'
def __init__(self, app, phy, vid=0x4e8, pid=0x685c, rev=0x0002, **kwargs):
super(USBMtpDevice, self).__init__(
app=app,
phy=phy,
device_class=USBClass.Unspecified,
device_subclass=0,
protocol_rel_num=0,
max_packet_size_ep0=64,
vendor_id=vid,
product_id=pid,
device_rev=rev,
manufacturer_string='Samsung Electronics Co., Ltd',
product_string='GT-I9250 Phone [Galaxy Nexus]',
serial_number_string='00001',
configurations=[
USBConfiguration(
app=app,
phy=phy,
index=1,
string='Android MTP Device',
interfaces=[
USBMtpInterface(app, phy)
]
)
],
usb_vendor=USBMsosVendor(app=app, phy=phy),
)
usb_device = USBMtpDevice
|
nccgroup/umap2
|
umap2/dev/mtp.py
|
Python
|
agpl-3.0
| 7,018
|
[
"Galaxy"
] |
2150ea84153e5e79b00b7fac4140d3059d515ec37c668d6d28e069add57b5322
|
import logging
import logging.config
import struct
import threading
import signal
import traceback
from ambercommon.common import runtime
import os
from amberdriver.common import drivermsg_pb2
__author__ = 'paoolo'
LEN_SIZE = 2
pwd = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig('%s/amber.ini' % pwd)
LOGGER_NAME = 'AmberPipes'
class AmberException(Exception):
def __init__(self, message=None, cause=None):
Exception.__init__(self, message + u', caused by ' + repr(cause))
self.cause = cause
class AmberPipes(object):
def __init__(self, message_handler, pipe_in, pipe_out):
self.__message_handler = message_handler
self.__pipe_in, self.__pipe_out = pipe_in, pipe_out
self.__is_alive = True
self.__write_lock = threading.Lock()
self.__logger = logging.getLogger(LOGGER_NAME)
runtime.add_shutdown_hook(self.terminate)
def __call__(self, *args, **kwargs):
self.run()
def run(self):
self.__logger.info('Pipes thread started.')
self.__amber_pipes_loop()
def is_alive(self):
return self.__is_alive
def __amber_pipes_loop(self):
try:
while self.__is_alive:
header, message = self.__read_header_and_message_from_pipe()
self.__handle_header_and_message(header, message)
except BaseException as e:
self.__logger.fatal('Stop due to error on pipe with mediator')
traceback.print_exc(e)
self.__is_alive = False
os.kill(os.getpid(), signal.SIGTERM)
def __read_header_and_message_from_pipe(self):
"""
Read and parse header and message from pipe.
:return: header and message
"""
header = drivermsg_pb2.DriverHdr()
message = drivermsg_pb2.DriverMsg()
header = self.__read_data_from_pipe(header)
message = self.__read_data_from_pipe(message)
return header, message
def __read_data_from_pipe(self, container):
data = self.__read_and_unpack_data_from_pipe(LEN_SIZE)
container.ParseFromString(data)
return container
def __read_and_unpack_data_from_pipe(self, size):
"""
Read and unpack data from pipe.
:param size: size of length data
:return: binary string
"""
data = self.__read_from_pipe(size)
# FIXME: can generate error, why?
size = struct.unpack('!h', data)
data = self.__read_from_pipe(size[0])
return data
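# Framing note (editor's sketch, derived from the '!h' pack/unpack calls in
# this class): every protobuf object on the pipe is preceded by a 2-byte
# big-endian length, so one exchange looks like
#   [len(DriverHdr)][DriverHdr bytes][len(DriverMsg)][DriverMsg bytes]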
def __read_from_pipe(self, size):
"""
Read binary string from pipe.
:param size: size of read string
:return: binary string
"""
return self.__pipe_in.read(size)
def __handle_header_and_message(self, header, message):
"""
Handle any message. Messages not serviced here are PONG and DRIVER_DIED.
:param header: object of DriverHdr
:param message: object of DriverMsg
:return: nothing
"""
if message.type == drivermsg_pb2.DriverMsg.DATA:
self.__logger.debug('Received DATA message')
self.__message_handler.handle_data_message(header, message)
elif message.type == drivermsg_pb2.DriverMsg.SUBSCRIBE:
self.__logger.debug('Received SUBSCRIBE message')
self.__message_handler.handle_subscribe_message(header, message)
elif message.type == drivermsg_pb2.DriverMsg.UNSUBSCRIBE:
self.__logger.debug('Received UNSUBSCRIBE message')
self.__message_handler.handle_unsubscribe_message(header, message)
elif message.type == drivermsg_pb2.DriverMsg.CLIENT_DIED:
self.__logger.debug('Received CLIENT_DIED message')
self.__handle_client_died_message(header, message)
elif message.type == drivermsg_pb2.DriverMsg.PING:
self.__logger.debug('Received PING message')
self.__handle_ping_message(header, message)
else:
self.__logger.warning('Received unknown type message, ignoring.')
def __handle_client_died_message(self, header, _):
"""
Handle CLIENT_DIED message which came from mediator.
Handling message delegated to message handler.
:param header: object of DriverHdr
:return: nothing
"""
if len(header.clientIDs) < 1:
self.__logger.warning('CLIENT_DIED\'s clientID not set, ignoring.')
else:
self.__message_handler.handle_client_died_message(header.clientIDs[0])
def __handle_ping_message(self, ping_header, ping_message):
"""
Handle PING message which came from mediator.
:param ping_header: object of DriverHdr
:param ping_message: object of DriverMsg
:return: nothing
"""
if not ping_message.HasField('synNum'):
self.__logger.warning('PING\'s synNum is not set, ignoring.')
else:
pong_message = drivermsg_pb2.DriverMsg()
pong_message.type = drivermsg_pb2.DriverMsg.PONG
pong_message.ackNum = ping_message.synNum
pong_header = drivermsg_pb2.DriverHdr()
pong_header.clientIDs.extend(ping_header.clientIDs)
self.__logger.debug('Send PONG message')
self.write_header_and_message_to_pipe(pong_header, pong_message)
def write_header_and_message_to_pipe(self, header, message):
"""
Serialize and write header and message to pipe.
:param header: object of DriverHdr
:param message: object of DriverMsg
:return: nothing
"""
self.__logger.debug('Write header and message to pipe:\nHEADER:\n%s\n---\nMESSAGE:\n%s\n---',
str(header).strip(), str(message).strip()[:200])
self.__write_lock.acquire()
try:
header_data = header.SerializeToString()
message_data = message.SerializeToString()
header_binary_data = struct.pack('!h', len(header_data)) + header_data
message_binary_data = struct.pack('!h', len(message_data)) + message_data
self.__write_to_pipe(header_binary_data + message_binary_data)
except BaseException as e:
traceback.print_exc(e)
raise AmberException(cause=e)
finally:
self.__write_lock.release()
def __write_to_pipe(self, binary_string):
"""
Write binary string to pipe.
:param binary_string: binary string
:return: nothing
"""
self.__pipe_out.write(binary_string)
self.__pipe_out.flush()
def terminate(self):
self.__is_alive = False
|
project-capo/amber-python-drivers
|
src/amberdriver/common/amber_pipes.py
|
Python
|
mit
| 6,725
|
[
"Amber"
] |
0cf3a523f3cf8262bcbbecdaf00d0db9b6c27ab00ee03f74deaad09bc70a6a3b
|
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
from nevow import stan
from nevow.testutil import TestCase
class TestProto(TestCase):
def test_proto(self):
tagName = "hello"
proto = stan.Proto(tagName)
self.assertEquals(tagName, str(proto))
def test_callCreatesTag(self):
proto = stan.Proto("hello")
tag = proto(world="1")
self.assertEquals(proto, tag.tagName)
self.assertEquals(tag.attributes['world'], '1')
def test_getItemCreatesTag(self):
proto = stan.Proto("hello")
tag = proto[proto]
self.assertEquals(proto, tag.tagName)
self.assertEquals(tag.children, [proto])
proto = stan.Proto("hello")
class TestTag(TestCase):
def test_clone(self):
tag = proto(hello="world")["How are you"]
tag.fillSlots('foo', 'bar')
tag.filename = "foo/bar"
tag.lineNumber = 6
tag.columnNumber = 12
clone = tag.clone()
self.assertEquals(clone.attributes['hello'], 'world')
self.assertNotIdentical(clone.attributes, tag.attributes)
self.assertEquals(clone.children, ["How are you"])
self.assertNotIdentical(clone.children, tag.children)
self.assertEquals(tag.slotData, clone.slotData)
self.assertNotIdentical(tag.slotData, clone.slotData)
self.assertEqual(clone.filename, "foo/bar")
self.assertEqual(clone.lineNumber, 6)
self.assertEqual(clone.columnNumber, 12)
## TODO: need better clone test here to test clone(deep=True),
## and behavior of cloning nested lists.
def test_clear(self):
tag = proto["these are", "children", "cool"]
tag.clear()
self.assertEquals(tag.children, [])
def test_specials(self):
tag = proto(data=1, render=str, remember="stuff", key="myKey", **{'pattern': "item"})
self.assertEquals(tag.data, 1)
self.assertEquals(getattr(tag, 'render'), str)
self.assertEquals(tag.remember, "stuff")
self.assertEquals(tag.key, "myKey")
self.assertEquals(tag.pattern, "item")
def test_visit(self):
"""
Test that L{nevow.stan.visit} invokes the visitor it is given with all
the nodes in the DOM it is given in pre-order.
"""
visited = []
def visitor(t):
visited.append(t)
root = stan.Proto('root')()
firstChild = stan.Proto('firstChild')()
secondChild = stan.Proto('secondChild')()
firstGrandchild = stan.Proto('firstGrandchild')()
secondGrandchild = stan.Proto('secondGrandchild')()
thirdGrandchild = 'thirdGrandchild'
root[firstChild, secondChild]
secondChild[firstGrandchild, secondGrandchild, thirdGrandchild]
stan.visit(root, visitor)
self.assertEquals(
visited,
[root, firstChild, secondChild,
firstGrandchild, secondGrandchild, thirdGrandchild])
class TestComment(TestCase):
def test_notCallable(self):
comment = stan.CommentProto()
self.assertRaises(NotImplementedError, comment, id='oops')
class TestUnderscore(TestCase):
def test_prefix(self):
proto = stan.Proto('div')
tag = proto()
tag(_class='a')
self.assertEquals(tag.attributes, {'class': 'a'})
def test_suffix(self):
proto = stan.Proto('div')
tag = proto()
tag(class_='a')
self.assertEquals(tag.attributes, {'class': 'a'})
|
UstadMobile/exelearning-ustadmobile-work
|
nevow/test/test_stan.py
|
Python
|
gpl-2.0
| 3,491
|
[
"VisIt"
] |
7f84b633fa945cae70b6d9faef416b7a633d54ae69a84b9abf7cfd42dd499439
|
#!/usr/bin/python
"""
This script analyses a folder containing bpch files and outputs the results
in a single netCDF file in the folder.
This allows for significantly faster and easier input of data,
and more common analysis techniques like pandas without extra
post-processing.
"""
import logging
import sys
import glob
import os
import netCDF4
if sys.version_info.major < 3:
try:
import iris
except ImportError:
print('WARNING iris not imported')
# retain back compatibility for PyGChem
try:
if (sys.version_info.major <= 2):
import pygchem
if pygchem.__version__ == '0.2.0':
import pygchem.diagnostics as gdiag
else:
try:
from pygchem import datasets
except:
import pygchem.datafields as datasets
except ImportError:
print('pygchem not imported!')
def convert_to_netCDF(folder=None, filename='ctm.nc', bpch_file_list=None,
remake=False, hemco_file_list=None, verbose=True,
bpch_file_type="*.ctm.nc"):
"""
Converts GEOS-Chem outputs to netCDF
Parameters
----------
folder (str): specify the folder you want to use - defaults to cwd
filename (str): specific the netCDF filename you want to use
bpch_file_list (list): list the bpch files you want to use
remake (bool): Overwrite any old files (default=False)
Notes
-----
Setup for:
- bpch_to_netCDF
- hemco_to_netCDF
- planeflight_to_netCDF
"""
logging.debug("Convert to netCDF called with folder={},".format(folder) +
" bpch_file_type={}/filename={}".format(bpch_file_type, filename))
# try:
bpch_to_netCDF(folder=folder, filename=filename,
bpch_file_list=bpch_file_list, remake=remake,
file_type=bpch_file_type, verbose=verbose)
# except:
# logging.error("Could not convert bpch to netCDF in {_dir}"\
# .format(_dir=folder))
# try:
# hemco_to_netCDF( folder, hemco_file_list, remake)
# except:
# logging.warning("Could not convert hemco to netCDF in {_dir}"\
# .format(_dir=folder))
return
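# Example usage (editor's sketch; the folder path below is hypothetical):
# convert_to_netCDF(folder='/scratch/GC_run/', filename='ctm.nc', remake=True)
# would combine the matching bpch output in that folder into a single
# netCDF file called 'ctm.nc'.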
def hemco_to_netCDF(folder, hemco_file_list=None, remake=False):
"""
Combine HEMCO diagnostic output files to a single NetCDF file.
Parameters
----------
remake (bool): overwrite existing NetCDF file
"""
if __package__ is None:
from .bpch2netCDF import get_folder
else:
from .bpch2netCDF import get_folder
folder = get_folder(folder)
output_file = os.path.join(folder, 'hemco.nc')
# If the hemco netCDF file already exists then quit unless remake=True
if not remake:
if os.path.exists(output_file):
logging.warning(output_file + ' already exists, not remaking')
return
logging.info("Combining hemco diagnostic files")
# By default look for any files that look like hemco diagnostic files:
# Look for all hemco netcdf files then remove the restart files.
if hemco_file_list == None:
hemco_files = glob.glob(folder + '/*HEMCO*.nc')
for filename in hemco_files:
if "restart" in filename:
hemco_files.remove(filename)
else:
file_list = []
for hemco_file in hemco_file_list:
full_path = os.path.join(folder, hemco_file)
if not os.path.exists(full_path):
logging.error(full_path + " could not be found")
raise IOError(
"{path} could not be found".format(path=full_path))
file_list.append(full_path)
hemco_files = file_list
if len(hemco_files) == 0:
logging.warning("No hemco diagnostic files found in {_dir}"
.format(_dir=folder))
else:
logging.debug("The following hemco files were found:")
logging.debug(str(hemco_files))
# Use iris cubes to combine the data into an output file
hemco_data = iris.load(hemco_files)
# Concatenate the times.
hemco_data = hemco_data.concatenate()
iris.save(hemco_data, output_file)
logging.info(str(hemco_data))
logging.info("Hecmo file created at {file}".format(file=output_file))
return
def bpch_to_netCDF(folder=None, filename='ctm.nc', bpch_file_list=None,
remake=False, filetype="*ctm.bpch*",
check4_trac_avg_if_no_ctm_bpch=True, backend='PyGChem',
verbose=False, **kwargs):
"""
Converts GEOS-Chem ctm.bpch output file(s) to NetCDF
Parameters
----------
folder (str): working directory for data files
filename (str): name to give created NetCDF
bpch_file_list (list): list of files to convert
remake (bool): overwrite existing NetCDF file
filetype (str): string with wildcards to match filenames
( e.g. *ctm.bpch*, trac_avg.*, or *ts*bpch* )
verbose (bool): print (minor) logging to screen
Returns
-------
(None) saves a NetCDF file to disk
"""
import os
# Check if file already exists and warn about remaking
if __package__ is None:
from .bpch2netCDF import get_folder
else:
from .bpch2netCDF import get_folder
folder = get_folder(folder)
output_file = os.path.join(folder, filename)
# If the netCDF file already exists, don't overwrite it without remake=True.
if not remake:
if os.path.exists(output_file):
logging.warning(output_file + ' already exists. Not recreating.')
return
# Look for files if file list is not provided.
if isinstance(bpch_file_list, type(None)):
logging.debug("Searching for the following bpch filetype: {filetype}"
.format(filetype=filetype))
bpch_files = glob.glob(folder + '/' + filetype)
# Also check if directory contains *trac_avg* files, if no ctm.bpch
if (len(bpch_files) == 0) and check4_trac_avg_if_no_ctm_bpch:
filetype = '*trac_avg*'
logging.info('WARNING! - now trying filetype={}'.format(filetype))
bpch_files = glob.glob(folder + '/' + filetype)
# Raise error if no files matching filetype
if len(bpch_files) == 0:
logging.error("No bpch files ({}) found in {}".format(filetype,
folder))
raise IOError("{} contains no bpch files.".format(folder))
# Use the specified files.
else:
file_list = []
for bpch_file in bpch_file_list:
full_path = folder + '/' + bpch_file
if not os.path.exists(full_path):
logging.error(full_path + " could not be found")
raise IOError("Full path could not be found")
file_list.append(full_path)
bpch_files = file_list
# Open the bpch files
logging.debug("The following bpch files were found (n={}):"
.format(len(bpch_files)))
logging.debug(str(bpch_files))
if verbose:
print(("Creating a netCDF from {} file(s).".format(len(bpch_files)) +
" This can take some time..."))
if backend == 'PyGChem':
# Load all the files into memory
bpch_data = datasets.load(bpch_files)
# Save the netCDF file
datasets.save(bpch_data, output_file)
elif backend == 'xbpch':
import xbpch
# Load all the files into memory (as xarray dataset object)
ds = xbpch.open_mfbpchdataset(bpch_files)
# save through xarray dataset object
ds.to_netcdf(output_file, unlimited_dims={'time_counter': True})
elif backend == 'iris':
# iris.fileformats.netcdf.save(data, output_file)
print('WARNING NetCDF made by iris is non CF-compliant')
elif backend == 'PNC':
import PseudoNetCDF as pnc
import xarray as xr
if len(bpch_files) == 1:
bpch_to_netCDF_via_PNC(filename=filename,
output_file=output_file, bpch_file=bpch_files[0])
# Individually convert bpch files if more than one file
if len(bpch_files) > 1:
for n_bpch_file, bpch_file in enumerate(bpch_files):
bpch_to_netCDF_via_PNC(filename=filename,
output_file='TEMP_{}_'.format(
n_bpch_file)+filename,
bpch_file=bpch_file)
# - Combine the NetCDF files with xarray
TEMP_ncfiles = glob.glob(folder+'TEMP_*_'+filename)
# Open files with xarray
ds_l = [xr.open_dataset(i) for i in TEMP_ncfiles]
# Make sure the time dimension is unlimited
ds = xr.concat(ds_l, dim='time')
# Now save the combined file
ds.to_netcdf(folder+filename,
unlimited_dims={'time_counter': True})
# Remove the temporary files
for TEMP_ncfile in TEMP_ncfiles:
os.remove(TEMP_ncfile)
logging.info("A netCDF file has been created with the name {ctm}"
.format(ctm=output_file))
return
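# Example usage (editor's sketch; the folder path below is hypothetical):
# bpch_to_netCDF(folder='/scratch/GC_run/', filename='ctm.nc',
#                filetype='trac_avg.*', backend='xbpch', remake=True)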
def bpch_to_netCDF_via_PNC(format='bpch2', filename='ctm.nc',
output_file=None, bpch_file=None, folder=None):
""" Convert bpch to NetCDF using PNC as backend """
import PseudoNetCDF as pnc
# Load the file into memory
infile = pnc.pncopen(bpch_file, format=format)
# Kludge - reduce DXYP_DXYP dims online
dxyp = infile.variables['DXYP_DXYP']
# Surface area should not have a time dim; if it does, remove it.
if len(dxyp.shape) == 4:
dxyp.dimensions = dxyp.dimensions[1:]
infile.variables['DXYP_DXYP'] = dxyp
# Now write file to disc
# pnc.pncwrite(infile, folder+filename)
pnc.pncwrite(infile, output_file)
def get_folder(folder):
"""
Get name of folder that contains ctm.bpch data from command line
"""
if isinstance(folder, type(None)):
# getting the folder location from system argument
if len(sys.argv) <= 1:
logging.warning("No folder location specified for the data")
folder = os.getcwd()
else:
folder = str(sys.argv[1])
# Check folder exists
if not os.path.exists(folder):
print("Folder does not exist")
print(folder)
sys.exit()
return folder
if __name__ == "__main__":
convert_to_netCDF()
print("Complete")
|
tsherwen/AC_tools
|
AC_tools/bpch2netCDF.py
|
Python
|
mit
| 10,563
|
[
"NetCDF"
] |
7661a91be3c67e5f47c70337e84bd646716a20e3002195524dae5af4df7d1108
|
#! /usr/bin/env python
import sys, shutil
from numpy import *
from os import path, makedirs
from glob import glob
import subprocess
class color:
"""
define colors in the terminal
"""
purple = '\033[95m'
cyan = '\033[96m'
darkcyan = '\033[36m'
blue = '\033[94m'
green = '\033[92m'
yellow = '\033[93m'
red = '\033[91m'
bold = '\033[1m'
underline = '\033[4m'
end = '\033[0m'
grid_nrap = 11
rand_flag = 1
# the width of the Gaussian in the transverse plane
sigma_perp = 0.5
# peak position in the longitudinal direction
eta_0 = 2.0
# the width of the Gaussian in the longitudinal direction
sigma_eta = 0.5
centrality_list = ['0-5', '5-10', '10-20', '20-30',
'30-40', '40-50', '50-60', '60-70', '70-80']
def get_eta_factor_left(eta_local, beam_rapidity):
exp_factor = 1.0
if abs(eta_local) > eta_0:
exp_factor = exp(-(abs(eta_local) - eta_0)**2./(2.*sigma_eta**2.))
eta_left = 0.5*(1. - eta_local/beam_rapidity)*exp_factor
return(eta_left)
def get_eta_factor_right(eta_local, beam_rapidity):
exp_factor = 1.0
if abs(eta_local) > eta_0:
exp_factor = exp(-(abs(eta_local) - eta_0)**2./(2.*sigma_eta**2.))
eta_right = 0.5*(1. + eta_local/beam_rapidity)*exp_factor
return(eta_right)
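# Editor's note: together the two factors above implement the tilted
# longitudinal envelope
#   f_left(eta)  = 0.5*(1 - eta/y_beam) * G(eta)
#   f_right(eta) = 0.5*(1 + eta/y_beam) * G(eta)
# where G(eta) = exp(-(|eta| - eta_0)^2 / (2*sigma_eta^2)) for |eta| > eta_0
# and G(eta) = 1 on the plateau |eta| <= eta_0.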
def generate_3d_profile(data_path, ecm):
beam_rapidity = arctanh(sqrt(1. - 1./((ecm/2.)**2.)))
grid_eta = linspace(-beam_rapidity, beam_rapidity, grid_nrap)
for icen, cen_string in enumerate(centrality_list):
TA = loadtxt(
path.join(data_path, 'nuclear_thickness_TA_fromSd_order_2_C%s.dat'
% cen_string))
TB = loadtxt(
path.join(data_path, 'nuclear_thickness_TB_fromSd_order_2_C%s.dat'
% cen_string))
entropy_density = []
for ieta in range(len(grid_eta)):
eta_local = grid_eta[ieta]
temp_density = (
get_eta_factor_left(eta_local, beam_rapidity)*TA
+ get_eta_factor_right(eta_local, beam_rapidity)*TB)
entropy_density.append(temp_density)
with file('sdAvg_order_2_C%s_block_3d.dat' % cen_string, 'w') as outfile:
for slice_2d in entropy_density:
savetxt(outfile, slice_2d)
def print_help_message():
print "Usage : "
print(color.bold
+ "./generate_smooth_3d_profiles.py folder ecm "
+ color.end)
print "Usage of generate_smooth_3d_profiles.py command line arguments: "
print(color.bold + "-folder" + color.end + " folder path")
print(color.bold + "-ecm" + color.end
+ " collision energy (GeV): "
+ color.purple + "7.7, 11.5, 19.6, 27, 39, 62.4, 200, 2760, 5500"
+ color.end)
if __name__ == "__main__":
try:
data_path = path.abspath(str(sys.argv[1]))
ecm = float(sys.argv[2])
except(IndexError):
print_help_message()
exit(0)
generate_3d_profile(data_path, ecm)
|
chunshen1987/iEBE
|
EBE-Node/superMC/scripts/generate_3d_profiles/generate_smooth_3d_profiles.py
|
Python
|
gpl-3.0
| 3,034
|
[
"Gaussian"
] |
fb1c195b6b3a122bd301db0d06c6295549b2ffe54c714e45027ed24b3a505f3c
|
import os
import shutil
import copy
import glob
from GangaCore.testlib.GangaUnitTest import GangaUnitTest
from GangaCore.testlib.file_utils import generate_unique_temp_file
class TestLocalFileClient(GangaUnitTest):
"""test for sjid in filename names explain each test"""
_managed_files = []
# Num of sj in tests
sj_len = 3
# This sets up a LocalFileConfiguration which works by placing a file on local storage somewhere we can test using standard tools
LocalFileConfig = {'fileExtensions': [''],
'uploadOptions': {},
'backendPostprocess': {'LSF': 'client', 'LCG': 'client', 'ARC': 'client', 'Dirac': 'client',
'PBS': 'client', 'Interactive': 'client', 'Local': 'client', 'CREAM': 'client'}}
_ext = '.root'
def setUp(self):
"""
Configure the LocalFile for the test
"""
extra_opts=[('PollThread', 'autostart', 'False'),
('Local', 'remove_workdir', 'False'),
('TestingFramework', 'AutoCleanup', 'False'),
('Output', 'LocalFile', self.LocalFileConfig),
('Output', 'FailJobIfNoOutputMatched', 'True')]
super(TestLocalFileClient, self).setUp(extra_opts=extra_opts)
@staticmethod
def cleanUp():
""" Cleanup the current temp jobs """
from GangaCore.GPI import jobs
for j in jobs:
shutil.rmtree(j.backend.workdir, ignore_errors=True)
j.remove()
@classmethod
def tearDownTest(cls):
""" Cleanup the current temp objects """
for file_ in TestLocalFileClient._managed_files:
if os.path.isfile(file_):
os.unlink(file_)
else:
print(("ERROR REMOVING FILE: '%s'" % str(file_)))
TestLocalFileClient._managed_files = []
def test_a_testClientSideSubmit(self):
"""Test the client side code whilst stil using the Local backend"""
from GangaCore.GPI import LocalFile, Job, ArgSplitter
file_1 = generate_unique_temp_file(TestLocalFileClient._ext)
file_2 = generate_unique_temp_file(TestLocalFileClient._ext)
TestLocalFileClient._managed_files.append(file_1)
TestLocalFileClient._managed_files.append(file_2)
j = Job()
j.inputfiles = [LocalFile(file_1), LocalFile(file_2)]
j.splitter = ArgSplitter(args = [[_] for _ in range(TestLocalFileClient.sj_len)])
j.outputfiles = [LocalFile(namePattern='*'+TestLocalFileClient._ext)]
j.submit()
def test_b_testClientSideComplete(self):
"""Test the client side code whilst stil using the Local backend"""
from GangaCore.GPI import jobs
from GangaCore.GPIDev.Base.Proxy import stripProxy
from GangaTest.Framework.utils import sleep_until_completed
j = jobs[-1]
assert sleep_until_completed(j)
for sj in j.subjobs:
output_dir = stripProxy(sj).getOutputWorkspace(create=False).getPath()
assert os.path.isdir(output_dir)
# Check that the files were placed in the correct place on storage
for file_ in j.inputfiles:
for this_file in glob.glob(os.path.join(output_dir, file_.namePattern)):
assert os.path.isfile(this_file)
# Check that wildcard expansion happened correctly
assert len(stripProxy(sj).outputfiles[0].subfiles) == 2
assert len(sj.outputfiles) == 2
def test_c_testCopy(self):
from GangaCore.GPI import jobs, LocalFile
j = jobs[-1]
j2 = j.copy()
assert len(j2.outputfiles) == 1
assert j2.outputfiles[0] == LocalFile(namePattern='*'+TestLocalFileClient._ext)
assert len(j2.inputfiles) == 2
self.cleanUp()
class TestLocalFileWN(TestLocalFileClient):
"""test for sjid in filename names explain each test"""
LocalFileConfig = copy.deepcopy(TestLocalFileClient.LocalFileConfig)
LocalFileConfig['backendPostprocess']['Local'] = 'WN'
|
ganga-devs/ganga
|
ganga/GangaCore/test/GPI/FileTests/TestLocalFileClient.py
|
Python
|
gpl-3.0
| 4,115
|
[
"DIRAC"
] |
f01ec363cd6e27c4d3e00583f13b74c093616053b76f30b9dfb8a8b59212893f
|
import clientConfig as cConf
import Pyro4
from mmp_tracer_api import objID
from comsol_api import MMPComsolDummy
from mupif import PyroUtil, Property, PropertyID, FieldID, ValueType
import logging
logger = logging.getLogger()
import time as timeTime
start = timeTime.time()
logger.info('Timer started')
# locate nameserver
ns = PyroUtil.connectNameServer(nshost=cConf.nshost,
nsport=cConf.nsport,
hkey=cConf.hkey)
logger.info('NS connected: %s' % str(ns))
# Tunnels to different machines
mieTunnel = PyroUtil.sshTunnel(remoteHost=cConf.mieServer,
userName=cConf.mieUser,
localPort=cConf.mieNatPort,
remotePort=cConf.miePort,
sshClient=cConf.sshClient,
options=cConf.options,
sshHost=cConf.sshHost)
tracerTunnel = PyroUtil.sshTunnel(remoteHost=cConf.tracerServer,
userName=cConf.tracerUser,
localPort=cConf.tracerNatPort,
remotePort=cConf.tracerPort,
sshClient=cConf.sshClient,
options=cConf.options,
sshHost=cConf.sshHost)
comsolTunnel = PyroUtil.sshTunnel(remoteHost=cConf.mieServer,
userName=cConf.mieUser,
localPort=cConf.mieNatPort,
remotePort=cConf.miePort,
sshClient=cConf.sshClient,
options=cConf.options,
sshHost=cConf.sshHost)
mieApp = PyroUtil.connectApp(ns, cConf.mieID)
tracerApp = PyroUtil.connectApp(ns, cConf.tracerID)
comsolApp = PyroUtil.connectApp(ns, cConf.comsolID)
# Point data conversion to false. Speeds up testing
#tracerApp._convertPointData = False #does not work over pyro4... not exposed
logger.info('Applications loaded:')
print(mieApp)
print(tracerApp)
print(comsolApp)
# Connect fields
logger.info('Connecting Fields...')
# old way to connect fields:
fHeatSurf = comsolApp.getField(FieldID.FID_Thermal_absorption_surface, 0)
fHeatVol = comsolApp.getField(FieldID.FID_Thermal_absorption_volume, 0)
tracerApp.setField(fHeatSurf)
tracerApp.setField(fHeatVol)
logger.info('Fields connected')
# Connect properties
# Emission spectrum
import numpy as np
a = {}
A = np.loadtxt('../../mmp_tracer_api/data/EM_GREEN.dat')
a['wavelengths'] = A[:, 0]
a['intensities'] = A[:, 1]
em = Property.Property(value=a,
propID=PropertyID.PID_EmissionSpectrum,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(em)
# Excitation spectrum
b = {}
B = np.loadtxt('../../mmp_tracer_api/data/EX_GREEN.dat')
b['wavelengths'] = B[:, 0]
b['intensities'] = B[:, 1]
ex = Property.Property(value=b,
propID=PropertyID.PID_ExcitationSpectrum,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(ex)
# Absorption spectrum
c = {}
C = np.loadtxt('../../mmp_tracer_api/data/Abs_GREEN.dat')
c['wavelengths'] = C[:, 0]
c['intensities'] = C[:, 1]
aabs = Property.Property(value=c,
propID=PropertyID.PID_AsorptionSpectrum,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(aabs)
# Particle density
logger.info('Setting Properties...')
vDens = 0.00000003400
pDens = Property.Property(value=vDens,
propID=PropertyID.PID_ParticleNumberDensity,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_CONE)
tracerApp.setProperty(pDens)
# Number of rays to trace
pRays = Property.Property(value=100,
propID=PropertyID.PID_NumberOfRays,
valueType=ValueType.Scalar,
time=0.0,
units=None,
objectID=objID.OBJ_CONE)
tracerApp.setProperty(pRays)
logger.info('Properties set!')
# Solve mie
mieApp.solveStep(0)
# Connect mie properties to tracer
logger.info('Connecting Mie properties...')
pScat = mieApp.getProperty(PropertyID.PID_ScatteringCrossSections, 0,
objID.OBJ_PARTICLE_TYPE_1)
pPhase = mieApp.getProperty(PropertyID.PID_InverseCumulativeDist, 0,
objID.OBJ_PARTICLE_TYPE_1)
logger.info('Props received...')
tracerApp.setProperty(pScat, objID.OBJ_PARTICLE_TYPE_1)
tracerApp.setProperty(pPhase, objID.OBJ_PARTICLE_TYPE_1)
logger.info('Props connected')
# Solve tracer
tracerApp.solveStep(0, runInBackground=False)
# Connect tracer back to comsol
fHeatSurf = tracerApp.getField(FieldID.FID_Thermal_absorption_surface, 0)
fHeatVol = tracerApp.getField(FieldID.FID_Thermal_absorption_volume, 0)
comsolApp.setField(fHeatSurf)
comsolApp.setField(fHeatVol)
# Solve comsol
comsolApp.solveStep(0)
# Plot data to file
# logger.info("Saving vtk")
# v = fHeatVol.field2VTKData()
# v.tofile('testHeat.vtk')
'''
# Kill tunnels.
mieTunnel.kill()
tracerTunnel.kill()
# comsolTunnel.kill()
'''
|
ollitapa/MMP-TracerApi
|
Tests/NetworkTest/networkTest.py
|
Python
|
apache-2.0
| 5,712
|
[
"VTK"
] |
0a6ce461fd1a414cb75bd96a0c32a241fc8a56385ac59e6b033aa9ecaee19e6c
|
'''
Created on Jul 21, 2011
@author: mkiyer
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import sys
import collections
import itertools
import operator
from chimerascan import pysam
from chimerascan.lib import config
from chimerascan.lib.chimera import DiscordantRead, Chimera, frags_to_encomp_string
from chimerascan.lib.gene_to_genome import build_tx_name_gene_map, build_genome_tx_trees
from chimerascan.lib.fragment_size_distribution import InsertSizeDistribution
from chimerascan.lib.seq import calc_homology
def parse_discordant_bedpe_by_transcript_pair(fh):
prev_tx5p, prev_tx3p = None,None
frags = []
for line in fh:
fields = line.strip().split('\t')
tx5p = fields[0]
tx3p = fields[3]
dr5p = DiscordantRead.from_list(fields[10].split("|"))
dr3p = DiscordantRead.from_list(fields[11].split("|"))
if (tx5p, tx3p) != (prev_tx5p, prev_tx3p):
if len(frags) > 0:
yield prev_tx5p, prev_tx3p, frags
frags = []
prev_tx5p, prev_tx3p = tx5p, tx3p
frags.append((dr5p, dr3p))
if len(frags) > 0:
yield tx5p, tx3p, frags
def calc_isize_prob(isize, isize_dist):
# find percentile of observing this insert size in the reads
isize_per = isize_dist.percentile_at_isize(isize)
# convert to a probability score (0.0-1.0)
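# e.g. an insert size at the 50th percentile maps to 1.0, while one at the
# 5th or 95th percentile maps to 0.1 (illustrative values)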
isize_prob = 1.0 - (2.0 * abs(50.0 - isize_per))/100.0
return isize_prob
def choose_best_breakpoints(r5p, r3p, tx5p, tx3p, trim_bp, isize_dist):
best_breakpoints = set()
best_isize_prob = None
# iterate through 5' transcript exons
exon_iter_5p = reversed(tx5p.exons) if tx5p.strand == '-' else iter(tx5p.exons)
tx_end_5p = 0
for exon_num_5p,coords5p in enumerate(exon_iter_5p):
genome_start_5p, genome_end_5p = coords5p
exon_size_5p = genome_end_5p - genome_start_5p
tx_end_5p += exon_size_5p
# fast forward on 5' gene to first exon beyond read
if tx_end_5p < (r5p.aend - trim_bp):
continue
#print "tx end 5p", tx_end_5p, "exon_size_5p", exon_size_5p, "r5p.aend", r5p.aend, "trim_bp", trim_bp
# now have a candidate insert size between the 5' read and the
# end of 5' exon
isize5p = tx_end_5p - r5p.pos
# iterate through 3' transcript
exon_iter_3p = reversed(tx3p.exons) if tx3p.strand == '-' else iter(tx3p.exons)
tx_start_3p = 0
local_best_breakpoints = set()
local_best_isize_prob = None
for exon_num_3p,coords3p in enumerate(exon_iter_3p):
genome_start_3p, genome_end_3p = coords3p
#print "\t", coords3p
# stop after going past read on 3' transcript
if tx_start_3p >= (r3p.pos + trim_bp):
break
# get another candidate insert size between start of 3'
# exon and 3' read
isize3p = r3p.aend - tx_start_3p
#print "\t", isize5p, isize3p, tx_end_5p, tx_start_3p
# compare the insert size against the known insert size
# distribution
isize_prob = calc_isize_prob(isize5p + isize3p, isize_dist)
if ((local_best_isize_prob is None) or
(isize_prob > local_best_isize_prob)):
local_best_isize_prob = isize_prob
local_best_breakpoints = set([(exon_num_5p, tx_end_5p,
exon_num_3p, tx_start_3p)])
elif (isize_prob == local_best_isize_prob):
local_best_breakpoints.add((exon_num_5p, tx_end_5p,
exon_num_3p, tx_start_3p))
tx_start_3p += genome_end_3p - genome_start_3p
# compare locally best insert size probability to global best
if ((best_isize_prob is None) or
(local_best_isize_prob > best_isize_prob)):
best_isize_prob = local_best_isize_prob
best_breakpoints = local_best_breakpoints
elif (local_best_isize_prob == best_isize_prob):
# for ties we keep all possible breakpoints
best_breakpoints.update(local_best_breakpoints)
return best_isize_prob, best_breakpoints
def extract_breakpoint_sequence(tx_name_5p, tx_end_5p,
tx_name_3p, tx_start_3p,
ref_fa, max_read_length,
homology_mismatches):
tx_start_5p = max(0, tx_end_5p - max_read_length + 1)
tx_end_3p = tx_start_3p + max_read_length - 1
# fetch sequence
seq5p = ref_fa.fetch(tx_name_5p, tx_start_5p, tx_end_5p).upper()
seq3p = ref_fa.fetch(tx_name_3p, tx_start_3p, tx_end_3p).upper()
# pad sequence if too short
if len(seq5p) < (max_read_length - 1):
logging.warning("Could not extract sequence of length >%d from "
"5' partner at %s:%d-%d, only retrieved "
"sequence of length %d" %
(max_read_length-1, tx_name_5p, tx_start_5p,
tx_end_5p, len(seq5p)))
# pad sequence
padding = (max_read_length - 1) - len(seq5p)
seq5p = ("N" * padding) + seq5p
if len(seq3p) < max_read_length - 1:
logging.warning("Could not extract sequence of length >%d from "
"3' partner at %s:%d-%d, only retrieved "
"sequence of length %d" %
(max_read_length-1, tx_name_3p, tx_start_3p,
tx_end_3p, len(seq3p)))
# pad sequence
padding = (max_read_length - 1) - len(seq3p)
seq3p = seq3p + ("N" * padding)
# if 5' partner continues along its normal transcript
# without fusing, get the sequence that would result
homolog_end_5p = tx_end_5p + max_read_length - 1
homolog_seq_5p = ref_fa.fetch(tx_name_5p, tx_end_5p, homolog_end_5p).upper()
# if 3' partner were to continue in the 5' direction,
# grab the sequence that would be produced
homolog_start_3p = max(0, tx_start_3p - max_read_length + 1)
homolog_seq_3p = ref_fa.fetch(tx_name_3p, homolog_start_3p, tx_start_3p).upper()
# count number of bases in common between downstream 5' sequence
# and the sequence of the 3' partner in the chimera
homology_right = calc_homology(homolog_seq_5p, seq3p,
homology_mismatches)
# count number of bases in common between upstream 3' sequence
# and the sequence of the 5' partner in the chimera
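# (both sequences are reversed so the comparison starts at the breakpoint
# junction and walks upstream, away from it)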
homology_left = calc_homology(homolog_seq_3p[::-1], seq5p[::-1],
homology_mismatches)
return seq5p, seq3p, homology_left, homology_right
def nominate_chimeras(index_dir, isize_dist_file, input_file, output_file,
trim_bp, max_read_length, homology_mismatches):
# read insert size distribution
isize_dist = InsertSizeDistribution.from_file(open(isize_dist_file))
# build a lookup table to get genomic intervals from transcripts
logging.debug("Reading gene information")
gene_file = os.path.join(index_dir, config.GENE_FEATURE_FILE)
tx_name_gene_map = build_tx_name_gene_map(gene_file, rname_prefix=None)
#genome_tx_trees = build_genome_tx_trees(gene_file)
# open the reference sequence fasta file
ref_fasta_file = os.path.join(index_dir, config.ALIGN_INDEX + ".fa")
ref_fa = pysam.Fastafile(ref_fasta_file)
# keep track of mapping from breakpoint sequence to breakpoint id
# this requires storing all breakpoint sequences in memory which is
# potentially expensive. TODO: investigate whether this should be
# moved to a separate sort-update-sort procedure
breakpoint_seq_name_map = {}
breakpoint_num = 1
# group discordant read pairs by gene
logging.debug("Parsing discordant reads")
chimera_num = 1
outfh = open(output_file, "w")
for tx_name_5p, tx_name_3p, frags in parse_discordant_bedpe_by_transcript_pair(open(input_file)):
# get gene information
tx5p = tx_name_gene_map[tx_name_5p]
tx3p = tx_name_gene_map[tx_name_3p]
# bin fragments into putative breakpoints
breakpoint_dict = collections.defaultdict(lambda: [])
for dr5p,dr3p in frags:
# given the insert size find the highest probability
# exon junction breakpoint between the two transcripts
isize_prob, breakpoints = \
choose_best_breakpoints(dr5p, dr3p, tx5p, tx3p,
trim_bp, isize_dist)
for breakpoint in breakpoints:
breakpoint_dict[breakpoint].append((dr5p, dr3p))
# iterate through breakpoints and build chimera candidates
for breakpoint,frags in breakpoint_dict.iteritems():
exon_num_5p, tx_end_5p, exon_num_3p, tx_start_3p = breakpoint
breakpoint_seq_5p, breakpoint_seq_3p, homology_left, homology_right = \
extract_breakpoint_sequence(config.GENE_REF_PREFIX + tx5p.tx_name, tx_end_5p,
config.GENE_REF_PREFIX + tx3p.tx_name, tx_start_3p,
ref_fa, max_read_length,
homology_mismatches)
tx3p_length = sum((end - start) for start,end in tx3p.exons)
# get unique breakpoint id based on sequence
breakpoint_seq = breakpoint_seq_5p + breakpoint_seq_3p
if breakpoint_seq in breakpoint_seq_name_map:
breakpoint_name = breakpoint_seq_name_map[breakpoint_seq]
else:
breakpoint_name = "B%07d" % (breakpoint_num)
breakpoint_seq_name_map[breakpoint_seq] = breakpoint_name
breakpoint_num += 1
# write gene, breakpoint, and raw reads to a file and follow the
# BEDPE format
gene_name_5p = '_'.join(tx5p.gene_name.split())
gene_name_3p = '_'.join(tx3p.gene_name.split())
fields = [tx5p.tx_name, 0, tx_end_5p, # chrom1, start1, end1
tx3p.tx_name, tx_start_3p, tx3p_length, # chrom2, start2, end2
"C%07d" % (chimera_num), # name
1.0, # pvalue
tx5p.strand, tx3p.strand, # strand1, strand2
gene_name_5p, gene_name_3p, # gene names
# exon interval information
'%d-%d' % (0, exon_num_5p),
'%d-%d' % (exon_num_3p, len(tx3p.exons)),
# breakpoint information
breakpoint_name,
breakpoint_seq_5p, breakpoint_seq_3p,
homology_left, homology_right,
# fragments
frags_to_encomp_string(frags),
# spanning reads
None]
print >>outfh, '\t'.join(map(str, fields))
chimera_num += 1
outfh.close()
ref_fa.close()
return config.JOB_SUCCESS
def main():
from optparse import OptionParser
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = OptionParser("usage: %prog [options] <index> <isize_dist.txt> "
"<discordant_reads.srt.bedpe> <chimeras.txt>")
parser.add_option("--trim", dest="trim", type="int",
default=config.EXON_JUNCTION_TRIM_BP,
help="apply trimming when choosing exon boundaries to "
"to consider possible breakpoints")
parser.add_option("--max-read-length", dest="max_read_length", type="int",
default=100, metavar="N",
help="Reads in the BAM file are guaranteed to have "
"length less than N [default=%default]")
parser.add_option("--homology-mismatches", type="int",
dest="homology_mismatches",
default=config.BREAKPOINT_HOMOLOGY_MISMATCHES,
help="Number of mismatches to tolerate when computing "
"homology between gene and its chimeric partner "
"[default=%default]")
options, args = parser.parse_args()
index_dir = args[0]
isize_dist_file = args[1]
input_file = args[2]
output_file = args[3]
return nominate_chimeras(index_dir, isize_dist_file,
input_file, output_file,
options.trim,
options.max_read_length,
options.homology_mismatches)
if __name__ == '__main__':
sys.exit(main())
|
genome-vendor/chimerascan
|
chimerascan/pipeline/nominate_chimeras.py
|
Python
|
gpl-3.0
| 13,513
|
[
"pysam"
] |
dfee8ad3d5f898da1bbc982cbd8f72d51031ee5716c60c021cb8bde91152aad9
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Tests for the AMBER netcdf reader/writer code
"""
from mdtraj.formats import netcdf, NetCDFTrajectoryFile
import os, tempfile
from nose.tools import assert_raises
import numpy as np
import mdtraj as md
from mdtraj.testing import get_fn, eq, DocStringFormatTester, raises
TestDocstrings = DocStringFormatTester(netcdf, error_on_none=True)
fd, temp = tempfile.mkstemp(suffix='.nc')
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by nose"""
os.close(fd)
os.unlink(temp)
def test_read_after_close():
f = NetCDFTrajectoryFile(get_fn('mdcrd.nc'))
yield lambda: eq(f.n_atoms, 223)
yield lambda: eq(f.n_frames, 101)
f.close()
# should be an ioerror if you read a file that's closed
assert_raises(IOError, lambda: f.read())
def test_shape():
xyz, time, boxlength, boxangles = NetCDFTrajectoryFile(get_fn('mdcrd.nc')).read()
yield lambda: eq(xyz.shape, (101, 223, 3))
yield lambda: eq(time.shape, (101,))
yield lambda: eq(boxlength, None)
yield lambda: eq(boxangles, None)
def test_read_chunk_1():
with NetCDFTrajectoryFile(get_fn('mdcrd.nc')) as f:
a, b, c, d = f.read(10)
e, f, g, h = f.read()
yield lambda: eq(len(a), 10)
yield lambda: eq(len(b), 10)
yield lambda: eq(len(e), 101-10)
yield lambda: eq(len(f), 101-10)
xyz = NetCDFTrajectoryFile(get_fn('mdcrd.nc')).read()[0]
yield lambda: eq(a, xyz[0:10])
yield lambda: eq(e, xyz[10:])
def test_read_chunk_2():
with NetCDFTrajectoryFile(get_fn('mdcrd.nc')) as f:
a, b, c, d = f.read(10)
e, f, g, h = f.read(100000000000)
yield lambda: eq(len(a), 10)
yield lambda: eq(len(b), 10)
yield lambda: eq(len(e), 101-10)
yield lambda: eq(len(f), 101-10)
xyz = NetCDFTrajectoryFile(get_fn('mdcrd.nc')).read()[0]
yield lambda: eq(a, xyz[0:10])
yield lambda: eq(e, xyz[10:])
def test_read_chunk_3():
# too big of a chunk should not be an issue
a = NetCDFTrajectoryFile(get_fn('mdcrd.nc')).read(1000000000)
b = NetCDFTrajectoryFile(get_fn('mdcrd.nc')).read()
eq(a[0], b[0])
def test_read_write_1():
xyz = np.random.randn(100, 3, 3)
time = np.random.randn(100)
boxlengths = np.random.randn(100, 3)
boxangles = np.random.randn(100, 3)
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(xyz, time, boxlengths, boxangles)
with NetCDFTrajectoryFile(temp) as f:
a, b, c, d = f.read()
yield lambda: eq(a, xyz)
yield lambda: eq(b, time)
yield lambda: eq(c, boxlengths)
yield lambda: eq(d, boxangles)
def test_read_write_2():
xyz = np.random.randn(5, 22, 3)
time = np.random.randn(5)
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(xyz, time)
with NetCDFTrajectoryFile(temp) as f:
rcoord, rtime, rlengths, rangles = f.read()
yield lambda: eq(rcoord, xyz)
yield lambda: eq(rtime, time)
yield lambda: eq(rlengths, None)
yield lambda: eq(rangles, None)
t = md.load(temp, top=get_fn('native.pdb'))
eq(t.unitcell_angles, None)
eq(t.unitcell_lengths, None)
def test_ragged_1():
# try first writing no cell angles/lengths, and then adding some
xyz = np.random.randn(100, 3, 3)
time = np.random.randn(100)
cell_lengths = np.random.randn(100, 3)
cell_angles = np.random.randn(100, 3)
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(xyz, time)
assert_raises(ValueError, lambda: f.write(xyz, time, cell_lengths, cell_angles))
def test_ragged_2():
# try first writing no cell angles/lengths, and then adding some
xyz = np.random.randn(100, 3, 3)
time = np.random.randn(100)
cell_lengths = np.random.randn(100, 3)
cell_angles = np.random.randn(100, 3)
#from mdtraj.formats import HDF5TrajectoryFile
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(xyz, time, cell_lengths, cell_angles)
assert_raises(ValueError, lambda: f.write(xyz, time))
def test_read_write_25():
xyz = np.random.randn(100, 3, 3)
time = np.random.randn(100)
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(xyz, time)
f.write(xyz, time)
with NetCDFTrajectoryFile(temp) as f:
a, b, c, d = f.read()
yield lambda: eq(a[0:100], xyz)
yield lambda: eq(b[0:100], time)
yield lambda: eq(c, None)
yield lambda: eq(d, None)
yield lambda: eq(a[100:], xyz)
yield lambda: eq(b[100:], time)
yield lambda: eq(c, None)
yield lambda: eq(d, None)
def test_write_3():
xyz = np.random.randn(100, 3, 3)
time = np.random.randn(100)
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
# you can't supply cell_lengths without cell_angles
assert_raises(ValueError, lambda: f.write(np.random.randn(100, 3, 3), cell_lengths=np.random.randn(100, 3)))
# or the other way around
assert_raises(ValueError, lambda: f.write(np.random.randn(100, 3, 3), cell_angles=np.random.randn(100, 3)))
def test_n_atoms():
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(np.random.randn(1,11,3))
with NetCDFTrajectoryFile(temp) as f:
eq(f.n_atoms, 11)
def test_do_overwrite():
with open(temp, 'w') as f:
f.write('a')
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(np.random.randn(10,5,3))
@raises(IOError)
def test_do_not_overwrite():
with open(temp, 'w') as f:
f.write('a')
with NetCDFTrajectoryFile(temp, 'w', force_overwrite=False) as f:
f.write(np.random.randn(10,5,3))
def test_trajectory_save_load():
t = md.load(get_fn('native.pdb'))
t.unitcell_lengths = 1 * np.ones((1, 3))
t.unitcell_angles = 90 * np.ones((1, 3))
t.save(temp)
t2 = md.load(temp, top=t.topology)
eq(t.xyz, t2.xyz)
eq(t.unitcell_lengths, t2.unitcell_lengths)
|
kyleabeauchamp/mdtraj
|
mdtraj/tests/test_netcdf.py
|
Python
|
lgpl-2.1
| 7,177
|
[
"Amber",
"MDTraj",
"NetCDF"
] |
ff0138999ef29f92d8f55000d69665c8e265de74389ffb577c966d8929015d26
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays a GUI for the Orca Find window"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import os
import sys
from gi.repository import Gtk
import locale
from . import find
from . import guilabels
from . import orca_gtkbuilder
from . import orca_state
from . import orca_platform
OS = None
class OrcaFindGUI(orca_gtkbuilder.GtkBuilderWrapper):
def __init__(self, fileName, windowName):
"""Initialize the Orca configuration GUI.
Arguments:
- fileName: name of the GtkBuilder file.
- windowName: name of the component to get from the GtkBuilder file.
"""
orca_gtkbuilder.GtkBuilderWrapper.__init__(self, fileName, windowName)
# Initialize variables to None to keep pylint happy.
#
self.activeScript = None
self.caseSensitive = None
self.matchEntireWord = None
self.searchBackwards = None
self.searchString = None
self.startAtTop = None
self.windowWrap = None
def init(self):
# Initialize the dialog box controls.
self.searchString = ""
self.searchBackwards = False
self.caseSensitive = False
self.matchEntireWord = False
self.windowWrap = True
self.startAtTop = False
self.activeScript = orca_state.activeScript
def showGUI(self):
"""Show the Orca Find dialog. This assumes that the GUI has
already been created.
"""
findDialog = self.get_widget("findDialog")
ts = orca_state.lastInputEvent.timestamp
if ts == 0:
ts = Gtk.get_current_event_time()
findDialog.present_with_time(ts)
# Populate the dialog box from the previous searchQuery, should
# one exist. Note: This is necessary because we are destroying
# the dialog (rather than merely hiding it) before performing the
# search.
try:
searchForEntry = self.get_widget("searchForEntry")
searchForEntry.set_text(orca_state.searchQuery.searchString)
searchForEntry.select_region(0, len(searchForEntry.get_text()))
if orca_state.searchQuery.startAtTop:
self.get_widget("topRadioButton").set_active(True)
self.get_widget("matchCaseCheckbox").set_active(\
orca_state.searchQuery.caseSensitive)
self.get_widget("matchEntireWordCheckbox").set_active(\
orca_state.searchQuery.matchEntireWord)
self.get_widget("wrapAroundCheckbox").set_active(\
orca_state.searchQuery.windowWrap)
self.get_widget("searchBackwardsCheckbox").set_active(\
orca_state.searchQuery.searchBackwards)
except:
pass
def searchForEntryChanged(self, widget):
"""Signal handler for the "changed" signal for the
searchForEntry GtkEntry widget. The user has changed
the string to be searched for.
Arguments:
- widget: the component that generated the signal.
"""
self.searchString = widget.get_text()
findButton = self.get_widget("findButton")
if len(self.searchString) > 0:
findButton.set_sensitive(True)
else:
findButton.set_sensitive(False)
def startingPointChanged(self, widget):
"""Signal handler for the "toggled" signal for the
currentLocationRadioButton or topRadioButton GtkRadioButton
widgets. The user has toggled the starting point for the search.
Arguments:
- widget: the component that generated the signal.
"""
if widget.get_active():
if widget.get_label() == guilabels.FIND_START_AT_CURRENT_LOCATION:
self.startAtTop = False
else:
self.startAtTop = True
def matchCaseChecked(self, widget):
"""Signal handler for the "toggled" signal for the
matchCaseCheckbox GtkCheckButton widget. The user has
[un]checked the "Match Case" checkbox.
Arguments:
- widget: the component that generated the signal.
"""
self.caseSensitive = widget.get_active()
def matchEntireWordChecked(self, widget):
"""Signal handler for the "toggled" signal for the
matchEntireWordCheckbox GtkCheckButton widget.
The user has [un]checked the "Match entire word"
checkbox.
Arguments:
- widget: the component that generated the signal.
"""
self.matchEntireWord = widget.get_active()
def searchBackwardsChecked(self, widget):
"""Signal handler for the "toggled" signal for the
searchBackwardsCheckbox GtkCheckButton widget.
The user has [un]checked the "Search backwards"
checkbox.
Arguments:
- widget: the component that generated the signal.
"""
self.searchBackwards = widget.get_active()
def wrapAroundChecked(self, widget):
"""Signal handler for the "toggled" signal for the
wrapAroundCheckbox GtkCheckButton widget. The user has
[un]checked the "Wrap around" checkbox.
Arguments:
- widget: the component that generated the signal.
"""
self.windowWrap = widget.get_active()
def closeButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the cancelButton
GtkButton widget. The user has clicked the Cancel button.
Hide the dialog.
Arguments:
- widget: the component that generated the signal.
"""
self.get_widget("findDialog").hide()
def findButtonClicked(self, widget):
"""Signal handler for the "clicked" signal for the findButton
GtkButton widget. The user has clicked the Find button.
Call the method to begin the search.
Arguments:
- widget: the component that generated the signal.
"""
orca_state.searchQuery = find.SearchQuery()
orca_state.searchQuery.searchString = self.searchString
orca_state.searchQuery.searchBackwards = self.searchBackwards
orca_state.searchQuery.caseSensitive = self.caseSensitive
orca_state.searchQuery.matchEntireWord = self.matchEntireWord
orca_state.searchQuery.startAtTop = self.startAtTop
orca_state.searchQuery.windowWrap = self.windowWrap
self.activeScript.findCommandRun = True
# Merely hiding the dialog causes the find to take place before
# the original window has fully regained focus.
self.get_widget("findDialog").destroy()
def findDialogDestroyed(self, widget):
"""Signal handler for the "destroyed" signal for the findDialog
GtkWindow widget. Reset OS to None.
Arguments:
- widget: the component that generated the signal.
"""
global OS
OS = None
def showFindUI():
global OS
if not OS:
uiFile = os.path.join(orca_platform.datadir,
orca_platform.package,
"ui",
"orca-find.ui")
OS = OrcaFindGUI(uiFile, "findDialog")
OS.init()
OS.showGUI()
def main():
locale.setlocale(locale.LC_ALL, '')
showFindUI()
Gtk.main()
sys.exit(0)
if __name__ == "__main__":
main()
|
pvagner/orca
|
src/orca/orca_gui_find.py
|
Python
|
lgpl-2.1
| 8,319
|
[
"ORCA"
] |
809df005b681db0be2faa94aa4da193b802b14c50504781da8d05b2c7d952d4c
|
import math
import vtk
def main():
# 3D source sphere
sphereSource = vtk.vtkSphereSource()
sphereSource.SetPhiResolution(30)
sphereSource.SetThetaResolution(30)
sphereSource.SetCenter(40, 40, 0)
sphereSource.SetRadius(20)
# generate circle by cutting the sphere with an implicit plane
# (through its center, axis-aligned)
circleCutter = vtk.vtkCutter()
circleCutter.SetInputConnection(sphereSource.GetOutputPort())
cutPlane = vtk.vtkPlane()
cutPlane.SetOrigin(sphereSource.GetCenter())
cutPlane.SetNormal(0, 0, 1)
circleCutter.SetCutFunction(cutPlane)
stripper = vtk.vtkStripper()
stripper.SetInputConnection(circleCutter.GetOutputPort()) # valid circle
stripper.Update()
# that's our circle
circle = stripper.GetOutput()
# write circle out
polyDataWriter = vtk.vtkXMLPolyDataWriter()
polyDataWriter.SetInputData(circle)
polyDataWriter.SetFileName("circle.vtp")
polyDataWriter.SetCompressorTypeToNone()
polyDataWriter.SetDataModeToAscii()
polyDataWriter.Write()
# prepare the binary image's voxel grid
whiteImage = vtk.vtkImageData()
bounds = [0] * 6
circle.GetBounds(bounds)
spacing = [0] * 3 # desired volume spacing
spacing[0] = 0.5
spacing[1] = 0.5
spacing[2] = 0.5
whiteImage.SetSpacing(spacing)
# compute dimensions
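# (illustrative: the cut circle spans roughly 40x40 units in x/y, so with a
# 0.5 spacing the grid ends up around 81 x 81 x 1 voxels)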
dim = [0] * 3
for i in range(3):
dim[i] = int(math.ceil((bounds[i * 2 + 1] - bounds[i * 2]) / spacing[i])) + 1
if dim[i] < 1:
dim[i] = 1
whiteImage.SetDimensions(dim)
whiteImage.SetExtent(0, dim[0] - 1, 0, dim[1] - 1, 0, dim[2] - 1)
origin = [0] * 3
# NOTE: I am not sure whether or not we had to add some offset!
origin[0] = bounds[0] # + spacing[0] / 2
origin[1] = bounds[2] # + spacing[1] / 2
origin[2] = bounds[4] # + spacing[2] / 2
whiteImage.SetOrigin(origin)
whiteImage.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
# fill the image with foreground voxels:
inval = 255
outval = 0
count = whiteImage.GetNumberOfPoints()
# equivalent C++ loop: for (vtkIdType i = 0; i < count; ++i)
for i in range(count):
whiteImage.GetPointData().GetScalars().SetTuple1(i, inval)
# sweep polygonal data (this is the important thing with contours!)
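# (the cut produces only a closed polyline with no thickness; extruding it one
# unit along z gives the stencil filter a closed surface to rasterize)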
extruder = vtk.vtkLinearExtrusionFilter()
extruder.SetInputData(circle)
extruder.SetScaleFactor(1.0)
# extruder.SetExtrusionTypeToNormalExtrusion()
extruder.SetExtrusionTypeToVectorExtrusion()
extruder.SetVector(0, 0, 1)
extruder.Update()
# polygonal data -> image stencil:
pol2stenc = vtk.vtkPolyDataToImageStencil()
pol2stenc.SetTolerance(0) # important if extruder.SetVector(0, 0, 1) !!!
pol2stenc.SetInputConnection(extruder.GetOutputPort())
pol2stenc.SetOutputOrigin(origin)
pol2stenc.SetOutputSpacing(spacing)
pol2stenc.SetOutputWholeExtent(whiteImage.GetExtent())
pol2stenc.Update()
# cut the corresponding white image and set the background:
imgstenc = vtk.vtkImageStencil()
imgstenc.SetInputData(whiteImage)
imgstenc.SetStencilConnection(pol2stenc.GetOutputPort())
imgstenc.ReverseStencilOff()
imgstenc.SetBackgroundValue(outval)
imgstenc.Update()
imageWriter = vtk.vtkMetaImageWriter()
imageWriter.SetFileName("labelImage.mhd")
imageWriter.SetInputConnection(imgstenc.GetOutputPort())
imageWriter.Write()
imageWriter = vtk.vtkPNGWriter()
imageWriter.SetFileName("labelImage.png")
imageWriter.SetInputConnection(imgstenc.GetOutputPort())
imageWriter.Write()
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/PolyData/PolyDataContourToImageData.py
|
Python
|
apache-2.0
| 3,633
|
[
"VTK"
] |
8fff20629662a17d7d446aae2c33563a433b4ab3c80efd5e06fd3632c1884ffa
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Starting template for Google App Engine applications.
Use this project as a starting point if you are just beginning to build a Google
App Engine project. Remember to fill in the OAuth 2.0 client_id and
client_secret which can be obtained from the Developer Console
<https://code.google.com/apis/console/>
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id and client_secret are copied from the Identity tab on
# the Google APIs Console <http://code.google.com/apis/console>
FLOW = OAuth2WebServerFlow(
client_id='<client id goes here>',
client_secret='<client secret goes here>',
scope='https://www.googleapis.com/auth/buzz',
user_agent='my-sample-app/1.0')
class Credentials(db.Model):
credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
credentials = StorageByKeyName(
Credentials, user.user_id(), 'credentials').get()
if not credentials or credentials.invalid:
return begin_oauth_flow(self, user)
http = credentials.authorize(httplib2.Http())
# Build a service object for interacting with the API. Visit
# the Google APIs Console <http://code.google.com/apis/console>
# to get a developerKey for your own application.
service = build("buzz", "v1", http=http)
followers = service.people().list(
userId='@me', groupId='@followers').execute()
text = 'Hello, you have %s followers!' % followers['totalResults']
path = os.path.join(os.path.dirname(__file__), 'welcome.html')
self.response.out.write(template.render(path, {'text': text }))
def begin_oauth_flow(request_handler, user):
callback = request_handler.request.relative_url('/oauth2callback')
authorize_url = FLOW.step1_get_authorize_url(callback)
# Here we are using memcache to store the flow temporarily while the user
# is directed to authorize our service. You could also store the flow
# in the datastore depending on your utilization of memcache, just remember
# in that case to clean up the flow after you are done with it.
memcache.set(user.user_id(), pickle.dumps(FLOW))
request_handler.redirect(authorize_url)
class OAuthHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
flow = pickle.loads(memcache.get(user.user_id()))
# This code should be amended with application-specific error
# handling. The following cases should be considered:
# 1. What if the flow doesn't exist in memcache? Or is corrupt?
# 2. What if the step2_exchange fails?
if flow:
credentials = flow.step2_exchange(self.request.params)
StorageByKeyName(
Credentials, user.user_id(), 'credentials').put(credentials)
self.redirect("/")
else:
# Add application specific error handling here.
pass
def main():
application = webapp.WSGIApplication(
[
('/', MainHandler),
('/oauth2callback', OAuthHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
MapofLife/MOL
|
earthengine/google-api-python-client/samples/new_project_template/main.py
|
Python
|
bsd-3-clause
| 4,625
|
[
"VisIt"
] |
50cf840e33627053df9a0542bf271e0c0c56fd3c307d546b1c9460c48943fd1d
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import ctypes
import numpy
try:
# Not normally needed. Not available in demo app.
import hotshot
except:
pass
# Attempt to import OpenCV's ctypes-based bindings
try:
from opencv.cvtypes import cv
except:
cv = None
from StringIO import StringIO
from PIL import (Image,
ImageChops)
from nupic.regions.PyRegion import PyRegion, RealNumpyDType
from nupic.regions.Spec import *
# Global counter used for some debugging operations
id = 0
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# GaborNode
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
class GaborNode2(PyRegion):
"""
Performs dense Gabor filtering upon a multi-resolution grid.
"""
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Class constants
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# The minimum filter size dimension (3x3)
minFilterDim = 3
# The minimum number of filter orientations
minNumOrients = 0
# List of filter dimensions supported by the optimized
# C library
_optimizedFilterDims = [5, 7, 9, 11, 13]
# Valid parameter values
_validValues = {
'phaseMode': ('single', 'dual'),
'targetType': ('edge', 'line'),
'boundaryMode': ('constrained', 'sweepOff'),
'normalizationMethod': ('fixed', 'max', 'mean'),
'postProcessingMethod': ('raw', 'sigmoid', 'threshold'),
'nta_morphologyMethod': ('best', 'opencv', 'nta'),
}
# Default parameter values
_defaults = {
# Documented parameters:
'filterDim': 9,
'numOrientations': 4,
'phaseMode': 'single',
'centerSurround': False,
'targetType': 'edge',
'gainConstant': 1.0,
'normalizationMethod': 'fixed',
'perPlaneNormalization': False,
'perPhaseNormalization': True,
'postProcessingMethod': 'raw',
'postProcessingSlope': 1.0,
'postProcessingCenter': 0.5,
'postProcessingMin': 0.0,
'postProcessingMax': 1.0,
'zeroThresholdOut': 0.0,
'boundaryMode': 'constrained',
'offImagePixelValue': 0,
'suppressOutsideBox': True,
'forceBoxContraction': False,
'suppressByAlpha': False,
'logPrefix': None,
# Undocumented parameters:
'nta_aspectRatio': 0.3,
'nta_effectiveWidth': 4.5,
'nta_wavelength': 5.6,
'nta_lobeSuppression': True,
'nta_debugLogBuffers': False,
'nta_morphologyMethod': 'best',
}
# Our C implementation performs the 2D convolution using
# integer math, but scales the operands to preserve
# precision. The scaling is done by left shifting the Gabor
# filter coefficients by a fixed number of bits:
_integerMathShifts = 12 # 2^12 = 4096
_integerMathScale = 1 << _integerMathShifts
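# (illustrative: with a 12-bit shift, a filter coefficient of 0.5 would be
# stored as round(0.5 * 4096) = 2048 before the integer convolution)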
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public API calls
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def __init__(self,
# Filter size:
filterDim=None,
# Filter responses:
numOrientations=None,
phaseMode=None,
centerSurround=None,
targetType=None,
# Normalization:
gainConstant=None,
normalizationMethod=None,
perPlaneNormalization=None,
perPhaseNormalization=None,
# Post-processing:
postProcessingMethod=None,
postProcessingSlope=None,
postProcessingCenter=None,
postProcessingMin=None,
postProcessingMax=None,
zeroThresholdOut=None,
# Bounding effects:
boundaryMode=None,
offImagePixelValue=None,
suppressOutsideBox=None,
forceBoxContraction=None,
suppressByAlpha=None,
# Logging
logPrefix=None,
# Additional keywords
**keywds
):
"""
@param filterDim -- The size (in pixels) of both the width and height of the
gabor filters. Defaults to 9x9.
@param numOrientations -- The number of gabor filter orientations to produce.
The half-circle (180 degrees) of rotational angle will be evenly partitioned.
Defaults to 4, which produces a gabor bank containing filters oriented
at 0, 45, 90, and 135 degrees.
@param phaseMode -- The number of separate phases to compute per orientation.
Valid values are: 'single' or 'dual'. In 'single', responses to each such
orientation are rectified by absolutizing them; i.e., a 90-degree edge
will produce the same responses as a 270-degree edge, and the two
responses will be indistinguishable. In "dual" mode, the responses to
each orientation are rectified by clipping at zero, and then creating
a second output response by inverting the raw response and again clipping
at zero; i.e., a 90-degree edge will produce a response only in the
90-degree-oriented plane, and a 270-degree edge will produce a response
only in the dual phase plane associated with the 90-degree plane (an
implicit 270-degree plane.) Default is 'single'.
@param centerSurround -- Controls whether an additional filter corresponding to
a non-oriented "center surround" response is applied to the image.
If phaseMode is "dual", then a second "center surround" response plane
is added as well (the inverted version of the center-surround response.)
Defaults to False.
@param targetType -- The preferred "target" of the gabor filters. A value of
'line' specifies that line detectors (peaks in the center and troughs
on either side) are to be used. A value of 'edge' specifies that edge
detectors (with a peak on one side and a trough on the other) are to
be used. Default is 'edge'.
@param gainConstant -- A multiplicative amplifier that is applied to the gabor
responses after any normalization. Defaults to 1.0; larger values
increase the sensitivity to edges.
@param normalizationMethod -- Controls the method by which responses are
normalized on a per image (and per scale) basis. Accepts the following
three legal values:
"fixed": No response normalization;
"max": Applies a global gain value to the responses so that the
max response equals the value of 'gainConstant'
"mean": Applies a global gain value to the responses so that the
mean response equals the value of 'gainConstant'
Default is 'fixed'.
@param perPlaneNormalization -- Controls whether normalization (as specified by
'normalizationMethod') is applied globally across all response planes
(for a given scale), or individually to each response plane. Default
is False. Note: this parameter is ignored if normalizationMethod is "fixed".
@param perPhaseNormalization -- Controls whether normalization (as specified by
'normalizationMethod') is applied globally across both phases for a
particular response orientation and scale, or individually to each
phase of the response. Default is True. Note: this parameter is
ignored if normalizationMethod is "fixed".
@param postProcessingMethod -- Controls what type of post-processing (if any)
is to be performed on the normalized responses. Valid value are:
"raw": No post-processing is performed; final output values are
unmodified after normalization
"sigmoid": Passes normalized output values through a sigmoid function
parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
"threshold": Passes normalized output values through a piecewise linear
thresholding function parameterized by 'postProcessingMin'
and 'postProcessingMax'.
@param postProcessingSlope -- Controls the slope (steepness) of the sigmoid
function used when 'postProcessingMethod' is set to 'sigmoid'.
@param postProcessingCenter -- Controls the center point of the sigmoid function
used when 'postProcessingMethod' is set to 'sigmoid'.
@param postProcessingMin -- If 'postProcessingMethod' is set to 'threshold', all
normalized response values less than 'postProcessingMin' are suppressed to zero.
@param postProcessingMax -- If 'postProcessingMethod' is set to 'threshold', all
normalized response values greater than 'postProcessingMax' are clamped to one.
@param zeroThresholdOut -- if all outputs of a gabor node are below this threshold,
they will all be driven to absolute 0. This is useful in conjunction with
using the product mode/don't care spatial pooler which needs to know when
an input should be treated as 0 vs being normalized to sum to 1.
@param boundaryMode -- Controls how GaborNode deals with boundary effects. Accepts
two valid parameters:
'constrained' -- Gabor responses are normally only computed for image locations
that are far enough from the edge of the input image so that the entire
filter mask fits within the input image. Thus, the spatial dimensions of
the output gabor maps will be smaller than the input image layers.
'sweepOff' -- Gabor responses will be generated at every location within
the input image layer. Thus, the spatial dimensions of the output gabor
maps will be identical to the spatial dimensions of the input image.
For input image locations that are near the edge (i.e., a portion of
the gabor filter extends off the edge of the input image), the values
of pixels that are off the edge of the image are taken to be as specified
by the parameter 'offImagePixelValue'.
Default is 'constrained'.
@param offImagePixelValue -- If 'boundaryMode' is set to 'sweepOff', then this
parameter specifies the value of the input pixel to use for "filling"
enough image locations outside the bounds of the original image.
Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
@param suppressOutsideBox -- If True, then gabor responses outside of the bounding
box (provided from the sensor) are suppressed. Internally, the bounding
box is actually expanded by half the filter dimension (respecting the edge
of the image, of course) so that responses can be computed for all image
locations within the original bounding box.
@param forceBoxContraction -- Fine-tunes the behavior of bounding box suppression.
If False (the default), then the bounding box will only be 'contracted'
(by the half-width of the filter) in the dimension(s) in which it is not
the entire span of the image. If True, then the bounding box will be
contracted unconditionally.
@param suppressByAlpha -- A boolean that, if True, instructs GaborNode to use
the pixel-accurate alpha mask received on the input 'validAlphaIn' for
the purpose of suppression of responses.
@param logPrefix -- If non-None, causes the response planes at each scale, and
for each input image, to be written to disk using the specified prefix
for the name of the log images. Default is None (no such logging.)
"""
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
#| The following parameters are for advanced configuration and unsupported at this time |
#| They may be specified via keyword arguments only. |
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
#
# @param nta_aspectRatio -- Controls how "fat" (i.e., how oriented) the Gabor
# filters are. A value of 1 would produce completely non-oriented
# (circular) filters; smaller values will produce a more oriented
# filter. Default is 0.3.
#
# @param nta_effectiveWidth -- Controls the rate of exponential drop-off in
# the Gaussian component of the Gabor filter. Default is 4.5.
#
# @param nta_wavelength -- Controls the frequency of the sinusoidal component
# of the Gabor filter. Default is 5.6.
#
# @param nta_lobeSuppression -- Controls whether or not the secondary lobes of the
# Gabor filters are suppressed. The suppression is performed based
# on the radial distance from the oriented edge to which the Gabor
# filter is tuned. If True, then the secondary lobes produced
# by the pure mathematical Gabor equation will be suppressed
# and have no effect; if False, then the pure mathematical
# Gabor equation (digitized into discrete sampling points, of
# course) will be used. Default is True.
#
# @param nta_debugLogBuffers -- If enabled, causes internal memory buffers used
# C implementation to be dumped to disk after each compute()
# cycle as an aid in the debugging of the C code path.
#
# @param nta_morphologyMethod -- Controls the method to use for performing
# morphological operations (erode or dilate) upon the
# valid alpha masks. Legal values are: 'opencv' (use the
# faster OpenCV routines), 'nta' (use the slower routines,
# or 'best' (use OpenCV if it is available on the platform,
# otherwise use the slower routines.)
#
# ------------------------------------------------------
# Handle hidden/undocumented parameters
for paramName in [p for p in self._defaults if self._isHiddenParam(p)]:
exec("%s = keywds.pop('%s', None)" % (paramName, paramName))
# ------------------------------------------------------
# Assign default values to missing parameters
for paramName, paramValue in self._defaults.items():
if eval(paramName) is None:
exec("%s = paramValue" % paramName)
# ------------------------------------------------------
# Handle deprecated parameters
# Deprecated: numOrients
numOrients = keywds.pop('numOrients', None)
if numOrients:
print "WARNING: 'numOrients' has been deprecated and replaced with 'numOrientations'"
if numOrientations is None:
numOrientations = numOrients
elif numOrients != numOrientations:
print "WARNING: 'numOrients' (%s) is inconsistent with 'numOrientations' (%s) and will be ignored" % \
(str(numOrients), str(numOrientations))
# Deprecated: filterPhase
filterPhase = keywds.pop('filterPhase', None)
if filterPhase:
print "WARNING: 'filterPhase' has been deprecated and replaced with 'targetType'"
if targetType is None:
targetType = filterPhase
elif filterPhase != targetType:
print "WARNING: 'filterPhase' (%s) is inconsistent with 'targetType' (%s) and will be ignored" % \
(str(filterPhase), str(targetType))
# Deprecated: nta_edgeMode
nta_edgeMode = keywds.pop('nta_edgeMode', None)
if nta_edgeMode:
print "WARNING: 'nta_edgeMode' has been deprecated and replaced with 'edgeMode'"
if edgeMode is None:
edgeMode = nta_edgeMode
elif nta_edgeMode != edgeMode:
print "WARNING: 'nta_edgeMode' (%s) is inconsistent with 'edgeMode' (%s) and will be ignored" % \
(str(nta_edgeMode), str(edgeMode))
# Deprecated: lateralInhibition
lateralInhibition = keywds.pop('nta_lateralInhibition', None)
if lateralInhibition:
print "WARNING: 'lateralInhibition' has been deprecated and will not be supported in future releases"
# Deprecated: validityShrinkage
validityShrinkage = keywds.pop('validityShrinkage', None)
if validityShrinkage:
print "WARNING: 'validityShrinkage' has been deprecated and replaced with 'suppressOutsideBox'"
if suppressOutsideBox is None:
suppressOutsideBox = (validityShrinkage >= 0.0)
elif suppressOutsideBox != (validityShrinkage >= 0.0):
print "WARNING: 'validityShrinkage' (%s) is inconsistent with 'suppressOutsideBox' (%s) and will be ignored" % \
(str(validityShrinkage), str(suppressOutsideBox))
self._numScales = None
self.nta_phaseIndex = 0
self._inputPyramidTopology = None
self._outputPyramidTopology = None
self._topDownCombiner = None
self._tdNumParents = None
self._enabledNodes = []
self._nodesWithReceptiveField = None
# These are cached inputs/outputs used for detecting/skipping either the
# bottom up or top down compute to improve performance.
self._cachedRFInput = None
self._cachedBUInput = None
self._cachedBUOutput = None
self._cachedTDInput = None
self._cachedTDOutput = None
self._cachedResetIn = None
self._cachedValidRegionIn = None
self._cachedValidRegionOut = None
# Profiling information
self._profileObj = None
self._iterations = 0
# No longer needed for receptivefields_test, but still needed to satisfy
# an assertion in _checkEphemeralMembers
if not hasattr(self, "_inputSplitter"):
self._inputSplitter = None
self._rfMask = None
self._rfSize = None
self._rfInvLenY = None
self._rfCenterX = None
self._rfCenterY = None
self._rfMinX = None
self._rfMinY = None
self._rfInvLenX = None
self._rfMaxX = None
self._rfMaxY = None
self._initEphemerals()
# ------------------------------------------------------
# Validate each parameter
for paramName in self._defaults.keys():
self._validate(paramName, eval(paramName))
# ------------------------------------------------------
# Store each parameter value
for paramName in self._defaults.keys():
# Hidden parameters have the 'nta_' prefix stripped
#if self._isHiddenParam(paramName):
# internalName = paramName[4:]
#else:
# internalName = paramName
internalName = self._stripHidingPrefixIfPresent(paramName)
exec("self._%s = %s" % (internalName, paramName))
# ------------------------------------------------------
# Perform additional validations that operate on
# combinations/interactions of parameters
self._doHolisticValidation()
# ------------------------------------------------------
# Set up internal state
# This node always get its input as a padded image cube from the ImageSensor
# It may change in the future when ImageSensor supports packed image pyramids
self._gaborBank = None
# Generation of response images must be explicitly enabled
self.disableResponseImages()
# This node type is non-learning, and thus begins life in 'infer' mode.
# This is only needed because our base class requires it.
self._stage = 'infer'
# We are always connected to an image sensor with padded pixels
self._inputPyramidFormat = 'padded'
# Store the number of output planes we'll produce
self._numPlanes = self.getNumPlanes()
# Initially, we do not generate response images
self._makeResponseImages = False
# Where we keep the maxTopDownOut for every node
self._maxTopDownOut = []
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _stripHidingPrefixIfPresent(self, paramName):
"""
If the named parameter is hidden, strip off the
leading "nta_" prefix.
"""
if self._isHiddenParam(paramName):
return paramName[4:]
else:
return paramName
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _isHiddenParam(self, paramName):
"""
Utility method for returning True if 'paramName' is the name
of a hidden parameter.
"""
return paramName.find('nta_') == 0
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getOutputDims(self, inputDims):
"""
Instance method version of class method
"""
return self.calcOutputDims(inputDims,
self._filterDim,
self._boundaryMode)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getNumPlanes(self):
"""
Instance method version of class method
"""
return self.calcNumPlanes(self._numOrientations,
self._phaseMode,
self._centerSurround)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcOutputDims(cls, inputDims,
filterDim,
boundaryMode,
**keywds):
"""
Public utility method that computes the output dimensions
in form (height, width), given 'inputDims' (height, width),
for a particular 'filterDim'.
"""
# Assign default values to missing parameters
for paramName in ['filterDim', 'boundaryMode']:
if eval(paramName) is None:
defValue = cls._defaults[paramName]
exec("%s = defValue" % paramName)
# Validation
cls._validate('filterDim', filterDim)
cls._validate('boundaryMode', boundaryMode)
# Compute output dimensions
if boundaryMode == 'sweepOff':
shrinkage = 0
elif boundaryMode == 'constrained':
shrinkage = filterDim - 1
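# (illustrative: a 9x9 filter in 'constrained' mode shrinks a 240x320 input
# to 232x312, while 'sweepOff' keeps the input dimensions unchanged)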
return tuple([dim - shrinkage for dim in inputDims])
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def calcNumPlanes(cls, numOrientations=None,
phaseMode=None,
centerSurround=None,
**keywds):
"""
Public utility method that computes the number
of responses planes for a particular Gabor
configuration.
"""
# Assign default values to missing parameters
for paramName in ['numOrientations', 'phaseMode', 'centerSurround']:
if eval(paramName) is None:
defValue = cls._defaults[paramName]
exec("%s = defValue" % paramName)
# Validation
cls._validate('phaseMode', phaseMode)
cls._validate('numOrientations', numOrientations)
cls._validate('centerSurround', centerSurround)
# Compute output planes
numPlanes = numOrientations
if centerSurround:
numPlanes += 1
if phaseMode == 'dual':
numPlanes *= 2
return numPlanes
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doHolisticValidation(self):
"""
Perform additional validations that operate on
combinations/interactions of parameters.
"""
# We must have at least one response plane
if self.getNumPlanes() < 1:
raise RuntimeError("Configuration error: no response planes; " \
"either 'numOrientations' must be > 0 or " \
"'centerSurround' must be True")
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def _validate(cls, name, value):
"""
Validate a parameter. Raises a RunTimeError if
the parameter is invalid.
"""
# ------------------------------------------------------
# Filter size:
# Validation: filterDim
if name == "filterDim":
if type(value) != type(0) or \
value < cls.minFilterDim or \
value % 2 != 1:
raise RuntimeError("Value error: '%s' must be an odd integer >= %d; your value: %s" % \
(name, cls.minFilterDim, str(value)))
# ------------------------------------------------------
# Filter responses:
# Validation: numOrientations
elif name == "numOrientations":
if type(value) != type(0) or \
value < cls.minNumOrients:
raise RuntimeError("Value error: '%s' must be an integers >= %d; your value: %s" % \
(name, cls.minNumOrients, str(value)))
# Validation: phaseMode
elif name == "phaseMode":
if value not in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %s; your value: %s" % \
(name, str(cls._validValues[name]), value))
# Validation: centerSurround
elif name == "centerSurround":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: targetType
elif name == "targetType":
if value not in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), value))
# ------------------------------------------------------
# Normalization:
elif name == "gainConstant":
if type(value) not in [type(0), type(0.0)] or float(value) < 0.0:
raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
(name, str(value)))
# Validation: targetType
elif name == "normalizationMethod":
if not value in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), value))
# Validation: perPlaneNormalization
elif name == "perPlaneNormalization":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: perPhaseNormalization
elif name == "perPhaseNormalization":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Post-processing:
# Validation: targetType
elif name == "postProcessingMethod":
if not value in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), value))
# Validation: postProcessingSlope
elif name == "postProcessingSlope":
if type(value) not in [type(0), type(0.0)] or float(value) <= 0.0:
raise RuntimeError("Value error: '%s' must be a float or integer > 0.0; your value: %s" % \
(name, str(value)))
# Validation: postProcessingCenter
elif name == "postProcessingCenter":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
(name, str(value)))
# Validation: postProcessingMin
elif name == "postProcessingMin":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
(name, str(value)))
# Validation: postProcessingMax
elif name == "postProcessingMax":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer; your value: %s" % \
(name, str(value)))
# Validation: zeroThresholdOut
elif name == "zeroThresholdOut":
if type(value) not in [type(0), type(0.0)]:
raise RuntimeError("Value error: '%s' must be a float or integer >= 0.0; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Boundary effects:
# Validation: boundaryMode
elif name == "boundaryMode":
if not value in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), str(value)))
# Validation: offImagePixelValue
elif name == "offImagePixelValue":
if value != 'colorKey' and (type(value) not in (int, float) or float(value) < 0.0 or float(value) > 255.0):
raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 255, or 'colorKey'; your value: %s" % \
(name, str(value)))
# Validation: suppressOutsideBox
elif name == "suppressOutsideBox":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: forceBoxContraction
elif name == "forceBoxContraction":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: suppressByAlpha
elif name == "suppressByAlpha":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Logging
# Validation: logPrefix
elif name == "logPrefix":
if value is not None and (type(value) != type("") or len(value) == 0):
raise RuntimeError("Value error: '%s' must be a string; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Undocumented parameters:
# Validation: aspectRatio
elif name == "nta_aspectRatio":
if type(value) not in [type(0), type(0.)] or value <= 0.0:
raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
(name, str(value)))
# Validation: effectiveWidth
elif name == "nta_effectiveWidth":
if type(value) not in [type(0), type(0.)] or value <= 0.0:
raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
(name, str(value)))
# Validation: wavelength
elif name == "nta_wavelength":
if type(value) not in [type(0), type(0.)] or value <= 0.0:
raise RuntimeError("Value error: '%s' must be a float > 0.0; your value: %s" % \
(name, str(value)))
# Validation: lobeSuppression
elif name == "nta_lobeSuppression":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: debugLogBuffers
elif name == "nta_debugLogBuffers":
if value not in [True, False]:
raise RuntimeError("Value error: '%s' must be a boolean; your value: %s" % \
(name, str(value)))
# Validation: morphologyMethod
elif name == "nta_morphologyMethod":
if value not in cls._validValues[name]:
raise RuntimeError("Value error: '%s' must be one of %; your value: %s" % \
(name, str(cls._validValues[name]), str(value)))
elif value == "opencv" and cv is None:
raise RuntimeError(
"'%s' was explicitly specified as 'opencv' " \
"but OpenCV is not available on this platform" % name)
# ------------------------------------------------------
# Deprecated parameters:
# Validation: numOrients
elif name == "numOrients":
if type(value) != type(0) or \
value < cls.minNumOrients:
raise RuntimeError("Value error: '%s' must be an integers >= %d; your value: %s" % \
(name, cls.minNumOrients, str(value)))
# Validation: lateralInhibition
elif name == "lateralInhibition":
if type(value) not in [type(0), type(0.0)] or value < 0.0 or value > 1.0:
raise RuntimeError("Value error: '%s' must be a float >= 0 and <= 1; your value: %s" % \
(name, str(value)))
# Validation: validityShrinkage
elif name == "validityShrinkage":
if type(value) not in [type(0), type(0.0)] or float(value) < 0.0 or float(value) > 1.0:
raise RuntimeError("Value error: '%s' must be a float or integer between 0 and 1; your value: %s" % \
(name, str(value)))
# ------------------------------------------------------
# Unknown parameter
else:
raise RuntimeError("Unknown parameter: %s [%s]" % (name, value))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def initialize(self, dims, splitterMaps):
"""Build the gaborfilter bank.
This method is called after construction.
"""
# Preparations (creation of buffer, etc.)
# Send the dims as a tuple that contains one pair. This is needed to make
# the node treat its input as a single scale.
self._prepare((dims,))
# Determine the number of response planes
self._numPlanes = self.getNumPlanes()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getParameter(self, parameterName, nodeSet=""):
"""
Get the value of a PyRegion parameter.
@param parameterName -- the name of the parameter to retrieve, as defined
by the Node Spec.
"""
if parameterName in self._defaults:
# Hidden "nta_" parameters are internally stored as
# class attributes without the leading "nta"
if parameterName.startswith("nta_"):
parameterName = parameterName[4:]
return eval("self._%s" % parameterName)
# Handle standard MRG infrastructure
elif parameterName == 'nta_width':
return self._inputPyramidTopology[0]['numNodes'][0]
elif parameterName == 'nta_height':
return self._inputPyramidTopology[0]['numNodes'][1]
# Handle the maxTopDownOut read-only parameter
elif parameterName == 'maxTopDownOut':
return self._maxTopDownOut
# Handle deprecated parameters
elif parameterName == 'numOrients':
return self._numPlanes
elif parameterName == 'filterPhase':
return self._targetType
elif parameterName == 'nta_edgeMode':
return self._boundaryMode
elif parameterName == 'nta_lateralInhibition':
return 0.0
# Unknown parameter (at least by GaborNode)
else:
return PyRegion.getParameter(self, parameterName, nodeSet)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def setParameter(self, parameterName, parameterValue, nodeSet=""):
"""
Set the value of a PyRegion parameter.
@param parameterName -- the name of the parameter to update, as defined
by the Node Spec.
@param parameterValue -- the value to which the parameter is to be set.
"""
# @todo -- Need to add validation of parameter changes
settableParams = ["suppressOutsideBox", "forceBoxContraction",
"suppressByAlpha", "offImagePixelValue",
"perPlaneNormalization", "perPhaseNormalization",
"nta_debugLogBuffers", "logPrefix",
"zeroThresholdOut"]
regenParams = ["gainConstant", "normalizationMethod",
"postProcessingMethod", "postProcessingSlope",
"postProcessingCenter", "postProcessingMin",
"postProcessingMax"]
if parameterName in settableParams + regenParams:
# Hidden "nta_" parameters are stored internally without the leading "nta_" prefix (see getParameter)
attrName = parameterName[4:] if parameterName.startswith("nta_") else parameterName
setattr(self, "_%s" % attrName, parameterValue)
elif parameterName == 'nta_morphologyMethod':
self._morphologyMethod = parameterValue
# Not one of our parameters
else:
return PyRegion.setParameter(self, parameterName, parameterValue, nodeSet)
# Generate post-processing lookup-tables (LUTs) that will be
# used by the C implementation
if parameterName in regenParams:
self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def enableResponseImages(self):
"""
Enable the generation of PIL Images representing the Gabor responses.
"""
self._makeResponseImages = True
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def disableResponseImages(self):
"""
Disable the generation of PIL Images representing the Gabor responses.
"""
self._makeResponseImages = False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def getResponseImages(self, whichResponse='all',
preSuppression=False,
whichScale='all',
whichPhase=0,
whichDirection='bottomUp'):
"""
Return a list of PIL Images representing the Gabor responses
computed upon the latest multi-resolution input image pyramid.
@param whichResponse -- Indicates which Gabor orientation response
should be returned. If 'all' (the default), then false
color composite images will be generated that contain the
gabor responses for all orientations. Otherwise, it should
be an integer index between 0 and numOrients-1, in which
case grayscale images will be generated.
@param preSuppression -- Indicates whether the images should be
generated before bounding box suppression is performed
(if True), or after suppression (if False, the default.)
@param whichScale -- Indicates which multi-resolution scale
should be used to generate the response Images. If 'all'
(the default), then images will be generated for each
scale in the input multi-resolution grid, and will be
returned in a list. Otherwise, it should be an integer
index between 0 and numResolutions-1 (the number of
layers in the multi-resolution grid), in which case a
single Image will be returned (not a list).
@param whichPhase -- Indicates which phase of the responses to use
(only meaningful in dual-phase mode); 0 selects the first
phase, and values greater than 0 select the second phase.
@param whichDirection -- Indicates which direction of response images should
be returned ('bottomUp', 'topDown', 'combined'). 'bottomUp'
gets the unaltered bottom-up responses, 'topDown' gets the
top-down feedback responses, and 'combined' gets the bottom-up
responses gated by the top-down feedback.
@returns -- Either a single PIL Image, or a list of PIL Images
that correspond to different resolutions.
"""
# Make sure response images were enabled
if not self._makeResponseImages:
# Need to generate images now
if whichDirection == 'bottomUp':
if self.response is None:
return
response = self.response
elif whichDirection == 'topDown':
if self.tdInput is None:
return
response = self.tdInput
elif whichDirection == 'combined':
if self.selectedBottomUpOut is None:
return
response = self.selectedBottomUpOut
if response is None:
# No response to use
return
self._genResponseImages(response, preSuppression=preSuppression, phase=whichDirection)
# Make sure we have images to provide
if self._responseImages is None:
return
# Pull subset of images based on 'preSuppression' setting
imageSet = self._responseImages.get(self._getResponseKey(preSuppression))
# Validate format of 'whichScale' arg
numScales = len(self._inputPyramidTopology)
if whichScale != 'all' and (type(whichScale) != type(0) or whichScale < 0 or whichScale >= numScales):
raise RuntimeError, \
"'whichScale' must be 'all' or an integer between 0 and %d." % self._numScales
# Validate format of 'whichResponse' arg
if whichResponse not in ['all', 'centerSurround']:
if type(whichResponse) != type(0) or whichResponse < 0 or whichResponse >= self._numPlanes:
raise RuntimeError, \
"'whichResponse' must be 'all' or an integer between 0 and %d." % self._numPlanes
# Make sure the requested phase of response exists
if not imageSet.has_key(whichDirection):
return
# Handle "exotic" responses
if whichResponse != 'all':
if whichResponse == 'centerSurround':
whichResponse = self._numOrientations
assert type(whichResponse) == type(0)
if whichPhase > 0:
whichResponse += self._numOrientations
if self._centerSurround:
whichResponse += 1
# Return composite gabor response(s)
return imageSet[whichDirection][whichResponse][whichScale]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Public class methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def deserializeImage(cls, serialized):
"""
Helper function that training/testing scripts can invoke in order
to deserialize debugging images provided by the getResponseImages()
method.
"""
image = Image.open(StringIO(serialized))
image.load()
return image
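# Minimal round-trip sketch (assumes the enclosing class is named GaborNode
# and that 'serialized' holds an image string previously returned by
# getResponseImages()):
#
#   pilImage = GaborNode.deserializeImage(serialized)
#   pilImage.save("gaborResponse.png")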
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private methods - Overriding base class
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
class ARRAY(ctypes.Structure):
_fields_ = [
("nd", ctypes.c_int),
("dimensions", ctypes.c_void_p),
("strides", ctypes.c_void_p),
("data", ctypes.c_void_p),
]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _wrapArray(self, array):
"""
Helper function that takes a numpy array and returns
a 4-tuple consisting of ctypes references to the
following:
(nd, dimensions, strides, data)
"""
if array is None:
return None
else:
return ctypes.byref(self.ARRAY(len(array.ctypes.shape),
ctypes.cast(array.ctypes.shape, ctypes.c_void_p),
ctypes.cast(array.ctypes.strides, ctypes.c_void_p),
array.ctypes.data))
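# Illustrative sketch of how a wrapped array might be handed to the C
# entry point (the exact gaborCompute() argument list is an assumption;
# it lives in the C library, not in this file):
#
#   data = numpy.zeros((240, 320), dtype=numpy.float32)
#   arrayRef = self._wrapArray(data)   # ctypes byref to an ARRAY struct
#   # self._gaborComputeProc(..., arrayRef, ...)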
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepare(self, inputDims):
"""
Perform one-time preparations needed for gabor processing.
"""
#inputDims = [(inputDim['numNodes'][1], inputDim['numNodes'][0]) \
# for inputDim in self._inputPyramidTopology]
self.prepare(inputDims)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def prepare(self, inputDims):
"""
Perform one-time preparations needed for gabor processing.
Public interface allowing the GaborNode to be tested
outside of the full RTE.
@param inputDims: a list of input image sizes in the
form of 2-tuples (width, height)
"""
# Reverse the input dims into (height, width) format for internal storage
self._numScales = len(inputDims)
self._inputDims = inputDims
# Compute output dims for each input dim
self._outputDims = [self.getOutputDims(inputDim) for inputDim in inputDims]
# Compute the minimum output dimension
self._minInputDim = min([min(inputDim) for inputDim in self._inputDims])
self._minOutputDim = min([min(outputDim) for outputDim in self._outputDims])
# Break out
self._inHeight, self._inWidth = [float(x) for x in self._inputDims[0]]
self._outHeight, self._outWidth = [float(x) for x in self._outputDims[0]]
# Load the _gaborNode C library
libGabor = self._loadLibrary("_algorithms")
# Prepare the C calls
if libGabor:
self._gaborComputeProc = libGabor.gaborCompute
else:
# If we could not load the library, then we'll default to
# using numpy for our gabor processing.
self._gaborComputeProc = None
# Prepare some data structures in advance
# Allocate working buffers to be used by the C implementation
#self._buffers = [numpy.zeros(inputDim, dtype=numpy.int32) for inputDim in inputDims]
self._allocBuffers()
# Generate post-processing lookup-tables (LUTs) that will be
# used by the C implementation
self._makeLUTs()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _alignToFour(self, val):
"""
Utility helper that rounds a value 'val' up to ensure
that it is evenly divisible by four (e.g., for
purposes of memory alignment, etc.)
"""
return (((val - 1) / 4) + 1) * 4
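# Worked examples (Python 2 integer division):
#   _alignToFour(5)  ==> ((4 / 4) + 1) * 4 == 8
#   _alignToFour(8)  ==> ((7 / 4) + 1) * 4 == 8
#   _alignToFour(9)  ==> ((8 / 4) + 1) * 4 == 12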
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeLUTs(self):
"""
Generate post-processing lookup-tables (LUTs) that will be
used by the C implementation
"""
# --------------------------------------------------
# Define LUT parameters
# For 'normalizationMethod' of 'mean', this internal parameter
# controls the trade-off between how finely we can discretize our
# LUT bins vs. how often a raw response value "overflows" the
# maximum LUT bin and has to be clamped. In essence, any raw
# response value greater than 'meanLutCushionFactor' times the
# mean response for the image will "overflow" and be clamped
# to the response value of the largest bin in the LUT.
meanLutCushionFactor = 4.0
# We'll use a LUT large enough to give us decent precision
# but not so large that it causes cache problems.
# A total of 1024 bins seems reasonable:
numLutShifts = 10
numLutBins = (1 << numLutShifts)
# --------------------------------------------------
# Build LUT
# Build our Gabor Bank if it doesn't already exist
self._buildGaborBankIfNeeded()
# Empirically compute the maximum possible response value
# given our current parameter settings. We do this by
# generating a fake image of size (filterDim X filterDim)
# that has a pure vertical edge and then convolving it with
# the first gabor filter (which is always vertically oriented)
# and measuring the response.
testImage = numpy.ones((self._filterDim, self._filterDim), dtype=numpy.float32) * 255.0
#testImage[:, :(self._filterDim/2)] = 0
testImage[numpy.where(self._gaborBank[0] < 0.0)] *= -1.0
maxRawResponse = (testImage * self._gaborBank[0]).sum()
# At run time our Gabor responses will be scaled (via
# bit shifting) so that we can do integer math instead of
# floating point math, but still have high precision.
# So we'll simulate that in order to get a comparable result.
maxShiftedResponse = maxRawResponse / (255.0 * float(self._integerMathScale))
# Depending on our normalization method, our LUT will have a
# different scaling factor (for pre-scaling values prior
# to discretizing them into LUT bins)
if self._normalizationMethod == 'fixed':
postProcScalar = float(numLutBins - 1) / maxShiftedResponse
elif self._normalizationMethod == 'max':
postProcScalar = float(numLutBins - 1)
elif self._normalizationMethod == 'mean':
postProcScalar = float(numLutBins - 1) / meanLutCushionFactor
else:
assert False
# Build LUT
lutInputs = numpy.array(range(numLutBins), dtype=numpy.float32) / postProcScalar
# Sigmoid: output = (sigma(input) - sigma(0)) / (1 - sigma(0)),
# where sigma(x) = 1 / (1 + exp(slope * (center - x)))
if self._postProcessingMethod == 'sigmoid':
offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
scaleFactor = 1.0 / (1.0 - offset)
postProcLUT = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
* (self._postProcessingCenter - lutInputs), \
-40.0, 40.0)) + 1.0)) - offset) * scaleFactor
# For some parameter choices, it is possible that numerical precision
# issues will result in the 'offset' being ever so slightly larger
# than the value of postProcLUT[0]. This will result in a very
# tiny negative value in the postProcLUT[0] slot, which is
# undesirable because the output of the rescaled sigmoid should
# always be bounded within [0.0, 1.0].
# So we clip the LUT values to this range just to keep
# things clean.
postProcLUT = numpy.clip(postProcLUT, 0.0, 1.0)
# Threshold: Need piecewise linear LUT
elif self._postProcessingMethod == "threshold":
postProcLUT = lutInputs
postProcLUT[lutInputs < self._postProcessingMin] = 0.0
postProcLUT[lutInputs > self._postProcessingMax] = 1.0
# Raw: no LUT needed at all
else:
assert self._postProcessingMethod == "raw"
postProcLUT = None
# If we are in 'dual' phase mode, then we'll reflect
# the LUT on the negative side of zero to speed up
# processing inside the C function.
if False:
if postProcLUT is not None and self._phaseMode == 'dual':
# Make a reflected LUT
comboLut = numpy.concatenate((numpy.fliplr(postProcLUT[numpy.newaxis,:]),
postProcLUT[numpy.newaxis,:]),
axis=1)
# Now clone the reflected LUT and clip its responses
# for positive and negative phases
postProcLUT = numpy.concatenate((comboLut, comboLut), axis=1).reshape(4*numLutBins)
# First half of it is for positive phase
postProcLUT[:numLutBins] = 0.0
# Second half of it is for negative phase
postProcLUT[-numLutBins:] = 0.0
# Store our LUT and its pre-scaling factor
self._postProcLUT = postProcLUT
self._postProcLutScalar = postProcScalar
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _allocBuffers(self):
"""
Allocate some working buffers that are required
by the C implementation.
"""
# Allocate working buffers to be used by the C implementation
#self._buffers = [numpy.zeros(inputDim, dtype=numpy.int32) for inputDim in self._inputDims]
# Compute how much "padding" ou input buffers
# we will need due to boundary effects
if self._boundaryMode == 'sweepOff':
padding = self._filterDim - 1
else:
padding = 0
# For each scale, allocate a set of buffers
# Allocate a working "input buffer" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetIn = []
#for inHeight, inWidth in self._inputDims:
# self._bufferSetIn = numpy.zeros((inHeight + padding,
# _alignToFour(inWidth + padding)),
# dtype=numpy.int32)
self._bufferSetIn = [numpy.zeros((inHeight + padding,
self._alignToFour(inWidth + padding)),
dtype=numpy.int32) \
for inHeight, inWidth in self._inputDims]
# Allocate a working plane of "output buffers" of unsigned int32
# We want our buffers to have rows that are aligned on 16-byte boundaries
#self._bufferSetOut = []
#for outHeight, outWidth in self._outputDims:
# self._bufferSetOut += numpy.zeros((self._numOrientations,
# outHeight,
# _alignToFour(outWith)),
# dtype=numpy.int32)
numBuffersNeeded = self._numOrientations
if self._centerSurround:
numBuffersNeeded += 1
self._bufferSetOut = [numpy.zeros((numBuffersNeeded,
outHeight,
self._alignToFour(outWidth)),
dtype=numpy.int32) \
for outHeight, outWidth in self._outputDims]
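# Illustrative buffer shapes (the numbers are assumptions, not defaults):
# for a 240x320 input with filterDim == 9 and boundaryMode == 'sweepOff',
# padding is 8, so the input buffer is 248 x _alignToFour(328) == 328;
# for any other boundary mode it is simply 240 x 320.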
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _initEphemerals(self):
self._gaborComputeProc = None
# For (optional) debug logging, we keep track of the number of
# images we have seen
self._imageCounter = 0
self._bufferSetIn = None
self._bufferSetOut = None
self._morphHeader = None
self._erosion = None
self._numScales = None
self._inputDims = None
self._outputDims = None
self._minInputDim = None
self._minOutputDim = None
self._inHeight = None
self._inWidth = None
self._outHeight = None
self._outWidth = None
self._postProcLUT = None
self._postProcLutScalar = None
self._filterPhase = None
self.response = None
self._responseImages = None
self._makeResponseImages = None
self.tdInput = None
self.selectedBottomUpOut = None
self._tdThreshold = None
self._morphHeader = None
if not hasattr(self, '_numPlanes'):
self._numPlanes = None
# Assign default values to missing parameters
for paramName, paramValue in self._defaults.items():
paramName = self._stripHidingPrefixIfPresent(paramName)
if not hasattr(self, "_%s" % paramName):
exec("self._%s = paramValue" % paramName)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getEphemeralMembers(self):
"""
Callback (to be overridden) allowing the class to publish a list of
all "ephemeral" members (i.e., data members that should not and/or
cannot be pickled.)
"""
# We can't pickle a pointer to a C function
return [
'_gaborComputeProc',
'_bufferSetIn',
'_bufferSetOut',
'_imageCounter',
'_morphHeader',
'_erosion',
]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _loadLibrary(self, libraryName, libSubDir=None):
"""
Utility method for portably loading a NuPIC shared library.
Note: by default we assume the library lives in the NuPIC "bindings" package.
@param: libraryName - the name of the library (sans extension)
@returns: reference to the loaded library; otherwise raises
a runtime exception.
"""
# By default, we will look for our shared library in our
# bindings directory.
if not libSubDir:
libSubDir = "bindings"
# Attempt to load the library
try:
# All of these shared libraries are python modules. Let python find them
# for us; once we have the module's path, we'll load it with CDLL.
dottedPath = '.'.join(['nupic', libSubDir, libraryName])
module = __import__(dottedPath, fromlist=[libraryName])
libPath = module.__file__
lib = ctypes.cdll.LoadLibrary(libPath)
# These calls initialize the logging system inside
# the loaded library. Disabled for now.
# See comments at INIT_FROM_PYTHON in gaborNode.cpp
# pythonSystemRefP = PythonSystem.getInstanceP()
# lib.initFromPython(ctypes.c_void_p(pythonSystemRefP))
return lib
except Exception, e:
print "Warning: Could not load shared library: %s" % libraryName
print "Exception: %s" % str(e)
return None
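# Usage sketch: prepare() loads the gabor kernel via
#   lib = self._loadLibrary("_algorithms")
# and, if that succeeds, binds self._gaborComputeProc = lib.gaborCompute;
# any other library name passed here would be hypothetical.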
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def compute(self, inputs, outputs):
"""
Run one iteration of fat node, profiling it if requested.
Derived classes should NOT override this method.
The guts of the compute are contained in the _compute() call so that
we can profile it if requested.
"""
# Modify this line to turn on profiling for a given node. The results file
# ('hotshot.stats') will be sensed and printed out by the vision framework's
# RunInference.py script at the end of inference.
# Also uncomment the hotshot import at the top of this file.
if False:
if self._profileObj is None:
self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
# filename, lineevents, linetimings
self._profileObj.runcall(self._gaborCompute, *[inputs, outputs])
else:
self._gaborCompute(inputs, outputs)
self._imageCounter += 1
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getUpperLeftPixelValue(self, inputs, validAlpha=None):
"""
Extract the intensity value of the upper-left pixel.
"""
# Obtain raw input pixel data
#buInputVector = inputs['bottomUpIn'][0].array()
buInputVector = inputs['bottomUpIn']
# Respect valid region for selection of
# color key value
pixelIndex = 0
# If we have an alpha channel, then we need to find
# the first pixel for which the alpha is nonzero
if validAlpha is not None:
# Temporarily decode the polarity that is stored
# in the first alpha element
indicatorValue = validAlpha[0,0]
if indicatorValue < 0.0:
validAlpha[0,0] = -1.0 - indicatorValue
alphaLocns = numpy.where(validAlpha >= 0.5)[0]
# Put the indicator back
validAlpha[0,0] = indicatorValue
# If there are no positive alpha pixels anywhere, then
# just use white (255) as the color key (which may not
# be the "correct" thing to do, but we have no other
# options really).
if len(alphaLocns) == 0:
return 255.0
pixelIndex = alphaLocns[0]
# Otherwise, if we have a bounding box, then we
# need to find the first (upper-left) pixel in
# the valid bounding box
elif 'validRegionIn' in inputs:
#validRegionIn = inputs['validRegionIn'][0].array()
validRegionIn = inputs['validRegionIn']
left = int(validRegionIn[0])
top = int(validRegionIn[1])
if left > 0 or top > 0:
pixelIndex = left + top * int(self._inWidth)
return buInputVector[pixelIndex]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _gaborCompute(self, inputs, outputs):
"""
Run one iteration of multi-node.
We are taking the unconventional approach of overriding the
base class compute() method in order to avoid applying the
splitter map, since this is an expensive process for a densely
overlapped node such as GaborNode.
"""
# Build our Gabor Bank (first time only)
self._buildGaborBankIfNeeded()
# If we are using "color-key" mode, then detect the value of
# the upper-left pixel and use it as the value of
# 'offImagePixelValue'
if self._offImagePixelValue == "colorKey":
offImagePixelValue = self._getUpperLeftPixelValue(inputs)
else:
offImagePixelValue = float(self._offImagePixelValue)
# Fast C implementation
# Get our inputs into numpy arrays
buInputVector = inputs['bottomUpIn']
validRegionIn = inputs.get('validRegionIn', None)
# Obtain access to valid alpha region, if it exists
# and if we are configured to use the pixel-accurate
# alpha validity mask (as opposed to using the
# valid bounding box.)
if self._suppressByAlpha and 'validAlphaIn' in inputs:
if self._numScales > 1:
raise NotImplementedError("Multi-scale GaborNodes cannot currently handle alpha channels")
# We assume alpha channels are expressed in a format in
# which '0.0' corresponds to total suppression of
# responses, and '255.0' corresponds to no suppression
# whatsoever, and intermediate values apply a linearly
# proportional degree of suppression (e.g., a value of
# '127.5' would result in a 50% suppression of the
# raw responses.)
#validAlpha = inputs['validAlphaIn'][0].array()[:, numpy.newaxis] * (1.0/255.0)
validAlpha = inputs['validAlphaIn'][:, numpy.newaxis] * (1.0/255.0)
# If we are using an alpha channel, then it will take
# a bit more work to find the correct "upper left"
# pixel because we can't just look for the first
# upper-left pixel in the valid bounding box; we have
# to find the first upper-left pixel in the actual
# valid alpha zone.
if self._offImagePixelValue == "colorKey":
offImagePixelValue = self._getUpperLeftPixelValue(inputs, validAlpha)
else:
validAlpha = None
if self.nta_phaseIndex == 0: # Do bottom-up inference.
self._computeWithC(buInputVector, validRegionIn,
outputs, offImagePixelValue, validAlpha)
# Cache input. The output is already stored in self.response
if self._topDownCombiner is not None and self._stage == 'infer':
self._cachedBUInput = buInputVector
self._cachedValidRegionIn = validRegionIn
else: # Try top-down inference.
cachedBUInput = self._cachedBUInput \
if self._cachedBUInput is not None else numpy.zeros(0)
validCachedBUInput = numpy.array_equal(buInputVector, cachedBUInput)
cachedValidRegionIn = self._cachedValidRegionIn \
if self._cachedValidRegionIn is not None else numpy.zeros(0)
validCachedValidRegionIn = ((validRegionIn is None) or
numpy.array_equal(validRegionIn, cachedValidRegionIn))
# See if we can use the cached values from the last bottom up compute. For better performance,
# we only perform the cache checking when we know we might have top down computes.
topDownConditionsMet = (self.nta_phaseIndex == 1) and \
(self._stage == 'infer') and \
(self._topDownCombiner is not None) and \
validCachedBUInput and validCachedValidRegionIn
if not topDownConditionsMet:
message = (
("Top-down conditions were not met for GaborNode:\n") +
(" phaseIndex=%s (expected %d)\n" % (self.nta_phaseIndex, 1)) +
(" stage='%s' (expected '%s')\n" % (self._stage, "infer")) +
(" topDownCombiner is %s (expected not None)\n" %
("not None" if (self._topDownCombiner is not None) else "None")) +
(" buInputVector %s cache (expected ==)\n" %
("==" if validCachedBUInput else "!=")) +
(" validRegionIn %s cache (expected ==)\n" %
("==" if validCachedValidRegionIn else "!="))
)
import warnings
warnings.warn(message, stacklevel=2)
return
# No need to copy to the node outputs, they should be the same as last time.
# IMPORTANT: When using the pipeline scheduler, you MUST write to the output buffer
# each time because there are 2 output buffers. But, we know that for feedback
# networks, the pipeline scheduler cannot and will not be used, so it's OK to
# skip the write to the output when we have top down computes.
# Perform the topDown compute instead
#print "Gabor topdown"
buOutput = self.response.reshape(self._inputSplitter.shape[0], self._numPlanes)
PyRegion._topDownCompute(self, inputs, outputs, buOutput,
buInputVector)
# DEBUG DEBUG
#self._logPrefix = "debug"
#print "WARNING: using a hacked version of GaborNode.py [forced logging]"
# Write debugging images
if self._logPrefix is not None:
self._doDebugLogging()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doDebugLogging(self):
"""
Dump the most recently computed responses to logging image files.
"""
preSuppression = False
# Make the response images if they haven't already been made
if not self._makeResponseImages:
self._genResponseImages(self.response, preSuppression=False)
# Write the response images to disk
imageSet = self._responseImages[self._getResponseKey(preSuppression=False)]['bottomUp']
for orient, orientImages in imageSet.items():
for scale, image in orientImages.items():
if type(scale) == type(0):
if type(orient) == type(0):
orientCode = "%02d" % orient
else:
orientCode = "%s" % orient
debugPath = "%s.img-%04d.scale-%02d.orient-%s.png" % (self._logPrefix,
self._imageCounter,
scale, orientCode)
self.deserializeImage(image).save(debugPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def filter(self, image, validRegionIn=None,
orientation='all', phase=0,
scaleIndex=0,
cachedResponse=None,
gain=1.0):
"""
Perform gabor filtering on a PIL image, and return a PIL
image containing the composite responses.
@param validRegionIn: [left, top, right, bottom]
"""
if validRegionIn is None:
validRegionIn = (0, 0, image.size[0], image.size[1])
# Decide whether or not to use numpy
self._buildGaborBankIfNeeded()
# Determine proper input/output dimensions
inHeight, inWidth = self._inputDims[scaleIndex]
outHeight, outWidth = self._outputDims[scaleIndex]
inputSize = inHeight * inWidth
outputSize = outHeight * outWidth * self._numPlanes
inputVector = numpy.array(image.getdata()).astype(RealNumpyDType)
inputVector.shape = (inHeight, inWidth)
assert image.size[1] == inHeight
assert image.size[0] == inWidth
# Locate correct portion of output
outputVector = numpy.zeros((outHeight, outWidth, self._numPlanes), dtype=RealNumpyDType)
outputVector.shape = (self._numPlanes, outHeight, outWidth)
inputVector.shape = (inHeight, inWidth)
# Use the provided cached response
if cachedResponse is not None:
response = cachedResponse
# If we need to re-generate the gabor response cache:
else:
# If we are using "color-key" mode, then detect the value of
# the upper-left pixel and use it as the value of
# 'offImagePixelValue'
if self._offImagePixelValue == "colorKey":
# Respect valid region for selection of
# color key value
[left, top, right, bottom] = validRegionIn
offImagePixelValue = inputVector[top, left]
#offImagePixelValue = inputVector[0, 0]
else:
offImagePixelValue = self._offImagePixelValue
# Extract the bounding box signal (if present).
validPyramid = validRegionIn / numpy.array([self._inWidth,
self._inHeight,
self._inWidth,
self._inHeight],
dtype=RealNumpyDType)
# Compute the bounding box to use for our C implementation
bbox = self._computeBBox(validPyramid, outWidth, outHeight)
imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
self._inputDims[scaleIndex][0]],
dtype=numpy.int32)
# Perform gabor processing
self._doGabor(inputVector, bbox, imageBox, outputVector, scaleIndex, offImagePixelValue)
outputVector = numpy.rollaxis(outputVector, 0, 3)
outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes).flatten()
assert outputVector.dtype == RealNumpyDType
numLocns = len(outputVector) / self._numPlanes
response = outputVector.reshape(numLocns, self._numPlanes)
nCols, nRows = self._outputPyramidTopology[scaleIndex]['numNodes']
startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(scaleIndex)
# Make composite response
if orientation == 'all':
# Build all the single-orientation responses
responseSet = []
for responseIdx in xrange(self._numPlanes):
img = Image.new('L', (nCols, nRows))
img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, responseIdx]).astype(numpy.uint8))
responseSet += [img]
finalResponse = self._makeCompositeImage(responseSet)
# Make an individual response
else:
img = Image.new('L', (nCols, nRows))
if orientation == 'centerSurround':
orientation = self._numOrientations
if phase > 0:
orientation += self._numOrientations
if self._centerSurround:
orientation += 1
img.putdata((gain * 255.0 * response[:stopNodeIdx-startNodeIdx, orientation]).astype(numpy.uint8))
finalResponse = img
return finalResponse, response
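# Illustrative usage sketch (hedged; assumes 'image' is a grayscale PIL
# image whose size matches this node's input dimensions for scale 0):
#
#   compositeImg, rawResponse = gaborNode.filter(image, orientation='all')
#   # Re-use the cached response to pull out a single orientation:
#   singleImg, _ = gaborNode.filter(image, orientation=0,
#                                   cachedResponse=rawResponse)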
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _buildGaborBankIfNeeded(self):
"""
Check to see if we have a Gabor Bank, and if not, then build it.
"""
if self._gaborBank is None:
self._buildGaborBank()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doCompute(self, rfInput, rfMask, rfSize, resetSignal, validPyramid):
"""
Actual compute() implementation. This is a placeholder that should
be overridden by derived sub-classes
@param rfInput -- a 2-dimensional numpy array containing the planes of the
input pyramid (one row per output location).
@param rfMask -- a 2-dimensional numpy array (of same shape as 'rfInput')
that contains a value of 0.0 for every element that corresponds
to a padded "dummy" (sentinel) value within 'rfInput', and
a value of 1.0 for every real input element.
@param rfSize -- a 1-dimensional numpy array (same number of rows as
'rfInput') containing the total number of real (non-dummy)
elements for each row of 'rfInput'.
@param resetSignal -- boolean indicating whether the current input is the
first of a new temporal sequence.
@param validPyramid -- a 4-element numpy array (vector) that specifies the
zone in which the input pyramid is "valid". A point in the
pyramid is "valid" if that point maps to a location in the
original image, rather than a "padded" region that was added
around the original image in order to scale/fit it into the
dimensions of the input pyramid.
The 4-element array is in the following format:
[left, top, right, bottom]
where 'left' is the fraction (between 0 and 1) of the width of
the image where the valid zone begins, etc.
Returns:
outputPyramid -- a list of numpy arrays containing planes of the
output pyramid.
"""
numGaborFilters = self._gaborBank.shape[1]
numOutputLocns = rfInput.shape[0]
# ---------------------------------------------------------------
# Conceptual pipeline:
#
# 1. Apply Gabor filtering upon the input pixels X to
#    generate raw responses Y0. Even in dual-phase mode,
# we will only need to perform the actual computations
# on a single phase (because the responses can be inverted).
#
# 2. Rectify the raw Gabor responses Y0 to produce rectified
# responses Y1.
#
# 3. Apply an adaptive normalization operation to the
# rectified responses Y1 to produce Y2.
#
# 4. Amplify the normalized responses Y2 by a fixed gain G
# to produce amplified responses Y3.
#
# 5. Apply post-processing upon the amplified responses Y3 to
# produce final responses Z.
#
#----------------------------------
# Step 1 - Raw Gabor filtering:
# Convolve each output location against the complete gabor bank.
responseRaw = numpy.dot(rfInput, self._gaborBank)
#----------------------------------
# Step 2 - Rectify responses:
effectiveInfinity = 1.0e7
if self._phaseMode == 'single':
responseRectified = numpy.abs(responseRaw)
elif self._phaseMode == 'dual':
responseRectified = numpy.concatenate((responseRaw.clip(min=0.0, max=effectiveInfinity),
(-responseRaw).clip(min=0.0, max=effectiveInfinity)),
axis=1)
#----------------------------------
# Step 3 - Adaptive normalization:
# Step 4 - Amplification
# If we are not doing any normalization, then it is easy:
if self._normalizationMethod == 'fixed':
# In 'fixed' mode, we simply apply a default normalization
# that takes into account the fact that the input range
# lies between 0 and 255.
responseAmplified = responseRectified * (self._gainConstant / 255.0)
# Otherwise, we have to perform normalization
else:
# First we'll apply the power rule, if needed
if self._normalizationMethod in ['meanPower', 'maxPower']:
responseToUse = (responseRectified * responseRectified)
elif self._normalizationMethod in ['mean', 'max']:
responseToUse = responseRectified
# At this point, our responseRectified array is of
# the shape (totNumOutputLocns, numOrients)
# First, we will perform the max/mean operation over
# the spatial dimensions; the result will be an
# intermediate array of the shape:
# (numScales, numOrients) which will contain the
# max/mean over the spatial dimensions for each
# scale and orientation.
numLayers = len(self._inputPyramidTopology)
layerOffsets = self._computeLayerOffsets(self._inputPyramidTopology)
responseStats = []
for k in xrange(numLayers):
startOffset = layerOffsets[k]
stopOffset = layerOffsets[k+1]
if self._normalizationMethod in ['max', 'maxPower']:
responseStats += [responseToUse[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
elif self._normalizationMethod in ['mean', 'meanPower']:
responseStats += [responseToUse[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
responseStats = numpy.array(responseStats).reshape(numLayers, self._numPlanes)
# This should be a numpy array containing the desired statistics
# over the spatial dimensions; one statistic for each tuple
# of (scale, orientation)
# If we used a power law, then take the square root of the statistics
if self._normalizationMethod in ['maxPower', 'meanPower']:
responseStats = numpy.sqrt(responseStats)
# Compute statistics over orientation (if needed)
if not self._perOrientNormalization:
if self._normalizationMethod in ['max', 'maxPower']:
responseStats = responseStats.max(axis=1)
elif self._normalizationMethod in ['mean', 'meanPower']:
responseStats = responseStats.mean(axis=1)
responseStats = responseStats[:, numpy.newaxis]
# At this point, responseStats is of shape: (numLayers, 1)
# Compute statistics over scale (if needed)
if not self._perScaleNormalization:
if self._normalizationMethod in ['max', 'maxPower']:
responseStats = responseStats.max(axis=0)
elif self._normalizationMethod in ['mean', 'meanPower']:
responseStats = responseStats.mean(axis=0)
# Expand back out for each scale
responseStats = responseStats[numpy.newaxis, :] * numpy.ones((numLayers, 1))
# Expand back out for each orientation
if not self._perOrientNormalization:
responseStats = responseStats[:, numpy.newaxis] * numpy.ones((1, self._numPlanes))
# Step 4 - Amplification
responseStats = responseStats.reshape(numLayers, self._numPlanes)
gain = self._gainConstant * numpy.ones((numLayers, self._numPlanes), dtype=RealNumpyDType)
nonZeros = numpy.where(responseStats > 0.0)
gain[nonZeros] /= responseStats[nonZeros]
# Fast usage case: neither per-scale nor per-orient normalization
if not self._perScaleNormalization and not self._perOrientNormalization:
responseAmplified = responseRectified * gain[0, 0]
# Somewhat slower: per-orient (but not per-scale) normalization
elif not self._perScaleNormalization:
responseAmplified = responseRectified * gain[0, :]
# Slowest: per-scale normalization
else:
responseAmplified = None
for k in xrange(numLayers):
startOffset = layerOffsets[k]
stopOffset = layerOffsets[k+1]
if not self._perOrientNormalization:
gainToUse = gain[k, 0]
else:
gainToUse = gain[k, :]
thisResponse = responseRectified[startOffset:stopOffset, :] * gainToUse
if responseAmplified is None:
responseAmplified = thisResponse
else:
responseAmplified = numpy.concatenate((responseAmplified, thisResponse), axis=0)
#----------------------------------
# Step 5 - Post-processing
# No post-processing (linear)
if self._postProcessingMethod == "raw":
responseFinal = responseAmplified
# Sigmoidal post-processing
elif self._postProcessingMethod == "sigmoid":
offset = 1.0 / (1.0 + numpy.exp(self._postProcessingSlope * self._postProcessingCenter))
scaleFactor = 1.0 / (1.0 - offset)
responseFinal = ((1.0 / (numpy.exp(numpy.clip(self._postProcessingSlope \
* (self._postProcessingCenter - responseAmplified), \
-40.0, 40.0)) + 1.0)) - offset) * scaleFactor
# Piece-wise linear post-processing
elif self._postProcessingMethod == "threshold":
responseFinal = responseAmplified
responseFinal[responseAmplified < self._postProcessingMin] = 0.0
responseFinal[responseAmplified > self._postProcessingMax] = 1.0
#----------------------------------
# Optional: Dump statistics for comparative purposes
#self._dumpStats(responseFinal, "gabor.stats.txt")
# Generate raw response images (prior to suppression)
if self._makeResponseImages:
self._genResponseImages(responseFinal, preSuppression=True)
# Apply suppression to responses outside valid pyramid.
if self._suppressOutsideBox:
self._applyValiditySuppression(responseFinal, validPyramid)
# Perform the zeroOutThreshold clipping now if requested
if self._zeroThresholdOut > 0.0:
# Get the max of each node
nodeMax = responseFinal.max(axis=1).reshape(numOutputLocns)
# Zero out children where all elements are below the threshold
responseFinal[nodeMax < self._zeroThresholdOut] = 0
# Generate final response images (after suppression)
if self._makeResponseImages:
self._genResponseImages(responseFinal, preSuppression=False)
# Store the response so that it can be retrieved later
self.response = responseFinal
return responseFinal
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _applyValiditySuppression(self, response, validPyramid):
"""
Apply suppression to responses outside valid pyramid.
This overrides the default PyRegion implementation.
"""
# We compute the valid fraction of each output location's RF by
# computing the valid fraction of its spatial dimensions.
# @todo -- Generalize this to handle more than two spatial dimensions.
validX = (self._rfMaxX.clip(min=validPyramid[0], max=validPyramid[2]) - \
self._rfMinX.clip(min=validPyramid[0], max=validPyramid[2])) * \
self._rfInvLenX
validY = (self._rfMaxY.clip(min=validPyramid[1], max=validPyramid[3]) - \
self._rfMinY.clip(min=validPyramid[1], max=validPyramid[3])) * \
self._rfInvLenY
# At this point the validX and validY numpy vectors contain values
# between 0 and 1 that encode the validity of each output location
# with respect to the X and Y spatial dimensions, respectively.
# Now we map the raw validities of each output location into
# suppression factors; i.e., a scalar (for each output location)
# that will be multiplied against each response for that particular
# output location.
# Use a hard threshold:
# Discovered a nasty, subtle bug here. The code used to be like this:
#
# suppressionFactor = ((validX * validY) >= self._validitySuppressionLow).astype(RealNumpyDType)
#
# However, in the case of validitySuppressionLow of 1.0, numpy experienced
# "random" roundoff errors, and nodes for which both validX and validY were
# 1.0 would be computed as 1 - epsilon, which would fail the test against
# validitySuppressionLow, and thus get suppressed incorrectly.
# So we introduced an epsilon to deal with this situation.
suppressionFactor = ((validX * validY) + self._epsilon >= \
self._validitySuppressionLow).astype(RealNumpyDType)
# Apply the suppression factor to the output response array
response *= suppressionFactor[:, numpy.newaxis]
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _dumpStats(self, response, statsLogPath):
"""
In order to do a kind of "unit testing" of the GaborNode
tuning parameters for a particular application, it is useful
to dump statistics on the responses at different scales
and orientations/phases.
We'll dump the following statistics for each (scale, orientation) tuple:
* response mean
* response standard deviation
* power mean (squared response mean)
* response max
@param response -- response array of shape (totNumOutputLocns, numOrients)
"""
meanResponse = []
meanPower = []
stddevResponse = []
maxResponse = []
# Compute a squared (power) response
power = response * response
# Compute our mean/max/stddev statistics over the spatial dimensions
# for each scale and for each orientation. The result will be four
# arrays of shape: (numScales, numOrients) which will contain the
# statistics over the spatial dimensions for each scale and orientation.
numLayers = len(self._outputPyramidTopology)
layerOffsets = self._computeLayerOffsets(self._outputPyramidTopology)
for k in xrange(numLayers):
startOffset = layerOffsets[k]
stopOffset = layerOffsets[k+1]
# Mean response
meanResponse += [response[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Max response
maxResponse += [response[startOffset:stopOffset].max(axis=0)[numpy.newaxis, :]]
# Std. deviation response
stddevResponse += [response[startOffset:stopOffset].std(axis=0)[numpy.newaxis, :]]
# Mean power
meanPower += [power[startOffset:stopOffset].mean(axis=0)[numpy.newaxis, :]]
# Now compile the responses at each scale into overall arrays
# of shape: (numScales, numOrientations)
meanResponse = numpy.array(meanResponse).reshape(numLayers, self._numPlanes)
maxResponse = numpy.array(maxResponse).reshape(numLayers, self._numPlanes)
stddevResponse = numpy.array(stddevResponse).reshape(numLayers, self._numPlanes)
meanPower = numpy.array(meanPower).reshape(numLayers, self._numPlanes)
# Finally, form the different statistics into a single descriptive vector
responseStats = numpy.concatenate((meanResponse[numpy.newaxis,:,:],
maxResponse[numpy.newaxis,:,:],
stddevResponse[numpy.newaxis,:,:],
meanPower[numpy.newaxis,:,:]), axis=0)
# Append to the stats log
fpStatsLog = open(statsLogPath, "a")
response = " ".join(["%f" % x for x in responseStats.flatten().tolist()])
fpStatsLog.write(response + "\n")
fpStatsLog.close()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doTopDownInfer(self, tdInput, tdNumParents, buOutput, buInput):
"""
Actual top down compute() implementation. This is a placeholder that should
be overridden by derived sub-classes.
@param tdInput -- a 3D array containing the top-down inputs to each baby node.
Think of this as N 2D arrays, where N is the number of baby nodes.
Each baby node's 2D array has R rows, where each row is the top-down
output from one of the parents. The width of each row is equal to the
width of the bottomUpOut of the baby node. If a baby node
has only 2 parents, but R is 5 for example, then the last 3 rows
of the 2D array will contain all 0's. The tdNumParents argument
can be referenced to find out how many parents the node actually has.
The tdInput array is structured in this manner to make it easy to
sum the contributions from the parents. All the sub-class needs to
do is a numpy.add.reduce(tdInput, axis=1).
@param tdNumParents a vector whose length is equal to the number of baby nodes. Each
element contains the number of parents of each baby node.
@param buInput -- a 2D array containing the bottom-up inputs to each baby node.
This is the same input that is passed to the _doCompute() method,
but it is called rfInput there.
@param buOutput -- a 2D array containing the results of the bottomUp compute for
this node. This is a copy of the return value returned from the
_doCompute method of the node.
Returns:
tdOutput -- a 2-D numpy array containing the outputs from each baby node. Each
row is a baby node output.
"""
# NOTE: Making this a float32 makes the copy to the node outputs at the end of
# the compute faster.
#tdOutput = numpy.zeros(self._inputSplitter.shape, dtype='float32')
# print "Top-down infer called on a Gabor node. Use breakpoint to step through"
# print "and make sure things are as expected:"
# import pdb; pdb.set_trace()
numBabyNodes = len(tdInput)
numOrients = len(tdInput[0][0])
assert self._numPlanes == numOrients # Number of filters must match top-down input
tdThreshold = numpy.ones((numBabyNodes, numOrients))
version=('tdThreshold', 'combine', 'td_normalize')
minResponse=1e-10
# Average top-down inputs for each baby Node
tdInput_avg = numpy.add.reduce(tdInput, axis=1) / tdNumParents
# For the gabor node, we will usually get 1 orientation fed down from
# the complex level above us. This is because the SparsePooler above that
# sparsified its inputs and only saves one orientation from each complex node.
# But, for the Gabor node which is at the bottom of the hierarchy, it makes more
# sense to spread the topdown activation among all the orientations since
# each gabor covers only a few pixels and won't select one object from another.
tdMaxes = tdInput_avg.max(axis=1)
tdInput_avg *= 0
tdInput_avg += tdMaxes.reshape(-1,1)
if tdInput_avg.max() <= minResponse:
#print "Top-down Input is Blank"
pass
else:
if 'combine' in version: # Combine top-down and bottom-up inputs
tdInput_avg *= buOutput
if 'td_normalize' in version: # Normalize top-down inputs for viewing
# td_max = tdInput_avg.max()
# tdInput_avg /= td_max
td_max = tdInput_avg.max()
if td_max != 0:
tdInput_avg /= td_max
if 'tdThreshold' in version: # Use tdInput_avg to threshold bottomUp outputs
if not hasattr(self, '_tdThreshold'):
self._tdThreshold = 0.01
tdThreshold = tdInput_avg > self._tdThreshold
self.tdInput = tdInput_avg
self.selectedBottomUpOut = buOutput * tdThreshold
theMax = self.selectedBottomUpOut.max()
if theMax > 0:
self.selectedBottomUpOut /= theMax
# Generate response images
if self._makeResponseImages:
self._genResponseImages(self.tdInput, preSuppression=False, phase='topDown')
self._genResponseImages(self.selectedBottomUpOut, preSuppression=False,
phase='combined')
# Generate the topDown outputs. At this point, tdMaxes contains the max gabor orientation
# output from each baby node. We will simply "spread" this value across all of the
# topDown outputs for each baby node as an indication of their input activation level.
# In a perfect world, you would try and reconstruct the input by summing the inverse of the
# gabor operation for each output orientation. But, for now, we are only using the top
# down output of the Gabor as an indication of the relative input strength to each gabor
# filter - essentially as a mask on the input image.
tdOutput = numpy.ones(self._inputSplitter.shape, dtype='float32')
tdOutput *= tdMaxes.reshape(-1,1)
# Save the maxTopDownOut for each baby node so that it can be returned as a read-only
# parameter. This provides faster performance for things like the top down image inspector
# that only need the max output from each node
self._maxTopDownOut = tdMaxes
return tdOutput
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _computeWithC(self,
inputPlane,
validRegionIn,
outputs,
offImagePixelValue,
validAlpha):
"""
Perform Gabor processing using custom C library.
"""
if validRegionIn is None:
validRegionIn = (0, 0, self._inWidth, self._inHeight)
inputLen = len(inputPlane)
if self._inputPyramidTopology is None or \
inputLen == self._inWidth * self._inHeight * len(self._inputPyramidTopology):
isPadded = True
else:
assert inputLen == sum([lvl['numNodes'][0] * lvl['numNodes'][1] \
for lvl in self._inputPyramidTopology])
isPadded = False
# Extract the bounding box signal (if present).
validPyramid = validRegionIn / numpy.array([self._inWidth,
self._inHeight,
self._inWidth,
self._inHeight],
dtype=RealNumpyDType)
# First extract a numpy array containing the entire input vector
assert inputPlane.dtype == numpy.float32
# Convert the output images to a numpy vector
#outputPlane = outputs['bottomUpOut'].wvector()[:].array()
outputPlane = outputs['bottomUpOut']
assert outputPlane.dtype == numpy.float32
inputOffset = 0
outputOffset = 0
for scaleIndex in xrange(self._numScales):
# Handle padded case (normal)
if isPadded:
inputScaleIndex = 0
# Handle packed case (deployed)
else:
inputScaleIndex = scaleIndex
# Determine proper input/output dimensions
inHeight, inWidth = self._inputDims[inputScaleIndex]
outHeight, outWidth = self._outputDims[scaleIndex]
inputSize = inHeight * inWidth
outputSize = outHeight * outWidth * self._numPlanes
# Locate correct portion of input
inputVector = inputPlane[inputOffset:inputOffset+inputSize]
inputOffset += inputSize
inputVector.shape = (inHeight, inWidth)
# Locate correct portion of output
outputVector = outputPlane[outputOffset:outputOffset+outputSize]
outputVector.shape = (self._numPlanes, outHeight, outWidth)
# Compute the bounding box to use for our C implementation
bbox = self._computeBBox(validPyramid, self._inputDims[scaleIndex][1],
self._inputDims[scaleIndex][0])
imageBox = numpy.array([0, 0, self._inputDims[scaleIndex][1],
self._inputDims[scaleIndex][0]],
dtype=numpy.int32)
## --- DEBUG CODE ----
#global id
#o = inputVector
#print outputVector.shape, len(o)
#f = os.path.abspath('gabor_input_%d.txt' % id)
#print f
#numpy.savetxt(f, o)
#id += 1
##from dbgp.client import brk; brk(port=9019)
## --- DEBUG CODE END ----
# Erode and/or dilate the alpha channel
# @todo -- This should be moved into the C function
if validAlpha is not None:
validAlpha = self._adjustAlphaChannel(validAlpha)
# Perform gabor processing
self._doGabor(inputVector,
bbox,
imageBox,
outputVector,
scaleIndex,
offImagePixelValue,
validAlpha)
# Optionally, dump working buffers for debugging purposes
if self._debugLogBuffers:
self._logDebugBuffers(outputVector, scaleIndex)
# Note: it would be much better if we did not have to do this
# post-processing "transposition" operation, and instead just
# performed all the different orientation computations for
# each pixel.
# Note: this operation costs us about 1 msec
outputVector = numpy.rollaxis(outputVector, 0, 3)
outputVector = outputVector.reshape(outWidth * outHeight, self._numPlanes)
assert outputVector.dtype == numpy.float32
# Perform the zeroOutThreshold clipping now if requested
# @todo -- This should be moved into the C function
if self._zeroThresholdOut > 0.0:
# Get the max of each node
nodeMax = outputVector.max(axis=1).reshape(outWidth * outHeight)
# Zero out children where all elements are below the threshold
outputVector[nodeMax < self._zeroThresholdOut] = 0.0
outputPlane[outputOffset:outputOffset+outputSize] = outputVector.flatten()
outputOffset += outputSize
# Generate final response images (after suppression)
if self._makeResponseImages:
self._genResponseImages(outputPlane, preSuppression=False)
# Store the response so that it can be retrieved later
self.response = outputPlane
## --- DEBUG CODE ----
#global id
#o = outputPlane
##print outputVector.shape, len(o)
#f = os.path.abspath('gabor_output_%d.txt' % id)
#print f
#numpy.savetxt(f, o)
#id += 1
##from dbgp.client import brk; brk(port=9019)
## --- DEBUG CODE END ----
# De-multiplex inputs/outputs
#outputs['bottomUpOut'].wvector()[:] = outputPlane
outputs['bottomUpOut'] = outputPlane
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _adjustAlphaChannel(self, alphaMask):
"""
Apply an alpha suppression channel (in place) to each plane
of gabor responses.
@param alphaMask: a numpy array of shape (numPixels, 1)
containing the alpha mask that determines which responses
are to be suppressed. If the values in the alpha mask
are in the range (0.0, 255.0), then the alpha mask will
be eroded by halfFilterDim; if the values in the alpha
mask are in the range (-255.0, 0.0), then the mask will
be dilated by halfFilterDim.
"""
# Determine whether to erode or dilate.
# In order to make this determination, we check
# the sign of the first alpha pixel:
#
# MorphOp true mask[0,0] alpha[0,0] code
# ======= ============== ===============
# erode 0 (background) 0
# erode 255 (foreground) 255
# dilate 0 (background) -1
# dilate 255 (foreground) -256
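# Worked decoding examples (per the table above, using
# trueValue = -1.0 - code for negative codes):
#   code -256 --> dilate, true mask value 255 (foreground)
#   code   -1 --> dilate, true mask value 0   (background)
#   code >= 0 --> erode,  mask value is already the true value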
indicatorValue = alphaMask[0,0]
if indicatorValue < 0.0:
operation = 'dilate'
# Convert the alpha value back to its
# true value
alphaMask[0,0] = -1.0 - indicatorValue
else:
operation = 'erode'
# We need to perform enough iterations to cover
# half of the filter dimension
halfFilterDim = (self._filterDim - 1) / 2
if self._morphologyMethod == "opencv" or \
(self._morphologyMethod == "best" and cv is not None):
# Use the faster OpenCV code path
assert cv is not None
# Lazily allocate the necessary OpenCV wrapper structure(s)
self._prepMorphology()
# Make the OpenCV image header structure's pixel buffer
# pointer point at the underlying memory buffer of
# the alpha channel (numpy array)
self._morphHeader.contents.imageData = alphaMask.ctypes.data
# Perform dilation in place
if operation == 'dilate':
cv.Dilate(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
# Perform erosion in place
else:
cv.Erode(self._morphHeader, self._morphHeader, iterations=halfFilterDim)
else:
# Use the custom C++ code path
if not self._erosion:
from nupic.bindings.algorithms import Float32Erosion
self._erosion = Float32Erosion()
self._erosion.init(int(self._inHeight), int(self._inWidth))
# Perform the erosion/dilation in-place
self._erosion.compute(alphaMask,
alphaMask,
halfFilterDim,
(operation=='dilate'))
# Legacy numpy method
# If we are in constrained mode, then the size of our
# response planes will be less than the size of our
# alpha mask (by halfFilterDim along each edge).
# So we need to "shave off" halfFilterDim pixels
# from all edges of the alpha mask before applying
# suppression to the response planes.
inWidth = int(self._inWidth)
inHeight = int(self._inHeight)
# For erosion mode, we need to shave off halfFilterDim
# from the four edges of the alpha mask.
if operation == "erode":
alphaMask.shape = (inHeight, inWidth)
alphaMask[:halfFilterDim, :] = 0.0
alphaMask[-halfFilterDim:, :] = 0.0
alphaMask[:, :halfFilterDim] = 0.0
alphaMask[:, -halfFilterDim:] = 0.0
alphaMask.shape = (inHeight * inWidth, 1)
# For dilation mode, we need to shave off halfFilterDim
# from any edge of the alpha mask that touches the
# image boundary *unless* the alpha mask is "full"
# (i.e., consumes the entire image.)
elif operation == "dilate":
# Handle top, bottom, left, and right
alphaMask.shape = (inHeight, inWidth)
zapTop = numpy.where(alphaMask[0,:])[0]
zapBottom = numpy.where(alphaMask[-1,:])[0]
zapLeft = numpy.where(alphaMask[:,0])[0]
zapRight = numpy.where(alphaMask[:,-1])[0]
# Apply zaps unless all of them are of the full
# length possible
if len(zapTop) < inWidth or len(zapBottom) < inWidth or \
len(zapLeft) < inHeight or len(zapRight) < inHeight:
alphaMask[:halfFilterDim, zapTop] = 0.0
alphaMask[-halfFilterDim:, zapBottom] = 0.0
alphaMask[zapLeft, :halfFilterDim] = 0.0
alphaMask[zapRight, -halfFilterDim:] = 0.0
alphaMask.shape = (inHeight * inWidth, 1)
return alphaMask
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _prepMorphology(self):
"""
Prepare buffers used for eroding/dilating alpha
channels.
"""
# Check if we've already allocated a header
#if not hasattr(self, '_morphHeader'):
if not getattr(self, '_morphHeader', None):
if cv is None:
raise RuntimeError("OpenCV not available on this platform")
# Create a header only (not backed by data memory) that will
# allow us to operate on numpy arrays (valid alpha channels)
# using OpenCV operations
self._morphHeader = cv.CreateImageHeader(cv.Size(int(self._inWidth),
int(self._inHeight)), 32, 1)
# @todo: this will leak a small bit of memory every time
# we create and use a new GaborNode unless we find a way
# to guarantee the invocation of cv.ReleaseImageHeader()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _computeBBox(self, validPyramid, inWidth, inHeight):
"""
Compute a bounding box given the validPyramid (a fraction
of the valid input region as provided by the sensor) and
the output dimensions for a particular current scale.
"""
# Assemble the bounding box by converting 'validPyramid' from float (0,1) to integer (O,N)
if self._suppressOutsideBox:
halfFilterDim = (self._filterDim - 1) / 2
bbox = numpy.round((validPyramid * numpy.array([inWidth, inHeight, inWidth, inHeight],
dtype=validPyramid.dtype))).astype(numpy.int32)
# Subtract enough padding for our filter on all four edges
      # We'll only subtract enough padding if we have a non-trivial bounding box.
# In other words, if our validRegionIn is [0, 25, 200, 175] for input image
# dimensions of [0, 0, 200, 200], then we will assume that two horizontal strips
# of filler pixels were artificially added at the top and bottom, but no
# such artificial vertical strips were added. So we don't need to erode the
# bounding box horizontally, only vertically.
if self._forceBoxContraction or bbox[0] > 0:
bbox[0] += halfFilterDim
if self._forceBoxContraction or bbox[1] > 0:
bbox[1] += halfFilterDim
if self._forceBoxContraction or bbox[2] < inWidth:
bbox[2] -= halfFilterDim
if self._forceBoxContraction or bbox[3] < inHeight:
bbox[3] -= halfFilterDim
# Clip the bounding box to the size of the image
bbox[0] = max(bbox[0], 0)
bbox[1] = max(bbox[1], 0)
bbox[2] = min(bbox[2], inWidth)
bbox[3] = min(bbox[3], inHeight)
# Make sure the bounding box didn't become negative width/height
bbox[0] = min(bbox[0], bbox[2])
bbox[1] = min(bbox[1], bbox[3])
# If absolutely no suppression is requested under any
# circumstances, then force the bbox to be the entire image
else:
bbox = numpy.array([0, 0, inWidth, inHeight], dtype=numpy.int32)
# Check in case bbox is non-existent or mal-formed
if bbox[0] < 0 or bbox[1] < 0 or bbox[2] <= bbox[0] or bbox[3] <= bbox[1]:
print "WARNING: empty or malformed bounding box:", bbox
# Fix bbox so that it is a null box but at least not malformed
if bbox[0] < 0:
bbox[0] = 0
if bbox[1] < 0:
bbox[1] = 0
if bbox[2] < bbox[0]:
bbox[2] = bbox[0]
if bbox[3] < bbox[1]:
bbox[3] = bbox[1]
return bbox
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _logDebugBuffers(self, outputVector, scaleIndex, outPrefix="debug"):
"""
    Dump detailed debugging information to disk (specifically, the
    state of internal working buffers used by the C implementation).
@param outPrefix -- Prefix to prepend to standard names
for debugging images.
"""
# Save input buffer
self._saveImage(self._bufferSetIn[scaleIndex],
"%s.buffer.in.%02d.png" % (outPrefix, scaleIndex))
# Save output buffer planes
for k in xrange(self._bufferSetOut[scaleIndex].shape[0]):
# We do integer arithmetic shifted by 12 bits
      buf = (self._bufferSetOut[scaleIndex][k] / 4096).clip(min=0, max=255)
self._saveImage(buf, "%s.buffer.out.%02d.%02d.png" % (outPrefix, scaleIndex, k))
# Save raw gabor output images (from C implementation)
for k in xrange(self._numPlanes):
self._saveImage(outputVector[k], "%s.out.%02d.%02d.png" % \
(outPrefix, scaleIndex, k))
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _saveImage(self, imgArray, outPath):
imgDims = imgArray.shape
img = Image.new('L', (imgDims[1], imgDims[0]))
if imgArray.dtype == numpy.float32:
img.putdata( ((254.9 * imgArray.flatten()).clip(min=0.0, max=255.0)).astype(numpy.uint8) )
#img.putdata((255.0 * imgArray.flatten()).astype(numpy.uint8))
elif imgArray.dtype == numpy.int32:
img.putdata((imgArray.flatten()).astype(numpy.uint8))
else:
assert imgArray.dtype == numpy.uint8
img.putdata(imgArray.flatten())
img.save(outPath)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _doGabor(self, inputVector,
bbox,
imageBox,
outputVector,
scaleIndex,
offImagePixelValue=None,
validAlpha=None):
"""
Prepare arguments and invoke C function for
performing actual 2D convolution, rectification,
normalization, and post-processing.
"""
    if offImagePixelValue is None:
      # Fall back to the node's default off-image pixel value, then sanity-check it
      offImagePixelValue = self._offImagePixelValue
      assert type(offImagePixelValue) in [type(0), type(0.0)]
# If we actually have a valid validAlpha mask,
# then reshape it to the input image size
if validAlpha is not None:
origAlphaShape = validAlpha.shape
validAlpha.shape = inputVector.shape
# Invoke C function
result = self._gaborComputeProc(
self._wrapArray(self._gaborBank),
self._wrapArray(inputVector),
self._wrapArray(validAlpha),
self._wrapArray(bbox),
self._wrapArray(imageBox),
self._wrapArray(outputVector),
ctypes.c_float(self._gainConstant),
self._mapParamFromPythonToC('boundaryMode'),
ctypes.c_float(offImagePixelValue),
self._mapParamFromPythonToC('phaseMode'),
self._mapParamFromPythonToC('normalizationMethod'),
self._mapParamFromPythonToC('perPlaneNormalization'),
self._mapParamFromPythonToC('perPhaseNormalization'),
self._mapParamFromPythonToC('postProcessingMethod'),
ctypes.c_float(self._postProcessingSlope),
ctypes.c_float(self._postProcessingCenter),
ctypes.c_float(self._postProcessingMin),
ctypes.c_float(self._postProcessingMax),
self._wrapArray(self._bufferSetIn[scaleIndex]),
self._wrapArray(self._bufferSetOut[scaleIndex]),
self._wrapArray(self._postProcLUT),
ctypes.c_float(self._postProcLutScalar),
)
if result < 0:
raise Exception("gaborCompute failed")
# If we actually have a valid validAlpha mask,
    # then reshape it back to its original shape
if validAlpha is not None:
validAlpha.shape = origAlphaShape
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _convertEnumValue(self, enumValue):
"""
Convert a Python integer object into a ctypes integer
that can be passed to a C function and seen as an
int on the C side.
"""
return ctypes.c_int(enumValue)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _mapParamFromPythonToC(self, paramName):
"""
Map Python object values to equivalent enumerated C values.
"""
# boundaryMode
if paramName == "boundaryMode":
if self._boundaryMode == 'constrained':
enumValue = 0
elif self._boundaryMode == 'sweepOff':
enumValue = 1
return self._convertEnumValue(enumValue)
# phaseMode
elif paramName == "phaseMode":
if self._phaseMode == 'single':
enumValue = 0
elif self._phaseMode == 'dual':
enumValue = 1
return self._convertEnumValue(enumValue)
# normalizationMethod
elif paramName == "normalizationMethod":
if self._normalizationMethod == 'fixed':
enumValue = 0
elif self._normalizationMethod == 'max':
enumValue = 1
elif self._normalizationMethod == 'mean':
enumValue = 2
#elif self._normalizationMethod == 'maxPower':
# enumValue = 3
#elif self._normalizationMethod == 'meanPower':
# enumValue = 4
return self._convertEnumValue(enumValue)
# perPlaneNormalization
elif paramName == "perPlaneNormalization":
if not self._perPlaneNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# perPhaseNormalization
elif paramName == "perPhaseNormalization":
if not self._perPhaseNormalization:
enumValue = 0
else:
enumValue = 1
return self._convertEnumValue(enumValue)
# postProcessingMethod
elif paramName == "postProcessingMethod":
if self._postProcessingMethod == 'raw':
enumValue = 0
elif self._postProcessingMethod == 'sigmoid':
enumValue = 1
elif self._postProcessingMethod == 'threshold':
enumValue = 2
return self._convertEnumValue(enumValue)
# Invalid parameter
else:
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Private helper methods
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getValidEdgeModes(self):
"""
Returns a list of the valid edge modes.
"""
return ['constrained', 'sweepOff']
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _serializeImage(self, image):
"""
Serialize a PIL image so that it can be transported through
the runtime engine.
"""
s = StringIO()
format = 'png'
if hasattr(image, 'format') and image.format:
format = image.format
try:
image.save(s, format=format)
except:
image.save(s, format='png')
return s.getvalue()
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getResponseKey(self, preSuppression):
"""
Returns a key used to index the response image dict
(either 'raw' or 'final')
"""
if preSuppression:
return 'raw'
else:
return 'final'
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _genResponseImages(self, rawResponse, preSuppression, phase='bottomUp'):
"""
Generate PIL images from the response array.
@param preSuppression -- a boolean, which indicates whether to
store the generated images using the key 'raw' (if True)
or 'final' (if False) within the _responseImages member dict.
@param phase -- 'bottomUp', 'topDown', or 'combined', depending on which
phase of response image we're generating
Generate a dict of dicts. The primary dict is keyed by response,
which can be either 'all' or an integer between 0 and numOrients-1;
the secondary dicts are keyed by scale, which can be either 'all'
or an integer between 0 and numScales.
"""
if phase not in ('bottomUp', 'topDown', 'combined'):
raise RuntimeError, "phase must be either 'bottomUp', 'topDown', or 'combined'"
numLocns = len(rawResponse.flatten()) / self._numPlanes
response = rawResponse.reshape(numLocns, self._numPlanes)
#numScales = len(self._inputPyramidTopology)
numScales = self._numScales
imageSet = {}
# Build all the single-orientation responses
for responseIdx in xrange(self._numPlanes):
responseSet = {}
# Build all the scales
for scaleIdx in xrange(numScales):
responseSet[scaleIdx] = self._makeImage(response, scaleIdx, responseIdx)
# Build the "all scale" list
#responseSet['all'] = responseSet.values()
imageSet[responseIdx] = responseSet
    # Build the composite responses
responseSet = {}
for scaleIdx in xrange(numScales):
scaleSet = [imageSet[orientIdx][scaleIdx] for orientIdx in xrange(self._numPlanes)]
responseSet[scaleIdx] = self._makeCompositeImage(scaleSet)
imageSet['all'] = responseSet
# Serialize all images
for orientIdx, orientResponses in imageSet.items():
for scaleIdx, scaleResponse in orientResponses.items():
imageSet[orientIdx][scaleIdx] = self._serializeImage(scaleResponse)
imageSet[orientIdx]['all'] = imageSet[orientIdx].values()
# Store the image set
if self._responseImages is None:
self._responseImages = {self._getResponseKey(preSuppression): {}}
self._responseImages[self._getResponseKey(preSuppression)][phase] = imageSet
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _getNodeRangeByScale(self, whichScale):
"""
Returns a 2-tuple of node indices corresponding to the set of
nodes associated with the specified 'whichScale'.
"""
assert whichScale >= 0
#assert whichScale < len(self._outputPyramidTopology)
assert whichScale < self._numScales
startNodeIdx = 0
#for scaleIndex, outputTopo in enumerate(self._outputPyramidTopology):
for scaleIndex, outputDim in enumerate(self._outputDims):
#nCols, nRows = outputTopo['numNodes']
nRows, nCols = outputDim
stopNodeIdx = startNodeIdx + nCols * nRows
if scaleIndex == whichScale:
return (startNodeIdx, stopNodeIdx)
else:
startNodeIdx = stopNodeIdx
assert False
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeImage(self, response, whichScale, whichOrient, gain=1.0):
"""
Generate a single PIL image (using the raw response array) for a
particular scale and orientation.
"""
#nCols, nRows = self._outputPyramidTopology[whichScale]['numNodes']
nRows, nCols = self._outputDims[whichScale]
img = Image.new('L', (nCols, nRows))
startNodeIdx, stopNodeIdx = self._getNodeRangeByScale(whichScale)
img.putdata((gain * 255.0 * response[startNodeIdx:stopNodeIdx,
whichOrient]).clip(min=0.0, max=255.0).astype(numpy.uint8))
return img
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _makeCompositeImage(self, imageSet):
"""
    Create a false color composite image of the individual
orientation-specific gabor response images in 'imageSet'.
"""
# Generate the bands
numBands = 3
bands = [Image.new('L',imageSet[0].size)] * numBands
for k, img in enumerate(imageSet):
whichBand = k % numBands
bands[whichBand] = ImageChops.add(bands[whichBand], img)
# Make final composite for this scale
compositeImage = Image.merge(mode='RGB', bands=bands)
return compositeImage
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
if False:
def _getEffectiveOrients(self):
"""
Internal helper method that returns the number of "effective"
orientations (which treats the dual phases responses as a
single orientation.)
"""
numEffectiveOrients = self._numPlanes
if self._phaseMode == 'dual':
numEffectiveOrients /= 2
if self._centerSurround:
numEffectiveOrients -= 1
return numEffectiveOrients
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
def _buildGaborBank(self):
"""
Build an array of Gabor filters. Also build a 1-D vector of
filter bank indices that maps each output location to a particular
(customized) bank of gabor filters.
"""
# Make sure dimensions of our Gabor filters are odd
assert self._filterDim % 2 == 1
# Create mesh grid indices. The result will be a numpy array of
# shape (2, filterDim, filterDim).
# Then meshGrid[0] stores the row indices of the master grid,
# and meshGrid[1] stores the column indices.
lowerIndex = -(self._filterDim / 2)
upperIndex = 1 + self._filterDim / 2
meshGrid = numpy.mgrid[lowerIndex:upperIndex, lowerIndex:upperIndex]
# If we are supposed to produce only center-surround output
# (no oriented responses), then we will still go through the
# process of making a minimalist bank of 2 oriented gabor
# filters since that is needed by the center-surround filter
# generation code
numOrientations = self._numOrientations
if numOrientations == 0:
numOrientations = 2
# Select the orientation sample points (in radians)
radianInterval = numpy.pi / float(numOrientations)
orientations = numpy.array(range(numOrientations), dtype=RealNumpyDType) * \
radianInterval
# Compute trigonometric functions of orientation
sinTheta = numpy.sin(orientations).reshape(numOrientations, 1, 1)
cosTheta = numpy.cos(orientations).reshape(numOrientations, 1, 1)
# Construct two filterDim X filterDim arrays containing y (row) and
# x (column) coordinates (in dimensions of pixels), respectively.
y = meshGrid[0].reshape(1, self._filterDim, self._filterDim)
x = meshGrid[1].reshape(1, self._filterDim, self._filterDim)
X = x * cosTheta - y * sinTheta
Y = x * sinTheta + y * cosTheta
# Build the Gabor filters
#if hasattr(self, '_phase') and self._phase == 'edge':
if self._targetType == 'edge':
sinusoidalTerm = numpy.sin(2.0 * numpy.pi / self._wavelength * X)
else:
sinusoidalTerm = numpy.cos(2.0 * numpy.pi / self._wavelength * X)
numerator = (X * X + self._aspectRatio * self._aspectRatio * Y * Y)
denominator = -2.0 * self._effectiveWidth * self._effectiveWidth
exponentialTerm = numpy.exp(numerator / denominator)
gaborBank = sinusoidalTerm * exponentialTerm
    # Add center-surround filters, if requested
if self._centerSurround:
expFilter = exponentialTerm[0] * exponentialTerm[numOrientations/2]
# Cubing the raw exponential component seems to give a nice
# center-surround filter
centerSurround = expFilter * expFilter * expFilter
# If our center-surround filter is in addition to the oriented
# filter, then concatenate it to our filter bank; otherwise
# it is the filter bank
if self._numOrientations > 0:
gaborBank = numpy.concatenate((gaborBank, centerSurround[numpy.newaxis,:,:]))
else:
gaborBank = centerSurround[numpy.newaxis,:,:]
# Apply lobe suppression: Suppress the outer lobes of the sinusoidal
# component of the Gabor filters so as to avoid "ringing" effects in
# the Gabor response maps.
#
# We make a single lobe-suppression mask (which is directionally
# oriented.) Then we rotate this mask by each orientation and
# apply it to the pre-suppressed filter bank.
# In order to minimize discontinuities in the gradients, the
# suppression mask will be constructed as follows:
#
# y = 1 - |x|^p
#
# where:
# y = Suppression (0 for total suppression, 1 for no-suppression)
# x = position relative to center
# p = Some exponent that controls the sharpness of suppression
numGaborFilters = gaborBank.shape[0]
# New lobe suppression.
if self._lobeSuppression:
# The orientation is always vertical, so we'll locate the discrete
# filter cell where we go negative
halfFilterDim = (self._filterDim - 1) / 2
firstBadCell = None
for cellIdx in xrange(halfFilterDim, self._filterDim):
if gaborBank[0, 0, cellIdx] < 0.0:
firstBadCell = cellIdx - halfFilterDim
break
if firstBadCell is not None:
radialDist = numpy.abs(X / float(halfFilterDim))
# Establish a radial distance threshold that is halfway
# between the first discrete bad cell and the last good cell.
if firstBadCell == halfFilterDim:
distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
radialDist[0, 0, halfFilterDim + firstBadCell - 1])
else:
assert firstBadCell < halfFilterDim
# Establish a radial distance threshold that is halfway
# between the first discrete bad cell and the second bad cell.
# This seems to give good results in practice.
distThresh = 0.5 * (radialDist[0, 0, halfFilterDim + firstBadCell] + \
radialDist[0, 0, halfFilterDim + firstBadCell + 1])
suppressTerm = (radialDist < distThresh).astype(RealNumpyDType)
if self._centerSurround:
suppressTerm = numpy.concatenate((suppressTerm,
numpy.ones((1, self._filterDim, self._filterDim),
dtype=RealNumpyDType)))
gaborBank *= suppressTerm
# Normalize so that mean of each filter is zero
means = gaborBank.mean(axis=2).mean(axis=1).reshape(numGaborFilters, 1, 1)
offsets = means.repeat(self._filterDim, axis=1).repeat(self._filterDim, axis=2)
gaborBank -= offsets
# Normalize so that sum of squares over each filter is one
squareSums = (gaborBank * gaborBank).sum(axis=2).sum(axis=1).reshape(numGaborFilters, 1, 1)
scalars = 1.0 / numpy.sqrt(squareSums)
gaborBank *= scalars
# Log gabor filters to disk
if self._logPrefix:
for k in xrange(numGaborFilters):
img = Image.new('L', (self._filterDim, self._filterDim))
minVal = gaborBank[k].min()
gaborFilter = gaborBank[k] - minVal
gaborFilter *= (254.99 / gaborFilter.max())
img.putdata(gaborFilter.flatten().astype(numpy.uint8))
img.save("%s.filter.%03d.png" % (self._logPrefix, k))
# Store the Gabor Bank as a transposed set of 'numOrients' 1-D column-vectors
    # which can be easily dot-producted against the split input vectors
# during our compute() calls.
self._gaborBank = (gaborBank.astype(numpy.float32) * 4096.0).astype(numpy.int32)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
@classmethod
def getSpec(cls):
ns = Spec(description = cls.__doc__,
singleNodeOnly=False)
ns.inputs = dict(
bottomUpIn=InputSpec(
description="""The input signal, conceptually organized as an
image pyramid data structure, but internally
organized as a flattened vector.""",
dataType='float',
regionLevel=False,
requireSplitterMap=False),
validRegionIn=InputSpec(
description="""A bounding box around the valid region of the image,
expressed in pixel coordinates; if the first element
of the bounding box is negative, then the valid
region is specified by 'validAlphaIn', in the form
of a non-rectangular alpha channel.""",
dataType='float',
regionLevel=True,
requireSplitterMap=False),
validAlphaIn=InputSpec(
description="""An alpha channel that may be used (in place of the
'validRegionIn' bounding box) to specify the valid
region of the image on a per-pixel basis; the channel
should be an image of identical size to the finest
resolution data input image.""",
dataType='float',
regionLevel=True,
requireSplitterMap=False)
)
ns.outputs = dict(
bottomUpOut=OutputSpec(
description="""The output signal, conceptually organized as an
image pyramid data structure, but internally
organized as a flattened vector.""",
dataType='float',
count=0,
regionLevel=False,
isDefaultOutput=True
),
topDownOut=OutputSpec(
description="""The feedback output signal, sent to the topDownIn
input of the next level down.""",
dataType='float',
count=0,
regionLevel=True)
)
ns.parameters = dict(
# -------------------------------------
# Create/Read-only parameters
filterDim=ParameterSpec(dataType='int', accessMode='Create',
description="""
The size (in pixels) of both the width and height of the
gabor filters. Defaults to 9x9.
""",
defaultValue=9),
numOrientations=ParameterSpec(dataType='int', accessMode='Create',
description="""
The number of gabor filter orientations to produce.
The half-circle (180 degrees) of rotational angle will be evenly partitioned.
Defaults to 4, which produces a gabor bank containing filters oriented
at 0, 45, 90, and 135 degrees.
"""),
phaseMode=ParameterSpec(dataType='str', accessMode='Create',
description="""
The number of separate phases to compute per orientation.
Valid values are: 'single' or 'dual'. In 'single', responses to each such
orientation are rectified by absolutizing them; i.e., a 90-degree edge
will produce the same responses as a 270-degree edge, and the two
responses will be indistinguishable. In "dual" mode, the responses to
each orientation are rectified by clipping at zero, and then creating
a second output response by inverting the raw response and again clipping
at zero; i.e., a 90-degree edge will produce a response only in the
90-degree-oriented plane, and a 270-degree edge will produce a response
        only in the dual phase plane associated with the 90-degree plane (an
implicit 270-degree plane.) Default is 'single'.
""",
constraints="enum: single, dual",
defaultValue='single'),
centerSurround=ParameterSpec(dataType='int', accessMode='Create',
description="""
Controls whether an additional filter corresponding to
a non-oriented "center surround" response is applied to the image.
If phaseMode is "dual", then a second "center surround" response plane
is added as well (the inverted version of the center-surround response.)
Defaults to False.
""",
defaultValue=0),
targetType=ParameterSpec(dataType='str', accessMode='Create',
description="""
The preferred "target" of the gabor filters. A value of
'line' specifies that line detectors (peaks in the center and troughs
on either side) are to be used. A value of 'edge' specifies that edge
detectors (with a peak on one side and a trough on the other) are to
be used. Default is 'edge'.
""",
constraints="enum: line,edge",
defaultValue='edge'),
gainConstant=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
A multiplicative amplifier that is applied to the gabor
responses after any normalization. Defaults to 1.0; larger values
increase the sensitivity to edges.
"""),
normalizationMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
Controls the method by which responses are
normalized on a per image (and per scale) basis. Accepts the following
three legal values:
"fixed": No response normalization;
"max": Applies a global gain value to the responses so that the
max response equals the value of 'gainConstant'
"mean": Applies a global gain value to the responses so that the
mean response equals the value of 'gainConstant'
Default is 'fixed'.
""",
constraints="enum: fixed, mean, max"
),
perPlaneNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
Controls whether normalization (as specified by
'normalizationMethod') is applied globally across all response planes
(for a given scale), or individually to each response plane. Default
is False. Note: this parameter is ignored if normalizationMethod is "fixed".
""",
),
perPhaseNormalization=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
Controls whether normalization (as specified by
'normalizationMethod') is applied globally across both phases for a
particular response orientation and scale, or individually to each
phase of the response. Default is True. Note: this parameter is
ignored if normalizationMethod is "fixed".
""",
),
postProcessingMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
Controls what type of post-processing (if any)
        is to be performed on the normalized responses. Valid values are:
"raw": No post-processing is performed; final output values are
unmodified after normalization
"sigmoid": Passes normalized output values through a sigmoid function
parameterized by 'postProcessingSlope' and 'postProcessingCenter'.
"threshold": Passes normalized output values through a piecewise linear
thresholding function parameterized by 'postProcessingMin'
and 'postProcessingMax'.
""",
constraints="enum: raw, sigmoid, threshold"),
postProcessingSlope=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the slope of the sigmoid function to apply if the
post-processing mode is set to 'sigmoid'.
"""),
postProcessingCenter=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the mid-point of the sigmoid function to apply if the
post-processing mode is set to 'sigmoid'.
"""),
postProcessingMin=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the value below which responses will be clipped to zero
when post-processing mode is set to 'threshold'.
"""),
postProcessingMax=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
Specifies the value above which responses will be clipped to one
when post-processing mode is set to 'threshold'.
"""),
zeroThresholdOut=ParameterSpec(dataType='float', accessMode='ReadWrite',
description="""
If all outputs of a gabor node are below this threshold,
they will all be driven to absolute 0. This is useful in conjunction with
using the product mode/don't care spatial pooler which needs to know when
an input should be treated as 0 vs being normalized to sum to 1.
"""),
boundaryMode=ParameterSpec(dataType='str', accessMode='Create',
description="""
Controls how GaborNode deals with boundary effects. Accepts
two valid parameters:
'constrained' -- Gabor responses are normally only computed for image locations
that are far enough from the edge of the input image so that the entire
filter mask fits within the input image. Thus, the spatial dimensions of
the output gabor maps will be smaller than the input image layers.
'sweepOff' -- Gabor responses will be generated at every location within
the input image layer. Thus, the spatial dimensions of the output gabor
maps will be identical to the spatial dimensions of the input image.
For input image locations that are near the edge (i.e., a portion of
the gabor filter extends off the edge of the input image), the values
        of pixels that are off the edge of the image are taken to be as specified
by the parameter 'offImagePixelValue'.
Default is 'constrained'.
""",
constraints='enum: constrained, sweepOff',
defaultValue='constrained'),
offImagePixelValue=ParameterSpec(dataType="str", accessMode='ReadWrite',
description="""
If 'boundaryMode' is set to 'sweepOff', then this
        parameter specifies the input pixel value to use for "filling in"
        image locations that lie outside the bounds of the original image.
Ignored if 'boundaryMode' is 'constrained'. Default value is 0.
"""
),
suppressOutsideBox=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
If True, then gabor responses outside of the bounding
box (provided from the sensor) are suppressed. Internally, the bounding
box is actually expanded by half the filter dimension (respecting the edge
of the image, of course) so that responses can be computed for all image
locations within the original bounding box.
"""),
forceBoxContraction=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
Fine-tunes the behavior of bounding box suppression.
If False (the default), then the bounding box will only be 'contracted'
        (by the half-width of the filter) in the dimension(s) in which it is not
the entire span of the image. If True, then the bounding box will be
contracted unconditionally.
"""),
suppressByAlpha=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
A boolean that, if True, instructs GaborNode to use
the pixel-accurate alpha mask received on the input 'validAlphaIn' for
the purpose of suppression of responses.
"""),
logPrefix=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
If non-None, causes the response planes at each scale, and
for each input image, to be written to disk using the specified prefix
for the name of the log images. Default is None (no such logging.)
"""),
maxTopDownOut=ParameterSpec(dataType='float', accessMode='Read', count=0,
description="""
The max top-down output from each node. It is faster to access this
variable than to fetch the entire top-down output of every node. The
top down image inspector fetches this parameter (if available)
instead of the topDownOut output variable for better performance.
"""),
# -------------------------------------
# Undocumented parameters
nta_aspectRatio=ParameterSpec(dataType='float', accessMode='Create',
description="""
Controls how "fat" (i.e., how oriented) the Gabor
filters are. A value of 1 would produce completely non-oriented
(circular) filters; smaller values will produce a more oriented
filter. Default is 0.3.
""",
defaultValue=0.3),
nta_effectiveWidth=ParameterSpec(dataType='float', accessMode='Create',
description="""
Controls the rate of exponential drop-off in
the Gaussian component of the Gabor filter. Default is 4.5.
""",
defaultValue=4.5),
nta_wavelength=ParameterSpec(dataType='float', accessMode='Create',
description="""
Controls the frequency of the sinusoidal component
of the Gabor filter. Default is 5.6.
""",
defaultValue=5.6),
nta_lobeSuppression=ParameterSpec(dataType='int', accessMode='Create',
description="""
Controls whether or not the secondary lobes of the
Gabor filters are suppressed. The suppression is performed based
on the radial distance from the oriented edge to which the Gabor
filter is tuned. If True, then the secondary lobes produced
by the pure mathematical Gabor equation will be suppressed
and have no effect; if False, then the pure mathematical
Gabor equation (digitized into discrete sampling points, of
course) will be used. Default is True.
""",
defaultValue=1),
nta_debugLogBuffers=ParameterSpec(dataType='int', accessMode='ReadWrite',
description="""
        If enabled, causes internal memory buffers used by the
        C implementation to be dumped to disk after each compute()
cycle as an aid in the debugging of the C code path.
Defaults to False.
""",
),
nta_width=ParameterSpec(dataType="int", accessMode='Read',
description="""Width of the maximum resolution."""),
nta_height=ParameterSpec(dataType="int", accessMode='Read',
description="""Width of the maximum resolution."""),
nta_morphologyMethod=ParameterSpec(dataType='str', accessMode='ReadWrite',
description="""
Controls the routines used to perform dilation and erosion of
valid alpha masks. Legal values are:
'opencv' -- use faster OpenCV routines;
'nta' -- use the slower Numenta routines;
'best' -- use OpenCV if it is available on the platform,
otherwise use the slower routines.
Default is 'best'.
"""),
)
return ns.toDict()
#---------------------------------------------------------------------------------
def getOutputElementCount(self, name):
"""This method will be called only when the node is used in nuPIC 2"""
if name == 'bottomUpOut':
return self.getNumPlanes()
elif name == 'topDownOut':
return 0
else:
raise Exception('Unknown output: ' + name)
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
# Command line unit testing
#+=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=++=+=+=+=+=+=+=+=+=+=+=+
if __name__=='__main__':
from nupic.engine import Network
n = Network()
gabor = n.addRegion(
'gabor',
'py.GaborNode2',
"""{ filterDim: 5,
numOrientations: 2,
centerSurround: 1,
phaseMode: single,
targetType: edge,
gainConstant: 1.0,
normalizationMethod: max,
postProcessingMethod: threshold,
postProcessingMin: 0.15,
postProcessingMax: 1.0,
boundaryMode: sweepOff,
#suppressOutsideBox: False,
#suppressByAlpha: True,
offImagePixelValue: colorKey,
zeroThresholdOut: 0.003
}""")
print 'Done.'
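# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original GaborNode2 implementation):
# the helper below reproduces, in isolation, the math that _buildGaborBank()
# uses for a single oriented filter -- rotate the pixel grid by theta, multiply
# a sinusoidal carrier by a Gaussian envelope, then normalize the kernel to
# zero mean and unit energy.  The helper name is invented; the default
# parameter values mirror the defaults documented in getSpec() and are used
# here for illustration only.
def _demoGaborKernel(filterDim=9, theta=0.0, wavelength=5.6,
                     aspectRatio=0.3, effectiveWidth=4.5, targetType='edge'):
  import numpy
  assert filterDim % 2 == 1
  half = filterDim // 2
  y, x = numpy.mgrid[-half:half+1, -half:half+1].astype(numpy.float32)
  # Rotate the coordinate frame by theta (radians)
  X = x * numpy.cos(theta) - y * numpy.sin(theta)
  Y = x * numpy.sin(theta) + y * numpy.cos(theta)
  # Sinusoidal carrier: sine for edge detectors, cosine for line detectors
  if targetType == 'edge':
    carrier = numpy.sin(2.0 * numpy.pi / wavelength * X)
  else:
    carrier = numpy.cos(2.0 * numpy.pi / wavelength * X)
  # Gaussian envelope, elongated along the filter's preferred orientation
  envelope = numpy.exp((X * X + aspectRatio * aspectRatio * Y * Y) /
                       (-2.0 * effectiveWidth * effectiveWidth))
  kernel = carrier * envelope
  kernel -= kernel.mean()                        # zero mean
  kernel /= numpy.sqrt((kernel * kernel).sum())  # unit energy
  return kernel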
|
neuroidss/nupic.vision
|
nupicvision/regions/extra/GaborNode2.py
|
Python
|
gpl-3.0
| 141,814
|
[
"Gaussian"
] |
e567b0f0134b5797138cac16004db1c3c5fd6663ca4c323a2247bad3b03d102d
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A node transformer that includes utilities for SCT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import gast
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import templates
class AutoGraphParseError(SyntaxError):
pass
# TODO(mdan): Use namedtuple.
class EntityInfo(object):
"""Contains information about a Python entity.
Immutable.
Examples of entities include functions and classes.
Attributes:
source_code: The entity's source code.
source_file: The entity's source file.
namespace: Dict[str, ], containing symbols visible to the entity (excluding
parameters).
arg_values: dict[str->*], containing parameter values, if known.
arg_types: dict[str->*], containing parameter types, if known.
owner_type: The surrounding class type of the function, if present.
"""
# TODO(mdan): Remove the default and update tests.
def __init__(self, source_code, source_file, namespace, arg_values, arg_types,
owner_type):
self.source_code = source_code
self.source_file = source_file
self.namespace = namespace
self.arg_values = {} if arg_values is None else arg_values
self.arg_types = {} if arg_types is None else arg_types
self.owner_type = owner_type
class _StateStack(object):
"""Typed stack abstraction.
This class provides syntactic sugar for a stack of objects of known
type. It allows accessing attributes of the object at the top of the stack
directly against this object, which allows for very terse syntax.
For example, this code:
stack = _StateStack(Foo)
stack.enter()
stack.bar
Is equivalent to:
stack = []
stack.append(Foo())
foo = stack[-1]
foo.bar
See _State for more on how this is used.
Attributes:
type: Any, the type of objects that this stack holds
level: int, the current stack depth
value: Any, the instance of the object at the top of the stack
"""
def __init__(self, type_):
# Because we override __setattr__, we need to attach these attributes using
# the superclass' setattr.
object.__setattr__(self, 'type', type_)
object.__setattr__(self, '_stack', [])
if not hasattr(type_, 'no_root'):
self.enter()
def enter(self):
self._stack.append(self.type())
def exit(self):
return self._stack.pop()
@property
def level(self):
return len(self._stack)
@property
def value(self):
return self._stack[-1]
def __iter__(self):
return iter(self._stack)
def __getattr__(self, key):
return getattr(self._stack[-1], key)
def __setattr__(self, key, value):
setattr(self._stack[-1], key, value)
class _State(object):
"""Supporting class for nested scope variable space for converter.Base.
This structure offers syntactic sugar over a dict of stacks of objects
of known type. These structures are useful to keep state during AST walks.
Multiple different scopes can be tracked in parallel. For example:
s = _State()
s[foo].enter()
s[bar].enter() # this will not affect s[foo]
Element access has special semantics:
* keys are a data type
* element values are _StateStack(type=key) objects
* missing elements are automatically added, similarly to defaultdict
For example, the following block :
_State s
s[Foo]
Is equivalent to:
s = {}
if Foo not in s:
s[Foo] = Foo()
s[Foo]
See Base for how it's used.
"""
def __init__(self):
self._value = {}
def __getitem__(self, key):
if key not in self._value:
self._value[key] = _StateStack(key)
return self._value[key]
class Base(gast.NodeTransformer):
"""Base class for general-purpose code transformers transformers.
This is an extension of ast.NodeTransformer that provides a few additional
functions, like state tracking within the scope of arbitrary node, helpers
for processing code blocks, debugging, mapping of transformed code to
original code, and others.
Scope-local state tracking: to keep state across nodes, at the level of
(possibly nested) scopes, use enter/exit_local_scope and set/get_local.
You must call enter/exit_local_scope manually, but the transformer detects
when they are not properly paired.
The transformer allows keeping state across calls to visit_* that is local to
arbitrary nodes and their descendants, using the self.state attribute.
Multiple independent scopes are allowed and automatically constructed.
For example, to keep track of the If node that encloses any Name node, one can
write:
class FooType(object):
def __init__(self):
self.foo_property = None
class DummyTransformer(Base):
def visit_If(self, node):
self.state[FooType].enter()
self.state[FooType].foo_property = node
def visit_Name(self, node):
self.state[FooType].foo_property # will hold the innermost enclosing if
"""
# TODO(mdan): Document all extra features.
def __init__(self, entity_info):
"""Initialize the transformer.
Subclasses should call this.
Args:
entity_info: An EntityInfo object.
"""
self._current_origin = None
self._lineno = 0
self._col_offset = 0
# TODO(znado): remove this from the constructor of all Transformers.
self.entity_info = entity_info
self._enclosing_entities = []
# A stack that allows keeping mutable, scope-local state where scopes may be
# nested. For example, it can be used to track the usage of break
# statements in each loop, where loops may be nested.
self._local_scope_state = []
self.enter_local_scope()
# Allows scoping of local variables to keep state across calls to visit_*
    # methods. Multiple scope hierarchies may exist and are keyed by tag. A scope
# is valid at one or more nodes and all its children. Scopes created in
# child nodes supersede their parent. Scopes are isolated from one another.
self.state = _State()
@property
def enclosing_entities(self):
return tuple(self._enclosing_entities)
@property
def local_scope_level(self):
return len(self._local_scope_state)
def enter_local_scope(self, inherit=None):
"""Deprecated.
Use self.state instead.
Marks entry into a new local scope.
Args:
inherit: Optional enumerable of variable names to copy from the parent
scope.
"""
scope_entered = {}
if inherit:
this_scope = self._local_scope_state[-1]
for name in inherit:
if name in this_scope:
scope_entered[name] = this_scope[name]
self._local_scope_state.append(scope_entered)
def exit_local_scope(self, keep=None):
"""Deprecated.
Use self.state instead.
Marks exit from the current local scope.
Args:
keep: Optional enumerable of variable names to copy into the parent scope.
Returns:
A dict containing the scope that has just been exited.
"""
scope_left = self._local_scope_state.pop()
if keep:
this_scope = self._local_scope_state[-1]
for name in keep:
if name in scope_left:
this_scope[name] = scope_left[name]
return scope_left
def set_local(self, name, value):
"""Deprecated. Use self.state instead."""
self._local_scope_state[-1][name] = value
def get_local(self, name, default=None):
"""Deprecated. Use self.state instead."""
return self._local_scope_state[-1].get(name, default)
def debug_print(self, node):
"""Helper method useful for debugging."""
if __debug__:
print(pretty_printer.fmt(node))
return node
def create_assignment(self, target, expression):
template = """
target = expression
"""
return templates.replace(template, target=target, expression=expression)
def visit_block(self, nodes, before_visit=None, after_visit=None):
"""A more powerful version of generic_visit for statement blocks.
An example of a block is the body of an if statement.
This function allows specifying a postprocessing callback (the
    after_visit argument) which can be used to move nodes to a new
destination. This is done by after_visit by returning a non-null
second return value, e.g. return new_node, new_destination.
For example, a transformer could perform the following move:
foo()
bar()
baz()
foo()
if cond:
bar()
baz()
The above could be done with a postprocessor of this kind:
def after_visit(node):
if node_is_function_call(bar):
new_container_node = build_cond()
new_container_node.body.append(node)
return new_container_node, new_container_node.body
else:
# Once we set a new destination, all subsequent items will be
# moved to it, so we don't need to explicitly handle baz.
return node, None
Args:
nodes: enumerable of AST node objects. If None, the function returns None.
before_visit: optional callable that is called before visiting each item
in nodes
after_visit: optional callable that takes in an AST node and returns a
tuple (new_node, new_destination). It is called after visiting each item
        in nodes. It is used in the same way as the
visit_* methods: new_node will replace the node; if not None,
new_destination must be a list, and subsequent nodes will be placed
in this list instead of the list returned by visit_block.
Returns:
      A list of AST node objects containing the transformed items from nodes,
except those nodes that have been relocated using after_visit.
"""
if nodes is None:
return None
results = []
node_destination = results
for node in nodes:
if before_visit:
# TODO(mdan): We can modify node here too, if ever needed.
before_visit()
replacement = self.visit(node)
if after_visit and replacement:
replacement, new_destination = after_visit(replacement)
else:
new_destination = None
if replacement:
if isinstance(replacement, (list, tuple)):
node_destination.extend(replacement)
else:
node_destination.append(replacement)
# Allow the postprocessor to reroute the remaining nodes to a new list.
if new_destination is not None:
node_destination = new_destination
return results
# TODO(mdan): Remove.
def apply_to_single_assignments(self, targets, values, apply_fn):
"""Applies a function to each individual assignment.
This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
    It tries to break down the unpacking if possible. In effect, it is
    equivalent to passing the assigned values in SSA form to apply_fn.
Examples:
The following will result in apply_fn(a, c), apply_fn(b, d):
a, b = c, d
The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
a, b = c
The following will result in apply_fn(a, (b, c)):
a = b, c
It uses the visitor pattern to allow subclasses to process single
assignments individually.
Args:
targets: list, tuple of or individual AST node. Should be used with the
targets field of an ast.Assign node.
values: an AST node.
apply_fn: a function of a single argument, which will be called with the
respective nodes of each single assignment. The signature is
apply_fn(target, value), no return value.
"""
if not isinstance(targets, (list, tuple)):
targets = (targets,)
for target in targets:
if isinstance(target, (gast.Tuple, gast.List)):
for i in range(len(target.elts)):
target_el = target.elts[i]
if isinstance(values, (gast.Tuple, gast.List)):
value_el = values.elts[i]
else:
value_el = gast.Subscript(values, gast.Index(i), ctx=gast.Store())
self.apply_to_single_assignments(target_el, value_el, apply_fn)
else:
# TODO(mdan): Look into allowing to rewrite the AST here.
apply_fn(target, values)
def _get_source(self, node):
try:
source, _ = compiler.ast_to_source(node)
return source
# pylint: disable=broad-except
# This function is used for error reporting. If an exception occurs here,
# it should be suppressed, in favor of emitting as informative a message
# about the original error as possible.
except Exception:
return '<could not convert AST to source>'
def visit(self, node):
if not isinstance(node, gast.AST):
# This is not that uncommon a mistake: various node bodies are lists, for
# example, posing a land mine for transformers that need to recursively
# call `visit`. The error needs to be raised before the exception handler
# below is installed, because said handler will mess up if `node` is not,
# in fact, a node.
msg = ('invalid value for "node": expected "ast.AST", got "{}"; to'
' visit lists of nodes, use "visit_block" instead').format(
type(node))
raise ValueError(msg)
did_enter_function = False
local_scope_size_at_entry = len(self._local_scope_state)
processing_expr_node = False
try:
parent_origin = self._current_origin
if isinstance(node, (gast.FunctionDef, gast.ClassDef, gast.Lambda)):
did_enter_function = True
elif isinstance(node, gast.Expr):
processing_expr_node = True
if did_enter_function:
self._enclosing_entities.append(node)
if anno.hasanno(node, anno.Basic.ORIGIN):
self._current_origin = anno.getanno(node, anno.Basic.ORIGIN)
if processing_expr_node:
entry_expr_value = node.value
if not anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
result = super(Base, self).visit(node)
self._current_origin = parent_origin
# Adjust for consistency: replacing the value of an Expr with
# an Assign node removes the need for the Expr node.
if processing_expr_node:
if isinstance(result, gast.Expr) and result.value != entry_expr_value:
# When the replacement is a list, it is assumed that the list came
# from a template that contained a number of statements, which
# themselves are standalone and don't require an enclosing Expr.
if isinstance(result.value,
(list, tuple, gast.Assign, gast.AugAssign)):
result = result.value
# On exception, the local scope integrity is not guaranteed.
if did_enter_function:
self._enclosing_entities.pop()
if local_scope_size_at_entry != len(self._local_scope_state):
raise AssertionError(
'Inconsistent local scope stack. Before entering node %s, the'
' stack had length %d, after exit it has length %d. This'
' indicates enter_local_scope and exit_local_scope are not'
' well paired.' % (node, local_scope_size_at_entry,
len(self._local_scope_state)))
return result
except (ValueError, AttributeError, KeyError, NotImplementedError) as e:
if not self._current_origin:
raise e
original_file_path = self._current_origin.loc.filename
original_line_number = self._current_origin.loc.lineno
original_col_offset = self._current_origin.loc.col_offset
original_source_line = self._current_origin.source_code_line
msg = '%s: %s.' % (e.__class__.__name__, str(e))
# TODO(mdan): Avoid the printing of the original exception.
# In other words, we need to find how to suppress the "During handling
# of the above exception, another exception occurred" message.
six.reraise(
AutoGraphParseError,
AutoGraphParseError(msg, (original_file_path, original_line_number,
original_col_offset, original_source_line)),
sys.exc_info()[2])
finally:
self._current_origin = parent_origin
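# ----------------------------------------------------------------------
# Illustrative sketch (not part of the TensorFlow module above): the Base class
# extends gast.NodeTransformer, which follows the same visitor protocol as the
# standard library's ast.NodeTransformer.  The self-contained helper below uses
# only the stdlib ast module to show that protocol -- visit_* methods return the
# (possibly rewritten) node, and generic_visit recurses into the children.  The
# helper name and its arguments are invented for this example.
def _demo_rename_names(source, old_name, new_name):
  """Parse source, rename every Name node equal to old_name, return the tree."""
  import ast
  class _Renamer(ast.NodeTransformer):
    def visit_Name(self, node):
      self.generic_visit(node)
      if node.id == old_name:
        node.id = new_name
      return node
  tree = _Renamer().visit(ast.parse(source))
  ast.fix_missing_locations(tree)
  return tree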
|
gautam1858/tensorflow
|
tensorflow/python/autograph/pyct/transformer.py
|
Python
|
apache-2.0
| 17,057
|
[
"VisIt"
] |
f078f89c00921116d35d084f1ed3761ee0893214eae707484b0308c80b48b8d9
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-parameters
# Author : Stuart Paterson
########################################################################
"""
Retrieve parameters associated with the given DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
errorList = []
for job in parseArguments( args ):
result = dirac.getJobParameters( job, printOutput = True )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
|
arrabito/DIRAC
|
Interfaces/scripts/dirac-wms-job-parameters.py
|
Python
|
gpl-3.0
| 1,226
|
[
"DIRAC"
] |
7fbe3d8c4d4f459873a568706e281d170059da9210916d2e56068dbe06c3557b
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Apr 28, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 28, 2012"
import unittest2 as unittest
import os
from pymatgen.core.structure import Molecule
from pymatgen.io.xyz import XYZ
from pymatgen.io.babel import BabelMolAdaptor
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files", "molecules")
try:
import openbabel as ob
import pybel as pb
except ImportError:
pb = None
ob = None
@unittest.skipIf(not (pb and ob), "OpenBabel not present. Skipping...")
class BabelMolAdaptorTest(unittest.TestCase):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_init(self):
adaptor = BabelMolAdaptor(self.mol)
obmol = adaptor.openbabel_mol
self.assertEqual(obmol.NumAtoms(), 5)
adaptor = BabelMolAdaptor(adaptor.openbabel_mol)
self.assertEqual(adaptor.pymatgen_mol.formula, "H4 C1")
def test_from_file(self):
adaptor = BabelMolAdaptor.from_file(
os.path.join(test_dir, "Ethane_e.pdb"), "pdb")
mol = adaptor.pymatgen_mol
self.assertEqual(mol.formula, "H6 C2")
def test_from_string(self):
xyz = XYZ(self.mol)
adaptor = BabelMolAdaptor.from_string(str(xyz), "xyz")
mol = adaptor.pymatgen_mol
self.assertEqual(mol.formula, "H4 C1")
def test_localopt(self):
self.mol[1] = "H", [0, 0, 1.05]
adaptor = BabelMolAdaptor(self.mol)
adaptor.localopt()
optmol = adaptor.pymatgen_mol
for site in optmol[1:]:
self.assertAlmostEqual(site.distance(optmol[0]), 1.09216, 2)
if __name__ == "__main__":
unittest.main()
|
aykol/pymatgen
|
pymatgen/io/tests/test_babel.py
|
Python
|
mit
| 2,273
|
[
"Pybel",
"pymatgen"
] |
e99ae2d5866f847030166e9176d1631dfa8be605e61d89dbb7a3a8d126f4a3f4
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen, loops, loopcolor):
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopcolor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) )
previous=p
n=n+1
print "rendered loop ",nloop, " with ", len(lop), " points"
nloop = nloop+1
def drawFibers(myscreen, fibs, fibcolor):
for f in fibs:
#print f
for i in f.getInts():
#print i
p1 = f.point(i.lower)
p2 = f.point(i.upper)
#print p1
#print p2
myscreen.addActor( camvtk.Line(p1=(p1.x,p1.y,p1.z),p2=(p2.x,p2.y,p2.z),color=fibcolor) )
if __name__ == "__main__":
print ocl.revision()
myscreen = camvtk.VTKScreen()
a = ocl.Point(0,1,0.3)
myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
b = ocl.Point(1,1,0.3)
myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
c = ocl.Point(0,0,0.1)
c = ocl.Point(0,0,-2.1)
#c = ocl.Point(0.5,0.5,-10)
myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
t = ocl.Triangle(b,c,a)
s = ocl.STLSurf()
s.addTriangle(t) # a one-triangle STLSurf
zh=0.25 # the z-coordinates for the waterlines
diam = 0.31 # run the thing for all these cutter diameters
length = 5
loops = []
#cutter = ocl.CylCutter( diam , length )
#cutter = ocl.BallCutter( diam , length )
#cutter = ocl.BullCutter( diam , diam/5, length )
cutter = ocl.ConeCutter( diam , math.pi/4, length )
wl = ocl.Waterline()
#wl.setThreads(1)
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(0.05)
t_before = time.time()
wl.run()
t_after = time.time()
calctime = t_after-t_before
print " Waterline done in ", calctime," s"
cutter_loops = wl.getLoops()
for l in cutter_loops:
loops.append(l)
aloops = []
awl = ocl.AdaptiveWaterline()
awl.setSTL(s)
awl.setCutter(cutter)
awl.setZ(zh)
awl.setSampling(0.05)
awl.setMinSampling(0.0001)
t_before = time.time()
awl.run()
t_after = time.time()
calctime = t_after-t_before
print " AdaptiveWaterline done in ", calctime," s"
xf = awl.getXFibers()
print " got ", len(xf)," x-fibers"
yf = awl.getYFibers()
print " got ", len(yf)," y-fibers"
drawFibers(myscreen, xf, camvtk.red)
drawFibers(myscreen, yf, camvtk.green)
acutter_loops = awl.getLoops()
#acutter_loops = []
for l in acutter_loops:
l2=[]
zofs = 0.0
for p in l:
p2 = p + ocl.Point(0,0,zofs)
l2.append(p2)
aloops.append(l2)
drawLoops(myscreen, loops, camvtk.yellow)
drawLoops(myscreen, aloops, camvtk.mag)
print "done."
myscreen.camera.SetPosition(2, 2, 2)
myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
tectronics/opencamlib
|
scripts/waterline/waterline_3_onetriangle_adapt.py
|
Python
|
gpl-3.0
| 4,077
|
[
"VTK"
] |
59fbb581fb3d8093366d1031a7d24c85b50ba3df911deef15963279cd28b0a8c
|
import astroid
from astroid.node_classes import NodeNG
import hypothesis.strategies as hs
from hypothesis import assume
from python_ta.transforms.type_inference_visitor import TypeInferer
from keyword import iskeyword
from hypothesis import settings
from typing import Callable, Tuple, List, Union
settings.register_profile("pyta", settings(max_examples=10))
# Custom strategies for hypothesis testing framework
primitive_types = hs.sampled_from([
hs.integers,
hs.booleans,
lambda: hs.floats(allow_nan=False, allow_infinity=False),
hs.none,
hs.text,
hs.binary
])
primitive_values = primitive_types.flatmap(lambda s: s())
# Strategies for generating Indexes
index_types = hs.sampled_from([
hs.integers,
lambda: hs.text(alphabet="abcdefghijklmnopqrstuvwxyz", min_size=1)
])
index_values = index_types.flatmap(lambda s: s())
numeric_types = hs.sampled_from([
hs.integers,
lambda: hs.floats(allow_nan=False, allow_infinity=False)
])
numeric_values = numeric_types.flatmap(lambda s: s())
# Strategies for generating Binary Operators
non_bool_symbols = ['+', '-', '*', '//', '%', '/', '**', '&', '^', '|', '<<', '>>']
non_boolean_operator = hs.sampled_from(non_bool_symbols)
non_bool_unary_op = hs.sampled_from(['-', '+', '~'])
# Strategy for generating Comparison Operators
comparator_symbols = ['<', '>']
comparator_operator = hs.sampled_from(comparator_symbols)
comparator_symbols_equality = ['==', '!=', '>=', '<=', 'is']
comparator_operator_equality = hs.sampled_from(comparator_symbols_equality)
# Strategy for generating Boolean Operators
binary_bool_operator = hs.sampled_from(['and', 'or'])
unary_bool_operator = hs.sampled_from(['not'])
# Strategies for generating builtin type names
builtin_types = [bool, bytearray, bytes, complex, dict, enumerate,
float, frozenset, int, list, set, str, tuple]
builtin_type = hs.sampled_from(builtin_types)
annotation = hs.sampled_from(builtin_types).map(lambda s: s.__name__)
def valid_identifier(**kwargs):
"""Return a strategy which generates a valid Python Identifier"""
return hs.integers(min_value=0, max_value=1000).flatmap(
lambda n: hs.just(f'x{n}')
)
def homogeneous_list(**kwargs):
"""Return a strategy which generates a list of uniform type."""
return primitive_types.flatmap(lambda s: hs.lists(s(), **kwargs))
def numeric_list(**kwargs):
"""Return a strategy which generates a list of uniform numeric types."""
return numeric_types.flatmap(lambda s: hs.lists(s(), **kwargs))
def random_list(**kwargs):
"""Return a strategy which generates a random list."""
return hs.lists(primitive_values, **kwargs)
def homogeneous_dictionary(**kwargs):
"""Return a strategy which generates a dictionary of uniform key:value type."""
return index_types.flatmap(lambda s: hs.dictionaries(s(), s(), **kwargs))
def random_dictionary(**kwargs):
"""Return a strategy which generates a random list."""
return hs.dictionaries(primitive_values, primitive_values, **kwargs)
def random_dict_variable_homogeneous_value(**kwargs):
"""Return a strategy which generates a random dictionary of variable name and value"""
return primitive_types.flatmap(lambda s: hs.dictionaries(valid_identifier(), s(), **kwargs))
homogeneous_iterable = hs.sampled_from([
lambda: homogeneous_dictionary(min_size=1),
lambda: homogeneous_list(min_size=1),
]).flatmap(lambda s: s())
heterogeneous_iterable = hs.sampled_from([
lambda: random_dictionary(min_size=1),
lambda: random_list(min_size=1),
lambda: hs.sets(primitive_values, min_size=1)
]).flatmap(lambda s: s())
def _parse_dictionary_to_program(variables_dict):
program = ""
# parse dictionary into input program
for variable_name in variables_dict:
assume(not iskeyword(variable_name))
program += variable_name + " = " + repr(variables_dict[variable_name]) + "\n"
return program
# Strategies for generating Python ASTs.
# These are named after the corresponding nodes.
@hs.composite
def binop_node(draw, left=None, op=non_boolean_operator, right=None):
left = left or const_node()
right = right or const_node()
node = astroid.BinOp(draw(op))
node.postinit(draw(left), draw(right))
return node
@hs.composite
def boolop_node(draw, value=None, op=binary_bool_operator, **kwargs):
value = value or const_node()
node = astroid.BoolOp(draw(op))
if kwargs.get('min_size', 0) < 2:
kwargs['min_size'] = 2
node.postinit(draw(hs.lists(value, **kwargs)))
return node
@hs.composite
def comprehension_node(draw, target=None, iter=None,
ifs=hs.just([])):
target = target or const_node(valid_identifier())
iter = iter or list_node()
node = astroid.Comprehension()
node.postinit(draw(target), draw(iter), draw(ifs))
return node
@hs.composite
def const_node(draw, value=primitive_values):
"""Return a Const node with value drawn from <value>."""
return astroid.Const(draw(value))
@hs.composite
def dict_node(draw, key=const_node(), value=const_node(), **kwargs):
items = draw(hs.dictionaries(key, value, **kwargs)).items()
node = astroid.Dict()
node.postinit(items)
return node
@hs.composite
def expr_node(draw, value=None):
value = value or expr
node = astroid.Expr()
node.postinit(draw(value))
return node
@hs.composite
def ifexp_node(draw, test=const_node(hs.booleans()),
expr=const_node(), orelse=const_node()):
# TODO: Add an option for whether expr and orelse strategies produce the same type.
test = draw(test)
expr = draw(expr)
node = astroid.IfExp()
node.postinit(test, expr, expr)
return node
@hs.composite
def index_node(draw, value=const_node(hs.integers())):
node = astroid.Index()
node.postinit(draw(value))
return node
@hs.composite
def set_node(draw, elt=const_node(), **kwargs):
"""Return a Set node with elements drawn from elt.
"""
node = astroid.Set()
node.postinit(draw(hs.sets(elt, **kwargs)))
return node
@hs.composite
def setcomp_node(draw, elt=const_node(),
generators=hs.lists(comprehension_node(),
min_size=1, average_size=1)):
node = astroid.SetComp()
node.postinit(draw(elt), draw(generators))
return node
@hs.composite
def list_node(draw, elt=const_node(), **kwargs):
"""Return a List node with elements drawn from elt.
"""
node = astroid.List()
node.postinit(draw(hs.lists(elt, **kwargs)))
return node
@hs.composite
def listcomp_node(draw, elt=const_node(),
generators=hs.lists(comprehension_node(),
min_size=1, average_size=1)):
node = astroid.ListComp()
node.postinit(draw(elt), draw(generators))
return node
@hs.composite
def slice_node(draw):
lower = draw(hs.one_of(const_node(hs.integers()), hs.none()))
upper = draw(hs.one_of(const_node(hs.integers()), hs.none()))
step = draw(hs.one_of(const_node(hs.integers()), hs.none()))
node = astroid.Slice()
node.postinit(lower, upper, step)
return node
@hs.composite
def subscript_node(draw, value=None, slice=index_node()):
value = value or subscriptable_expr
node = astroid.Subscript()
node.postinit(
draw(value),
draw(slice)
)
return node
@hs.composite
def tuple_node(draw, elt=const_node, **kwargs):
"""Return a Tuple node with elements drawn from elt.
"""
elts = draw(hs.lists(elt(), **kwargs, min_size=1))
node = astroid.Tuple()
node.postinit(elts)
return node
@hs.composite
def unaryop_node(draw, op=hs.one_of(non_bool_unary_op, unary_bool_operator),
operand=const_node()):
op = draw(op)
operand = draw(operand)
node = astroid.UnaryOp(op)
node.postinit(operand)
return node
@hs.composite
def simple_homogeneous_dict_node(draw, **kwargs):
k = draw(primitive_types)
v = draw(primitive_types)
return draw(dict_node(
const_node(k()),
const_node(v()),
**kwargs
))
@hs.composite
def simple_homogeneous_list_node(draw, **kwargs):
t = draw(primitive_types)
return draw(list_node(const_node(t()), **kwargs))
@hs.composite
def simple_homogeneous_set_node(draw, **kwargs):
t = draw(primitive_types)
homogeneous_set = draw(set_node(const_node(t()), **kwargs))
assume(homogeneous_set.elts != set())
return homogeneous_set
@hs.composite
def name_node(draw, name=None):
if not name:
node = astroid.Name(draw(valid_identifier()))
else:
node = astroid.Name(draw(name))
return node
@hs.composite
def arguments_node(draw, annotated=False):
n = draw(hs.integers(min_value=1, max_value=5))
args = draw(hs.lists(name_node(None), min_size=n, max_size=n))
if annotated:
annotations = draw(hs.lists(name_node(annotation), min_size=n, max_size=n))
else:
annotations = None
node = astroid.Arguments()
node.postinit(
args,
None,
None,
None,
annotations
)
return node
@hs.composite
def functiondef_node(draw, name=None, annotated=False, returns=False):
name = name or draw(valid_identifier())
args = draw(arguments_node(annotated))
body = []
returns_node = astroid.Return()
arg_node, arg_type_node = draw(hs.sampled_from(list(zip(args.args, args.annotations))))
if returns:
returns_node.postinit(arg_node)
else:
returns_node.postinit(const_node(None))
body.append(returns_node)
node = astroid.FunctionDef(name=name)
node.parent = astroid.Module('Default', None)
node.postinit(
args,
body,
None,
arg_type_node
)
return node
expr = hs.one_of(
const_node(),
dict_node(min_size=1),
list_node(min_size=1),
tuple_node()
)
subscriptable_expr = hs.one_of(
const_node(hs.text()),
dict_node(min_size=1),
list_node(min_size=1),
tuple_node()
)
# Helper functions for testing
def _parse_text(source: Union[str, NodeNG], reset: bool = False) -> Tuple[astroid.Module, TypeInferer]:
"""Parse source code text and output an AST with type inference performed."""
# TODO: apparently no literal syntax for empty set in Python3, also cannot do set()
# TODO: Deal with special case later.
# if isinstance(source, astroid.Set) and len(list(source.elts)) == 0:
# source = f'{set({})}'
if not isinstance(source, str): # It's an astroid node
source = source.as_string()
module = astroid.parse(source)
type_inferer = TypeInferer()
if reset:
type_inferer.reset()
type_inferer.environment_transformer().visit(module)
type_inferer.type_inference_transformer().visit(module)
return module, type_inferer
def _verify_type_setting(module, ast_class, expected_type):
"""Helper to verify nodes visited by type inference visitor of astroid class has been properly transformed."""
result = [n.inf_type.getValue() for n in module.nodes_of_class(ast_class)]
assert [expected_type] == result, f'{expected_type}, {result}'
def lookup_type(inferer: TypeInferer, node: NodeNG, name: str) -> type:
"""Given a variable name, return its concrete type in the closest scope relative to given node.
Should be used only for testing purposes.
"""
inf_type = inferer.lookup_inf_type(node, name)
return inf_type.getValue()
def types_in_callable(inferer: TypeInferer, callable_function: Callable) -> Tuple[List[type], type]:
"""Return a tuple of types corresponding to the Callable function's arguments and return value, respectively.
Used only for testing purposes.
"""
arg_type_lst = [inferer.type_constraints.resolve(argument).getValue() for argument in callable_function.__args__]
return arg_type_lst[:-1], arg_type_lst[-1]
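# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original support module): it shows how
# the node strategies and helpers above are typically combined in a
# hypothesis-driven test.  The function name is hypothetical and the function
# is not used anywhere else in the test suite.
from hypothesis import given
@given(const_node(hs.integers()))
def _example_const_inference(node):
    """Sketch: a Const node built from an int should be inferred as int."""
    module, _ = _parse_text(node)
    _verify_type_setting(module, astroid.Const, int)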
|
RyanDJLee/pyta
|
tests/custom_hypothesis_support.py
|
Python
|
gpl-3.0
| 11,987
|
[
"VisIt"
] |
715e10ce523ab18e74dac41394b01f73fba5e84c76c61cbe55a9e26eb1c7f797
|
"""
@name: Modules/Families/_test/test_Hue.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2017-2020 by D. Brian Kimmel
@note: Created on Dec 18, 2017
@license: MIT License
@summary:
Passed all tests - DB - 2018-02-13
"""
__updated__ = '2019-12-30'
# Import system type stuff
from twisted.trial import unittest, reporter, runner
# Import PyMh files and modules.
from Modules.Families.Hue import test as I_test
class Z_Hue(unittest.TestCase):
def setUp(self):
self.m_test = runner.TestLoader()
def test_Hue(self):
l_package = runner.TestLoader().loadPackage(I_test)
l_ret = reporter.Reporter()
l_package.run(l_ret)
l_ret.done()
#
print('\n====================\n*** test_Hue ***\n{}\n'.format(l_ret))
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/Family/_test/test_Hue.py
|
Python
|
mit
| 833
|
[
"Brian"
] |
2bfc12586f069cd920e9beb68ec67ef0ebcf369e97d31d5bac68e698909d26c3
|
#! /usr/bin/env python
#
# test_3d_gauss.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 10000 (x,y,z) triples
pos = [[random.uniform(-0.5,0.5), random.uniform(-0.5,0.5), random.uniform(-0.5,0.5)]
for j in range(10000)]
l1 = topo.CreateLayer({'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_neuron'})
# visualize
#xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
#xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(nest.GetChildren(l1)[0]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Gaussian connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75,-0.75,-0.75], 'upper_right': [0.75,0.75,0.75]}},
'kernel':{'gaussian': {'p_center': 1., 'sigma': 0.25}}})
# show connections from center element
# sender shown in red, targets in green
ctr=topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr,l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr],[yctr],[zctr],s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt,ytgt,ztgt,s=40, facecolor='g', edgecolor='g')
tgts=topo.GetTargetNodes(ctr,l1)[0]
d=topo.Distance(ctr,tgts)
plt.figure()
plt.hist(d,100)
#plt.show()
|
gewaltig/cython-neuron
|
topology/examples/test_3d_gauss.py
|
Python
|
gpl-2.0
| 2,636
|
[
"Gaussian"
] |
a523a0e94f880543b53c51e18a55cb02b604fd34767c3e927f6d692b4dac5a6e
|
# Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""BioPython Pathway module.
Bio.Pathway is a lightweight class library designed to support the following tasks:
- Data interchange and preprocessing between pathway databases and analysis software.
- Quick prototyping of pathway analysis algorithms
The basic object in the Bio.Pathway model is Interaction, which represents an arbitrary
interaction between any number of biochemical species.
Network objects are used to represent the connectivity between species in pathways
and reaction networks.
For applications where it is not necessary to explicitly represent network connectivity,
the specialized classes Reaction and System should be used in place of Interaction and
Network.
The Bio.Pathway classes, especially Interaction, are intentionally
designed to be very flexible. They are intended to be used as wrappers around database-
specific records, such as BIND objects. The value added by this module is a
framework for representing collections of reactions in a way that supports
graph theoretic and numeric analysis.
Note: This module should be regarded as a prototype only. API changes are likely.
Comments and feature requests are most welcome.
"""
import sys
# Add path to Bio
sys.path.append('../..')
from functools import reduce
from Bio.Pathway.Rep.MultiGraph import *
__docformat__ = "restructuredtext en"
class Reaction(object):
"""Abstraction for a biochemical transformation.
This class represents a (potentially reversible) biochemical
transformation of the type:
a S1 + b S2 + ... --> c P1 + d P2 + ...
where
    - a, b, c, d ... are positive numeric stoichiometric coefficients,
- S1, S2, ... are substrates
- P1, P2, ... are products
A Reaction should be viewed as the net result of one or more individual
reaction steps, where each step is potentially facilitated by a different
catalyst. Support for 'Reaction algebra' will be added at some point in
the future.
Attributes:
    - reactants -- map of involved species to their stoichiometric coefficients:
      reactants[S] = stoichiometric constant for S
- catalysts -- list of tuples of catalysts required for this reaction
- reversible -- true iff reaction is reversible
- data -- reference to arbitrary additional data
Invariants:
- for all S in reactants: reactants[S] != 0
- for all C in catalysts: catalysts[C] != 0
"""
def __init__(self, reactants={}, catalysts=[],
reversible=0, data=None):
"""Initializes a new Reaction object."""
# enforce invariants on reactants:
self.reactants = reactants.copy()
# loop over original, edit the copy
for r, value in reactants.items():
if value == 0:
del self.reactants[r]
self.catalysts = sorted(set(catalysts))
self.data = data
self.reversible = reversible
def __eq__(self, r):
"""Returns true iff self is equal to r."""
return isinstance(r, Reaction) and \
self.reactants == r.reactants and \
self.catalysts == r.catalysts and \
self.data == r.data and \
self.reversible == r.reversible
def __ne__(self, r):
"""Returns true iff self is not equal to r."""
return not self.__eq__(r)
def __hash__(self):
"""Returns a hashcode for self."""
t = tuple(self.species())
return hash(t)
def __repr__(self):
"""Returns a debugging string representation of self."""
return "Reaction(" + \
",".join(map(repr, [self.reactants,
self.catalysts,
self.data,
self.reversible])) + ")"
def __str__(self):
"""Returns a string representation of self."""
substrates = ""
products = ""
all_species = sorted(self.reactants)
for species in all_species:
stoch = self.reactants[species]
if stoch < 0:
# species is a substrate:
if substrates != "":
substrates = substrates + " + "
if stoch != -1:
substrates = substrates + str(abs(stoch)) + " "
substrates = substrates + str(species)
elif stoch > 0:
# species is a product:
if products != "":
products = products + " + "
if stoch != 1:
products = products + str(stoch) + " "
products = products + str(species)
else:
raise AttributeError("Invalid 0 coefficient in Reaction.reactants")
if self.reversible:
return substrates + " <=> " + products
else:
return substrates + " --> " + products
def reverse(self):
"""Returns a new Reaction that is the reverse of self."""
reactants = {}
for r in self.reactants:
reactants[r] = - self.reactants[r]
return Reaction(reactants, self.catalysts,
self.reversible, self.data)
def species(self):
"""Returns a list of all Species involved in self."""
return list(self.reactants)
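# --- Illustrative sketch (not part of the original module) ------------------
# The Reaction docstring above uses negative stoichiometric coefficients for
# substrates and positive ones for products.  The helper below is hypothetical
# and exists only to document that convention; it builds 2 H2 + O2 --> 2 H2O.
def _example_reaction():
    """Return a sample Reaction (illustrative only, unused by Bio.Pathway)."""
    r = Reaction({"H2": -2, "O2": -1, "H2O": 2})
    assert str(r) == "2 H2 + O2 --> 2 H2O"
    assert sorted(r.species()) == ["H2", "H2O", "O2"]
    return r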
class System(object):
"""Abstraction for a collection of reactions.
This class is used in the Bio.Pathway framework to represent an arbitrary
collection of reactions without explicitly defined links.
Attributes:
None
"""
def __init__(self, reactions=[]):
"""Initializes a new System object."""
self.__reactions = set(reactions)
def __repr__(self):
"""Returns a debugging string representation of self."""
return "System(" + ",".join(map(repr, self.__reactions)) + ")"
def __str__(self):
"""Returns a string representation of self."""
return "System of " + str(len(self.__reactions)) + \
" reactions involving " + str(len(self.species())) + \
" species"
def add_reaction(self, reaction):
"""Adds reaction to self."""
self.__reactions.add(reaction)
def remove_reaction(self, reaction):
"""Removes reaction from self."""
self.__reactions.remove(reaction)
def reactions(self):
"""Returns a list of the reactions in this system.
Note the order is arbitrary!
"""
# TODO - Define __lt__ so that Reactions can be sorted on Python?
return list(self.__reactions)
def species(self):
"""Returns a list of the species in this system."""
return sorted(set(reduce(lambda s, x: s + x,
[x.species() for x in self.reactions()], [])))
def stochiometry(self):
"""Computes the stoichiometry matrix for self.
Returns (species, reactions, stoch) where
- species = ordered list of species in this system
- reactions = ordered list of reactions in this system
- stoch = 2D array where stoch[i][j] is coef of the
jth species in the ith reaction, as defined
by species and reactions above
"""
        # Note: This is an inefficient and ugly temporary implementation.
        # To be practical, stoichiometric matrices should probably
# be implemented by sparse matrices, which would require
# NumPy dependencies.
#
# PS: We should implement automatic checking for NumPy here.
species = self.species()
reactions = self.reactions()
        stoch = [[0] * len(species) for _ in reactions]
        for i in range(len(reactions)):
            for s in reactions[i].species():
                stoch[i][species.index(s)] = reactions[i].reactants[s]
return (species, reactions, stoch)
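# --- Illustrative sketch (not part of the original module) ------------------
# With the single reaction 2 H2 + O2 --> 2 H2O, the stoichiometry matrix
# described in the docstring above has one row (the reaction) and one column
# per species, in sorted species order.  The function name is hypothetical.
def _example_stochiometry():
    """Return the stoichiometry data of a one-reaction System (illustrative)."""
    r = Reaction({"H2": -2, "O2": -1, "H2O": 2})
    system = System([r])
    species, reactions, stoch = system.stochiometry()
    assert species == ["H2", "H2O", "O2"]
    assert stoch == [[-2, 2, -1]]
    return species, reactions, stoch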
class Interaction(object):
"""An arbitrary interaction between any number of species.
This class definition is intended solely as a minimal wrapper interface that should
be implemented and extended by more specific abstractions.
Attributes:
- data -- reference to arbitrary additional data
"""
    def __init__(self, data):
self.data = data
def __hash__(self):
"""Returns a hashcode for self."""
return hash(self.data)
def __repr__(self):
"""Returns a debugging string representation of self."""
return "Interaction(" + repr(self.data) + ")"
def __str__(self):
"""Returns a string representation of self."""
return "<" + str(self.data) + ">"
class Network(object):
"""A set of species that are explicitly linked by interactions.
The network is a directed multigraph with labeled edges. The nodes in the graph
are the biochemical species involved. The edges represent an interaction between
two species, and the edge label is a reference to the associated Interaction
object.
Attributes:
None
"""
def __init__(self, species=[]):
"""Initializes a new Network object."""
self.__graph = MultiGraph(species)
def __repr__(self):
"""Returns a debugging string representation of this network."""
return "<Network: __graph: " + repr(self.__graph) + ">"
def __str__(self):
"""Returns a string representation of this network."""
return "Network of " + str(len(self.species())) + " species and " + \
str(len(self.interactions())) + " interactions."
def add_species(self, species):
"""Adds species to this network."""
self.__graph.add_node(species)
def add_interaction(self, source, sink, interaction):
"""Adds interaction to this network."""
self.__graph.add_edge(source, sink, interaction)
def source(self, species):
"""Returns list of unique sources for species."""
return self.__graph.parents(species)
def source_interactions(self, species):
"""Returns list of (source, interaction) pairs for species."""
return self.__graph.parent_edges(species)
def sink(self, species):
"""Returns list of unique sinks for species."""
return self.__graph.children(species)
def sink_interactions(self, species):
"""Returns list of (sink, interaction) pairs for species."""
return self.__graph.child_edges(species)
def species(self):
"""Returns list of the species in this network."""
return self.__graph.nodes()
def interactions(self):
"""Returns list of the unique interactions in this network."""
return self.__graph.labels()
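# --- Illustrative sketch (not part of the original module) ------------------
# A Network is a directed multigraph whose edges are labelled by Interaction
# objects, as described in the class docstring above.  The helper below is
# hypothetical and only illustrates the source/sink accessors.
def _example_network():
    """Return a two-species Network with one interaction (illustrative)."""
    network = Network(["A", "B"])
    network.add_interaction("A", "B", Interaction("A activates B"))
    assert "B" in network.sink("A")
    assert "A" in network.source("B")
    return network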
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Pathway/__init__.py
|
Python
|
gpl-2.0
| 10,961
|
[
"Biopython"
] |
13f5bf0123bd33c0c2a02e5ac89978809027f2875c6e998cefab3ee30609be2d
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import unittest
import subprocess
from PyQt5 import QtCore, QtWidgets
from peacock.ExodusViewer.ExodusPluginManager import main
from peacock.utils import Testing
import mooseutils
class TestExodusPluginManager(Testing.PeacockImageTestCase):
"""
Testing for ExodusPluginManager
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
#: str: The filename to load.
_filenames = Testing.get_chigger_input_list('mug_blocks_out.e', 'displace.e')
#: str: The script file created for testing
_repr_script = 'TestExodusPluginManager_repr.py'
@classmethod
def setUpClass(cls):
super(TestExodusPluginManager, cls).setUpClass()
if os.path.exists(cls._repr_script):
os.remove(cls._repr_script)
def setUp(self):
"""
Loads an Exodus file in the VTKWindowWidget object using a structure similar to the ExodusViewer widget.
"""
self._widget, self._main_window = main(size=[600,600])
self._window = self._widget.VTKWindowPlugin
self._widget.FilePlugin.onSetFilenames(self._filenames)
# Start with 'diffused' variable
self._widget.FilePlugin.VariableList.setCurrentIndex(2)
self._widget.FilePlugin.VariableList.currentIndexChanged.emit(2)
def testInitial(self):
"""
Tests the file loads.
"""
self.assertImage('testInitial.png', allowed=0.97)
def testWidget(self):
"""
Tests that a bunch of stuff can change without crashing.
"""
# File
self._widget.FilePlugin.FileList.setCurrentIndex(1)
self._widget.FilePlugin.FileList.currentIndexChanged.emit(1)
# Variable
self._widget.ColorbarPlugin.ColorBarToggle.setCheckState(QtCore.Qt.Unchecked)
self._widget.ColorbarPlugin.ColorBarToggle.clicked.emit(False)
# Time
self._widget.MediaControlPlugin.TimeDisplay.setText('0.26')
self._widget.MediaControlPlugin.TimeDisplay.editingFinished.emit()
#self.assertEqual(self._widget.MediaControlPlugin.TimeDisplay.text(), '0.25')
# Reset
self._widget.CameraPlugin.ResetButton.clicked.emit()
# Clip
self._widget.ClipPlugin.setChecked(True)
self._widget.ClipPlugin.clicked.emit(True)
self._widget.ClipPlugin.ClipDirection.setCurrentIndex(1)
# Background
self._widget.BackgroundPlugin.GradientToggle.setChecked(True)
# Mesh
self._widget.MeshPlugin.DisplacementMagnitude.setValue(0.3)
self._widget.MeshPlugin.DisplacementMagnitude.editingFinished.emit()
self._widget.MeshPlugin.ScaleY.setValue(1.5)
self._widget.MeshPlugin.ScaleY.editingFinished.emit()
self.assertImage('testWidget.png')
def testPython(self):
"""
Test generic python script.
"""
self._window.onCameraChanged((-0.7786, 0.2277, 0.5847), (9.2960, -0.4218, 12.6685), (0.0000, 0.0000, 0.1250))
imagename = '{}_{}'.format(self.__class__.__name__, 'basic.png')
self.python(imagename)
def testPythonContour(self):
"""
Test python script with contours.
"""
self._window.onCameraChanged((-0.7786, 0.2277, 0.5847), (9.2960, -0.4218, 12.6685), (0.0000, 0.0000, 0.1250))
self._widget.ContourPlugin.setChecked(True)
self._widget.ContourPlugin.clicked.emit(True)
imagename = '{}_{}'.format(self.__class__.__name__, 'contour.png')
self.python(imagename)
def testPythonClip(self):
"""
Test python script with clip.
"""
self._window.onCameraChanged((-0.7786, 0.2277, 0.5847), (9.2960, -0.4218, 12.6685), (0.0000, 0.0000, 0.1250))
self._widget.ClipPlugin.setChecked(True)
self._widget.ClipPlugin.clicked.emit(True)
self._widget.ClipPlugin.ClipDirection.setCurrentIndex(1)
imagename = '{}_{}'.format(self.__class__.__name__, 'clip.png')
self.python(imagename)
def testPythonScale(self):
"""
Test python script with scale.
"""
self._window.onCameraChanged((-0.7786, 0.2277, 0.5847), (9.2960, -0.4218, 12.6685), (0.0000, 0.0000, 0.1250))
self._widget.MeshPlugin.ScaleY.setValue(1.5)
self._widget.MeshPlugin.ScaleY.editingFinished.emit()
imagename = '{}_{}'.format(self.__class__.__name__, 'scale.png')
self.python(imagename)
    def testPythonMultiple(self):
"""
Test python script with multiple filters.
"""
self._window.onCameraChanged((-0.7786, 0.2277, 0.5847), (9.2960, -0.4218, 12.6685), (0.0000, 0.0000, 0.1250))
self._widget.ClipPlugin.setChecked(True)
self._widget.ClipPlugin.clicked.emit(True)
self._widget.ClipPlugin.ClipDirection.setCurrentIndex(1)
self._widget.MeshPlugin.ScaleY.setValue(.25)
self._widget.MeshPlugin.ScaleY.editingFinished.emit()
imagename = '{}_{}'.format(self.__class__.__name__, 'multiple.png')
self.python(imagename)
def python(self, imagename):
"""
Test script writer.
"""
# Test that the script is created
self._window._window.setOptions(test=True, size=[600,600])
self._widget.OutputPlugin.write.emit(self._repr_script)
self.assertTrue(os.path.exists(self._repr_script))
# Inject write command
with open(self._repr_script, 'a') as fid:
fid.write('\nwindow.write({})'.format(repr(imagename)))
# Execute the script
subprocess.call(['python', self._repr_script], stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
self.assertTrue(os.path.exists(imagename))
# Diff the image from the script
differ = mooseutils.ImageDiffer(os.path.join('gold', imagename), imagename)
print(differ.message())
self.assertFalse(differ.fail())
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
|
nuclear-wizard/moose
|
python/peacock/tests/exodus_tab/test_ExodusPluginManager.py
|
Python
|
lgpl-2.1
| 6,394
|
[
"MOOSE"
] |
a8d619d4f355351fd0c9bddc6d418ff047979776b6f90a7191242deb1b835a0a
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape_tensor", "batch_shape", "event_shape_tensor", "event_shape",
"sample", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "stddev", "mode",
"covariance"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
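# Illustrative sketch (not part of the original file): `_copy_fn` returns a new
# function object that shares the original's code and globals, so its docstring
# can be rewritten (as the metaclass below does) without touching the original.
# All names here are hypothetical and unused elsewhere.
def _copy_fn_example():
  """Demonstrate `_copy_fn`; for illustration only."""
  def original():
    """Original docstring."""
    return 42
  duplicate = _copy_fn(original)
  duplicate.__doc__ = "Rewritten docstring."
  assert duplicate() == 42
  assert original.__doc__ == "Original docstring."
  return duplicate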
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
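# Illustrative sketch (not part of the original file): `_update_docstring`
# splices the appended text just above the final "Args:" heading of the old
# docstring.  The strings and the function name below are hypothetical.
def _update_docstring_example():
  """Demonstrate `_update_docstring`; for illustration only."""
  old = "Does a thing.\n\nArgs:\n  x: an input."
  new = _update_docstring(old, "Additional notes from a subclass.")
  assert "Additional notes" in new.split("Args:")[0]
  return new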
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
  Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradients` call and instead use policy
gradients / surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
  positive real numbers `concentration1` and `concentration0`, and does not have
  a well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used `parameters = locals()`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._batch_shape()
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._event_shape()
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
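# Illustrative sketch (added; not in the original file): `variance` and
# `stddev` fall back on each other, so a subclass may implement either hook.
# For a hypothetical distribution with a `scale` parameter, implementing only
#
#   def _stddev(self):
#     return array_ops.identity(self.scale)
#
# is enough; `variance()` then returns `square(self._stddev())` via the
# fallback above.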
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: The name to give this op.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
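# Shape example (added for clarity; not in the original file): for a batch of
# two 3-dimensional multivariate distributions (batch_shape=[2],
# event_shape=[3]), `covariance()` returns a Tensor of shape [2, 3, 3]. For a
# matrix-valued event of shape [2, 2], the event is vectorized, so
# k' = reduce_prod([2, 2]) = 4 and the result has shape batch_shape + [4, 4].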
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32),
array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
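# Example (added for clarity; not in the original file): a scalar sample shape
# such as 5 is reshaped to the vector [5] and returned with prod=5; a vector
# such as [3, 4] is returned unchanged with prod=12; rank-2 or higher input
# raises ValueError.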
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
strint/tensorflow
|
tensorflow/contrib/distributions/python/ops/distribution.py
|
Python
|
apache-2.0
| 37,586
|
[
"Gaussian"
] |
906fce79e06ba3b6c0f6b7433a77393970e1900dc9d23cbcb6cba414c34bebfa
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates synthetic scenes containing lens flare."""
import math
import tensorflow as tf
from flare_removal.python import utils
def add_flare(scene,
flare,
noise,
flare_max_gain = 10.0,
apply_affine = True,
training_res = 512):
"""Adds flare to natural images.
Here the natural images are in sRGB. They are first linearized before flare
patterns are added. The result is then converted back to sRGB.
Args:
scene: Natural image batch in sRGB.
flare: Lens flare image batch in sRGB.
noise: Strength of the additive Gaussian noise. For each image, the Gaussian
variance is drawn from a scaled Chi-squared distribution, where the scale
is defined by `noise`.
flare_max_gain: Maximum gain applied to the flare images in the linear
domain. RGB gains are applied randomly and independently, not exceeding
this maximum.
apply_affine: Whether to apply affine transformation.
training_res: Resolution of training images. Images must be square, and this
value specifies the side length.
Returns:
- Flare-free scene in sRGB.
- Flare-only image in sRGB.
- Scene with flare in sRGB.
- Gamma value used during synthesis.
"""
batch_size, flare_input_height, flare_input_width, _ = flare.shape
# Since the gamma encoding is unknown, we use a random value so that the model
# will hopefully generalize to a reasonable range of gammas.
gamma = tf.random.uniform([], 1.8, 2.2)
flare_linear = tf.image.adjust_gamma(flare, gamma)
# Remove DC background in flare.
flare_linear = utils.remove_background(flare_linear)
if apply_affine:
rotation = tf.random.uniform([batch_size], minval=-math.pi, maxval=math.pi)
shift = tf.random.normal([batch_size, 2], mean=0.0, stddev=10.0)
shear = tf.random.uniform([batch_size, 2],
minval=-math.pi / 9,
maxval=math.pi / 9)
scale = tf.random.uniform([batch_size, 2], minval=0.9, maxval=1.2)
flare_linear = utils.apply_affine_transform(
flare_linear,
rotation=rotation,
shift_x=shift[:, 0],
shift_y=shift[:, 1],
shear_x=shear[:, 0],
shear_y=shear[:, 1],
scale_x=scale[:, 0],
scale_y=scale[:, 1])
flare_linear = tf.clip_by_value(flare_linear, 0.0, 1.0)
flare_linear = tf.image.crop_to_bounding_box(
flare_linear,
offset_height=(flare_input_height - training_res) // 2,
offset_width=(flare_input_width - training_res) // 2,
target_height=training_res,
target_width=training_res)
flare_linear = tf.image.random_flip_left_right(
tf.image.random_flip_up_down(flare_linear))
# First normalize the white balance. Then apply random white balance.
flare_linear = utils.normalize_white_balance(flare_linear)
rgb_gains = tf.random.uniform([3], 0, flare_max_gain, dtype=tf.float32)
flare_linear *= rgb_gains
# Further augmentation on flare patterns: random blur and DC offset.
blur_size = tf.random.uniform([], 0.1, 3)
flare_linear = utils.apply_blur(flare_linear, blur_size)
offset = tf.random.uniform([], -0.02, 0.02)
flare_linear = tf.clip_by_value(flare_linear + offset, 0.0, 1.0)
flare_srgb = tf.image.adjust_gamma(flare_linear, 1.0 / gamma)
# Scene augmentation: random crop and flips.
scene_linear = tf.image.adjust_gamma(scene, gamma)
scene_linear = tf.image.random_crop(scene_linear, flare_linear.shape)
scene_linear = tf.image.random_flip_left_right(
tf.image.random_flip_up_down(scene_linear))
# Additive Gaussian noise. The Gaussian's variance is drawn from a Chi-squared
# distribution. This is equivalent to drawing the Gaussian's standard
# deviation from a truncated normal distribution, as shown below.
sigma = tf.abs(tf.random.normal([], 0, noise))
noise = tf.random.normal(scene_linear.shape, 0, sigma)
scene_linear += noise
# Random digital gain.
gain = tf.random.uniform([], 0, 1.2) # varying the intensity scale
scene_linear = tf.clip_by_value(gain * scene_linear, 0.0, 1.0)
scene_srgb = tf.image.adjust_gamma(scene_linear, 1.0 / gamma)
# Combine the flare-free scene with a flare pattern to produce a synthetic
# training example.
combined_linear = scene_linear + flare_linear
combined_srgb = tf.image.adjust_gamma(combined_linear, 1.0 / gamma)
combined_srgb = tf.clip_by_value(combined_srgb, 0.0, 1.0)
return (utils.quantize_8(scene_srgb), utils.quantize_8(flare_srgb),
utils.quantize_8(combined_srgb), gamma)
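# Illustrative usage sketch (added; not part of the original module). The
# shapes and arguments below are assumptions for demonstration: flare images
# must be larger than `training_res` for the center crop above to be valid.
def _example_add_flare():
  scene = tf.random.uniform([2, 640, 640, 3])   # sRGB scene batch
  flare = tf.random.uniform([2, 752, 1008, 3])  # sRGB flare batch
  scene_srgb, flare_srgb, combined_srgb, gamma = add_flare(
      scene, flare, noise=0.01, flare_max_gain=10.0, training_res=512)
  return scene_srgb, flare_srgb, combined_srgb, gamma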
def run_step(scene,
flare,
model,
loss_fn,
noise = 0.0,
flare_max_gain = 10.0,
flare_loss_weight = 0.0,
training_res = 512):
"""Executes a forward step."""
scene, flare, combined, gamma = add_flare(
scene,
flare,
flare_max_gain=flare_max_gain,
noise=noise,
training_res=training_res)
pred_scene = model(combined)
pred_flare = utils.remove_flare(combined, pred_scene, gamma)
flare_mask = utils.get_highlight_mask(flare)
# Fill the saturated region with the ground truth, so that it incurs no L1/L2
# loss there and behaves better for the perceptual loss, since the region
# matches the surrounding scene.
masked_scene = pred_scene * (1 - flare_mask) + scene * flare_mask
loss_value = loss_fn(scene, masked_scene)
if flare_loss_weight > 0:
masked_flare = pred_flare * (1 - flare_mask) + flare * flare_mask
loss_value += flare_loss_weight * loss_fn(flare, masked_flare)
image_summary = tf.concat([combined, pred_scene, scene, pred_flare, flare],
axis=2)
return loss_value, image_summary
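# Illustrative training-step sketch (added; not part of the original module).
# The model, loss, and optimizer below are assumptions for demonstration:
#
#   model = ...  # any image-to-image network
#   loss_fn = tf.keras.losses.MeanAbsoluteError()
#   optimizer = tf.keras.optimizers.Adam(1e-4)
#   with tf.GradientTape() as tape:
#     loss_value, summary = run_step(scene_batch, flare_batch, model, loss_fn,
#                                    noise=0.01, flare_loss_weight=1.0)
#   grads = tape.gradient(loss_value, model.trainable_variables)
#   optimizer.apply_gradients(zip(grads, model.trainable_variables))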
|
google-research/google-research
|
flare_removal/python/synthesis.py
|
Python
|
apache-2.0
| 6,330
|
[
"Gaussian"
] |
932b8f0362511cbf7396a9ba80c9a3b29d308cb08d2336047df332b4fd15cdc7
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the "deploy" module for the validate_bom script.
It is responsible for deploying spinnaker (via Halyard) remotely.
"""
from multiprocessing.pool import ThreadPool
import json
import logging
import os
import shutil
import stat
import sys
import tempfile
import time
import traceback
from buildtool import (
add_parser_argument,
check_subprocess,
check_subprocess_sequence,
check_subprocesses_to_logfile,
scan_logs_for_install_errors,
run_subprocess,
write_to_path,
raise_and_log_error,
ConfigError,
ExecutionError,
ResponseError,
TimeoutError,
UnexpectedError)
SUPPORTED_DEPLOYMENT_TYPES = ['localdebian', 'distributed']
SUPPORTED_DISTRIBUTED_PLATFORMS = ['kubernetes', 'kubernetes_v2']
HALYARD_SERVICES = ['halyard']
SPINNAKER_SERVICES = [
'clouddriver', 'echo', 'fiat', 'front50', 'gate', 'igor', 'orca',
'rosco', 'kayenta', 'monitoring'
]
def decode_json(data):
try:
return json.JSONDecoder().decode(data)
except (ValueError, TypeError) as err:
logging.error('Error decoding JSON: %s\n%s\n',
err.message, data)
raise
def replace_ha_services(services, options):
"""Replace services with their HA services.
Given a list of services and options, return a new list of services where
services that are enabled for HA are replaced with their HA counterparts.
"""
transform_map = {}
if options.ha_clouddriver_enabled:
transform_map['clouddriver'] = \
['clouddriver-caching', 'clouddriver-rw', 'clouddriver-ro', 'clouddriver-ro-deck']
if options.ha_echo_enabled:
transform_map['echo'] = \
['echo-scheduler', 'echo-worker']
transformed_services = []
for service in services:
transformed_services.extend(transform_map.get(service, [service]))
return transformed_services
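# Example (added for clarity; not in the original file): with only
# options.ha_clouddriver_enabled set,
#   replace_ha_services(['clouddriver', 'echo'], options)
# returns ['clouddriver-caching', 'clouddriver-rw', 'clouddriver-ro',
# 'clouddriver-ro-deck', 'echo'].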
def ensure_empty_ssh_key(path, user):
"""Ensure there is an ssh key at the given path.
It is assumed that this key has no password associated with it so we
can use it for ssh/scp.
"""
if os.path.exists(path):
return
logging.debug('Creating %s SSH key for user "%s"', path, user)
check_subprocess_sequence([
'ssh-keygen -N "" -t rsa -f {path} -C {user}'.format(
path=path, user=user),
'sed "s/^ssh-rsa/{user}:ssh-rsa/" -i {path}'.format(
user=user, path=path)
])
def write_data_to_secure_path(data, path=None, is_script=False):
"""Write data to a path with user-only access.
Args:
path: [string] Path to file or None to create a temporary file.
is_script: [bool] True if data is a script (and should be executable).
Returns:
path to file written.
"""
# pylint: disable=invalid-name
if path is None:
fd, path = tempfile.mkstemp()
else:
fd = os.open(path, os.O_WRONLY | os.O_CREAT)
maybe_executable = stat.S_IXUSR if is_script else 0
flags = stat.S_IRUSR | stat.S_IWUSR | maybe_executable
os.fchmod(fd, flags)
os.write(fd, data.encode('utf-8'))
os.close(fd)
return path
def write_script_to_path(script, path=None):
"""Write the script to a path as a secure, user-only executable file.
Args:
script: [list] Sequence of bash statements to script.
path: [string] Path to file to write, or None to create a temp file.
Returns:
path written
"""
data = ['#!/bin/bash',
'set -e',
'set -x']
data.extend(script)
return write_data_to_secure_path(
'\n'.join(data), path=path, is_script=True)
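# Example (added for clarity; not in the original file):
#   path = write_script_to_path(['echo hello', 'hal --version'])
# writes a user-only executable temp file whose contents are "#!/bin/bash",
# "set -e", "set -x", followed by the two statements, and returns its path.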
class BaseValidateBomDeployer(object):
"""Base class/interface for Deployer that uses Halyard to deploy Spinnaker.
This class is not intended to be constructed directly. Instead see the
free function make_deployer() in this module.
"""
@property
def options(self):
"""The options bound at construction."""
return self.__options
@property
def metrics(self):
"""The metrics regisry bound at construction."""
return self.__metrics
@property
def hal_user(self):
"""Returns the Halyard User within the deployment VM."""
return self.__hal_user
def __init__(self, options, metrics, runtime_class=None):
if runtime_class:
self.__spinnaker_deployer = runtime_class(options, metrics)
else:
self.__spinnaker_deployer = self
self.__options = options
self.__metrics = metrics
self.__hal_user = options.deploy_hal_user
logging.info('hal_user="%s"', self.__hal_user)
def make_port_forward_command(self, service, local_port, remote_port):
"""Return the command used to forward ports to the given service.
Returns:
array of commandline arguments to create a subprocess with.
"""
return self.__spinnaker_deployer.do_make_port_forward_command(
service, local_port, remote_port)
def deploy(self, init_script, config_script, files_to_upload):
"""Deploy and configure spinnaker.
The deployment configuration is specified via the bound options.
The runtime configuration is passed to the call.
Args:
init_script: [list] The sequence of bash commands to run in order
to prepare the host before installing halyard and configuring.
config_script: [list] The sequence of bash commands to run in order
to configure spinnaker.
files_to_upload: [set] A set of file paths to upload to the deployed
instance before running the init_script. Presumably these will
be referenced by the init_script or config_script.
"""
deploy_labels = {}
self.__metrics.track_and_time_call(
'DeploySpinnaker',
deploy_labels, self.__metrics.default_determine_outcome_labels,
self.__wrapped_deploy, init_script, config_script, files_to_upload)
def __wrapped_deploy(self, init_script, config_script, files_to_upload):
platform = self.options.deploy_hal_platform
logging.info('Deploying with hal on %s...', platform)
script = list(init_script)
self.add_install_hal_script_statements(script)
if self.options.halyard_config_bucket_credentials:
files_to_upload.add(self.options.halyard_config_bucket_credentials)
self.add_inject_halyard_application_default_credentials(
self.options.halyard_config_bucket_credentials, script)
self.add_platform_deploy_script_statements(script)
# Add the version first to avoid warnings or facilitate checks
# with the configuration commands
script.append('hal -q --log=info config version edit'
' --version {version}'
.format(version=self.options.deploy_version))
script.extend(config_script)
self.add_hal_deploy_script_statements(script)
# Dump the hal config so we log it for posterity
script.append('hal -q --log=info config')
script.append('sudo hal -q --log=info deploy apply')
self.add_post_deploy_statements(script)
if not self.options.deploy_deploy:
logging.warning('Skipping deployment because --deploy_deploy=false\n')
return
self.do_deploy(script, files_to_upload)
logging.info('Finished deploying to %s', platform)
def undeploy(self):
undeploy_labels = {}
self.__metrics.track_and_time_call(
'UndeploySpinnaker',
undeploy_labels, self.__metrics.default_determine_outcome_labels,
self.__wrapped_undeploy)
def __wrapped_undeploy(self):
"""Remove the spinnaker deployment and reclaim resources."""
# Consider also undeploying from options.deploy_spinnaker_platform
# with self.__runtime_deployer
platform = self.options.deploy_hal_platform
logging.info('Undeploying hal on %s...', platform)
if not self.options.deploy_undeploy:
logging.warning(
'Skipping undeploy because --deploy_undeploy=false\n')
return
self.do_undeploy()
logging.info('Finished undeploying from %s', platform)
def collect_logs(self):
"""Collect all the microservice log files."""
log_dir = os.path.join(self.options.log_dir, 'service_logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
def fetch_service_log(service):
try:
deployer = (self if service in HALYARD_SERVICES
else self.__spinnaker_deployer)
deployer.do_fetch_service_log_file(service, log_dir)
except Exception as ex:
message = 'Error fetching log for service "{service}": {ex}'.format(
service=service, ex=ex)
if ex.message.find('No such file') >= 0:
message += '\n Perhaps the service never started.'
# Don't log since the error was already captured.
else:
logging.error(message)
message += '\n{trace}'.format(
trace=traceback.format_exc())
write_data_to_secure_path(
message, os.path.join(log_dir, service + '.log'))
logging.info('Collecting server log files into "%s"', log_dir)
all_services = replace_ha_services(SPINNAKER_SERVICES, self.options)
all_services.extend(HALYARD_SERVICES)
thread_pool = ThreadPool(len(all_services))
thread_pool.map(fetch_service_log, all_services)
thread_pool.terminate()
def do_make_port_forward_command(self, service, local_port, remote_port):
"""Hook for concrete platforms to return the port forwarding command.
Returns:
array of commandline arguments to create a subprocess with.
"""
raise NotImplementedError(self.__class__.__name__)
def do_deploy(self, script, files_to_upload):
"""Hook for specialized platforms to implement the concrete deploy()."""
# pylint: disable=unused-argument
raise NotImplementedError(self.__class__.__name__)
def do_undeploy(self):
"""Hook for specialized platforms to implement the concrete undeploy()."""
raise NotImplementedError(self.__class__.__name__)
def add_inject_halyard_application_default_credentials(
self, local_path, script):
"""Inject google application credentials into halyards startup script.
This is only so we can install halyard against a halyard test repo.
We're doing this injection because halyard does not explicitly support this
use case from installation, though does support the use of application
default credentials.
"""
script.append('first=$(head -1 /opt/halyard/bin/halyard)')
script.append(
'inject="export GOOGLE_APPLICATION_CREDENTIALS={path}"'
.format(path='$(pwd)/' + os.path.basename(local_path)))
script.append('remaining=$(tail -n +2 /opt/halyard/bin/halyard)')
script.append('cat <<EOF | sudo tee /opt/halyard/bin/halyard\n'
'$first\n$inject\n$remaining\n'
'EOF')
script.append('sudo chmod 755 /opt/halyard/bin/halyard')
# Kill running halyard so it restarts with credentials.
# This is awaiting support in halyard for terminating the job.
# In the meantime, we'll kill all the java processes. Since this
# is run on a newly provisioned VM, it should only be halyard.
script.append('echo "Using nuclear option to stop existing halyard"')
script.append('killall java || true') # hack
script.append('echo "Restarting halyard..."')
script.append('sudo su -c "hal -v" -s /bin/bash {user}'
.format(user=self.options.deploy_hal_user))
script.append('for i in `seq 1 30`; do'
' if hal --ready &> /dev/null; then break; fi;'
' sleep 1; done')
def add_install_hal_script_statements(self, script):
"""Adds the sequence of Bash statements to fetch and install halyard."""
options = self.options
script.append('curl -s -O {url}'.format(url=options.halyard_install_script))
install_params = ['-y']
if options.halyard_config_bucket:
install_params.extend(['--config-bucket', options.halyard_config_bucket])
if options.halyard_bucket_base_url:
install_params.extend(['--halyard-bucket-base-url',
options.halyard_bucket_base_url])
if options.halyard_version:
install_params.extend(['--version', options.halyard_version])
if self.hal_user:
install_params.extend(['--user', self.hal_user])
if options.spinnaker_repository:
install_params.extend(
['--spinnaker-repository', options.spinnaker_repository])
if options.spinnaker_registry:
install_params.extend(
['--spinnaker-registry', options.spinnaker_registry])
script.append('sudo bash ./InstallHalyard.sh {install_params}'
.format(install_params=' '.join(install_params)))
return script
def add_platform_deploy_script_statements(self, script):
"""Hook for deployment platform to add specific hal statements."""
pass
def add_hal_deploy_script_statements(self, script):
"""Adds the hal deploy statements prior to "apply"."""
options = self.options
type_args = ['--type', options.deploy_spinnaker_type]
if options.deploy_spinnaker_type == 'distributed':
# Kubectl required for the next hal command, so install it if needed.
script.append(
'if ! `which kubectl >& /dev/null`; then'
' curl -LO https://storage.googleapis.com/kubernetes-release/release'
'/$(curl -s https://storage.googleapis.com/kubernetes-release/release'
'/stable.txt)/bin/linux/amd64/kubectl'
'; chmod +x ./kubectl'
'; sudo mv ./kubectl /usr/local/bin/kubectl'
'; fi')
if options.injected_deploy_spinnaker_account:
type_args.extend(['--account-name',
options.injected_deploy_spinnaker_account])
if options.deploy_distributed_platform == 'kubernetes':
script.append('hal -q --log=info config deploy edit --location {namespace}'
.format(namespace=self.options.deploy_k8s_namespace))
elif options.deploy_distributed_platform == 'kubernetes_v2':
script.append('hal -q --log=info config deploy edit --location {namespace}'
.format(namespace=self.options.deploy_k8s_v2_namespace))
script.append('hal -q --log=info config deploy edit {args}'
.format(args=' '.join(type_args)))
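# Example of statements this can append (added for clarity; not in the
# original file), assuming a "distributed" deployment on kubernetes with an
# injected account named "my-k8s-account" (the kubectl install snippet is
# appended first and omitted here):
#   hal -q --log=info config deploy edit --location spinnaker
#   hal -q --log=info config deploy edit --type distributed --account-name my-k8s-account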
def add_post_deploy_statements(self, script):
"""Add any statements following "hal deploy apply"."""
pass
class KubernetesValidateBomDeployer(BaseValidateBomDeployer):
"""Concrete deployer used to deploy Hal onto Google Cloud Platform.
This class is not intended to be constructed directly. Instead see the
free function make_deployer() in this module.
"""
def __init__(self, options, metrics, **kwargs):
super(KubernetesValidateBomDeployer, self).__init__(
options, metrics, **kwargs)
@classmethod
def init_platform_argument_parser(cls, parser, defaults):
"""Adds custom configuration parameters to argument parser.
This is a helper function for the free function init_argument_parser().
"""
add_parser_argument(
parser, 'deploy_k8s_namespace', defaults, 'spinnaker',
help='Namespace for the account Spinnaker is deployed into.')
@classmethod
def validate_options_helper(cls, options):
"""Adds custom configuration parameters to argument parser.
This is a helper function for make_deployer().
"""
if options.deploy_distributed_platform != 'kubernetes':
return
if not options.k8s_account_name:
raise_and_log_error(
ConfigError('--deploy_distributed_platform="kubernetes" requires'
' a --k8s_account_name be configured.'))
if hasattr(options, "injected_deploy_spinnaker_account"):
raise_and_log_error(
UnexpectedError('deploy_spinnaker_account was already set to "{0}"'
.format(options.injected_deploy_spinnaker_account)))
options.injected_deploy_spinnaker_account = options.k8s_account_name
def __get_pod_name(self, k8s_namespace, service):
"""Determine the pod name for the deployed service."""
options = self.options
flags = ' --namespace {namespace} --logtostderr=false'.format(
namespace=k8s_namespace)
kubectl_command = 'kubectl {context} get pods {flags}'.format(
context=('--context {0}'.format(options.k8s_account_context)
if options.k8s_account_context
else ''),
flags=flags)
retcode, stdout = run_subprocess(
'{command}'
' | gawk -F "[[:space:]]+" "/{service}-v/ {{print \\$1}}"'
' | tail -1'.format(
command=kubectl_command, service=service),
shell=True)
pod = stdout.strip()
if not pod:
message = 'There is no pod for "{service}" in {namespace}'.format(
service=service, namespace=k8s_namespace)
raise_and_log_error(ConfigError(message, cause='NoPod'))
if retcode != 0:
message = 'Could not find pod for "{service}".: {error}'.format(
service=service,
error=stdout.strip())
raise_and_log_error(ExecutionError(message, program='kubectl'))
else:
logging.debug('pod "%s" -> %s', service, stdout)
return stdout.strip()
def do_make_port_forward_command(self, service, local_port, remote_port):
"""Implements interface."""
options = self.options
k8s_namespace = options.deploy_k8s_namespace
service_pod = self.__get_pod_name(k8s_namespace, service)
return [
'kubectl', '--namespace', k8s_namespace,
'port-forward', service_pod,
'{local}:{remote}'.format(local=local_port, remote=remote_port)
]
def do_deploy(self, script, files_to_upload):
"""Implements the BaseBomValidateDeployer interface."""
# This is not yet supported in this script.
# To deploy spinnaker to kubernetes, you need to go through
# a halyard VM deployment. Halyard itself can be deployed to K8s,
# but this script doesn't do that.
super(KubernetesValidateBomDeployer, self).do_deploy(
script, files_to_upload)
def do_undeploy(self):
"""Implements the BaseBomValidateDeployer interface."""
super(KubernetesValidateBomDeployer, self).do_undeploy()
# kubectl delete namespace spinnaker
def do_fetch_service_log_file(self, service, log_dir):
"""Retrieve log file for the given service's pod.
Args:
service: [string] The service's log to get
log_dir: [string] The directory name to write the logs into.
"""
if service == 'monitoring':
# monitoring is in a sidecar of each service
return
options = self.options
k8s_namespace = options.deploy_k8s_namespace
service_pod = self.__get_pod_name(k8s_namespace, service)
containers = ['spin-' + service]
if options.monitoring_install_which:
containers.append('spin-monitoring-daemon')
for container in containers:
if container == 'spin-monitoring-daemon':
path = os.path.join(log_dir, service + '_monitoring.log')
else:
path = os.path.join(log_dir, service + '.log')
retcode, stdout = run_subprocess(
'kubectl -n {namespace} -c {container} {context} logs {pod}'
.format(namespace=k8s_namespace,
container=container,
context=('--context {0}'.format(options.k8s_account_context)
if options.k8s_account_context
else ''),
pod=service_pod),
shell=True)
write_data_to_secure_path(stdout, path)
class KubernetesV2ValidateBomDeployer(BaseValidateBomDeployer):
"""Concrete deployer used to deploy Hal onto Google Cloud Platform.
This class is not intended to be constructed directly. Instead see the
free function make_deployer() in this module.
"""
def __init__(self, options, metrics, **kwargs):
super(KubernetesV2ValidateBomDeployer, self).__init__(
options, metrics, **kwargs)
@classmethod
def init_platform_argument_parser(cls, parser, defaults):
"""Adds custom configuration parameters to argument parser.
This is a helper function for the free function init_argument_parser().
"""
add_parser_argument(
parser, 'deploy_k8s_v2_namespace', defaults, 'spinnaker',
help='Namespace for the account Spinnaker is deployed into.')
@classmethod
def validate_options_helper(cls, options):
"""Adds custom configuration parameters to argument parser.
This is a helper function for make_deployer().
"""
if options.deploy_distributed_platform != 'kubernetes_v2':
return
if not options.k8s_v2_account_name:
raise_and_log_error(
ConfigError('--deploy_distributed_platform="kubernetes_v2" requires'
' a --k8s_v2_account_name be configured.'))
if hasattr(options, "injected_deploy_spinnaker_account"):
raise_and_log_error(
UnexpectedError('deploy_spinnaker_account was already set to "{0}"'
.format(options.injected_deploy_spinnaker_account)))
options.injected_deploy_spinnaker_account = options.k8s_v2_account_name
def __get_pod_name(self, k8s_v2_namespace, service):
"""Determine the pod name for the deployed service."""
options = self.options
flags = ' --namespace {namespace} --logtostderr=false'.format(
namespace=k8s_v2_namespace)
kubectl_command = 'kubectl {context} get pods {flags}'.format(
context=('--context {0}'.format(options.k8s_v2_account_context)
if options.k8s_v2_account_context
else ''),
flags=flags)
retcode, stdout = run_subprocess(
'{command}'
' | gawk -F "[[:space:]]+" "/{service}/ {{print \\$1}}"'
' | tail -1'.format(
command=kubectl_command, service=service),
shell=True)
pod = stdout.strip()
if not pod:
message = 'There is no pod for "{service}" in {namespace}'.format(
service=service, namespace=k8s_v2_namespace)
raise_and_log_error(ConfigError(message, cause='NoPod'))
if retcode != 0:
message = 'Could not find pod for "{service}".: {error}'.format(
service=service,
error=stdout.strip())
raise_and_log_error(ExecutionError(message, program='kubectl'))
else:
logging.debug('pod "%s" -> %s', service, stdout)
return stdout.strip()
def do_make_port_forward_command(self, service, local_port, remote_port):
"""Implements interface."""
options = self.options
k8s_v2_namespace = options.deploy_k8s_v2_namespace
service_pod = self.__get_pod_name(k8s_v2_namespace, service)
return [
'kubectl', '--namespace', k8s_v2_namespace,
'port-forward', service_pod,
'{local}:{remote}'.format(local=local_port, remote=remote_port)
]
def do_deploy(self, script, files_to_upload):
"""Implements the BaseBomValidateDeployer interface."""
# This is not yet supported in this script.
# To deploy spinnaker to kubernetes, you need to go through
# a halyard VM deployment. Halyard itself can be deployed to K8s,
# but this script doesn't do that.
super(KubernetesV2ValidateBomDeployer, self).do_deploy(
script, files_to_upload)
def do_undeploy(self):
"""Implements the BaseBomValidateDeployer interface."""
super(KubernetesV2ValidateBomDeployer, self).do_undeploy()
# kubectl delete namespace spinnaker
def do_fetch_service_log_file(self, service, log_dir):
"""Retrieve log file for the given service's pod.
Args:
service: [string] The service's log to get
log_dir: [string] The directory name to write the logs into.
"""
if service == 'monitoring':
# monitoring is in a sidecar of each service
return
options = self.options
k8s_v2_namespace = options.deploy_k8s_v2_namespace
service_pod = self.__get_pod_name(k8s_v2_namespace, service)
containers = [service]
if options.monitoring_install_which:
containers.append('monitoring-daemon')
for container in containers:
if container == 'monitoring-daemon':
path = os.path.join(log_dir, service + '_monitoring.log')
else:
path = os.path.join(log_dir, service + '.log')
retcode, stdout = run_subprocess(
'kubectl -n {namespace} -c {container} {context} logs {pod}'
.format(namespace=k8s_v2_namespace,
container=container,
context=('--context {0}'.format(options.k8s_v2_account_context)
if options.k8s_v2_account_context
else ''),
pod=service_pod),
shell=True)
write_data_to_secure_path(stdout, path)
class GenericVmValidateBomDeployer(BaseValidateBomDeployer):
"""Concrete deployer used to deploy Hal onto Generic VM
This class is not intended to be constructed directly. Instead see the
free function make_deployer() in this module.
"""
@property
def instance_ip(self):
"""The underlying IP address for the deployed instance."""
if not self.__instance_ip:
self.__instance_ip = self.do_determine_instance_ip()
return self.__instance_ip
def set_instance_ip(self, value):
"""Sets the underlying IP address for the deployed instance."""
self.__instance_ip = value
@property
def ssh_key_path(self):
"""Returns the path to the ssh key for the deployment VM."""
return self.__ssh_key_path
@ssh_key_path.setter
def ssh_key_path(self, path):
"""Sets the path to the ssh key to use."""
self.__ssh_key_path = path
def __init__(self, options, metrics, **kwargs):
super(GenericVmValidateBomDeployer, self).__init__(
options, metrics, **kwargs)
self.__instance_ip = None
self.__ssh_key_path = os.path.join(os.environ['HOME'], '.ssh',
'{0}_empty_key'.format(self.hal_user))
def do_make_port_forward_command(self, service, local_port, remote_port):
"""Implements interface."""
return [
'ssh', '-i', self.__ssh_key_path,
'-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile=/dev/null',
'{user}@{ip}'.format(user=self.hal_user, ip=self.instance_ip),
'-L', '{local_port}:localhost:{remote_port}'.format(
local_port=local_port, remote_port=remote_port),
'-N']
def do_determine_instance_ip(self):
"""Hook for determining the ip address of the hal instance."""
raise NotImplementedError(self.__class__.__name__)
def do_create_vm(self, options):
"""Hook for concrete deployer to craete the VM."""
raise NotImplementedError(self.__class__.__name__)
def __upload_files_helper(self, files_to_upload):
copy_files = (
'scp'
' -i {ssh_key_path}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {files}'
' {user}@{ip}:~'
.format(ssh_key_path=self.__ssh_key_path,
files=' '.join(files_to_upload),
user=self.hal_user,
ip=self.instance_ip))
logging.info('Copying deployment and configuration files')
# pylint: disable=unused-variable
for retry in range(0, 10):
returncode, _ = run_subprocess(copy_files)
if returncode == 0:
break
time.sleep(2)
if returncode != 0:
check_subprocess(copy_files)
def __wait_for_ssh_helper(self):
logging.info('Waiting for ssh %s@%s...', self.hal_user, self.instance_ip)
end_time = time.time() + 30
while time.time() < end_time:
retcode, _ = run_subprocess(
'ssh'
' -i {ssh_key}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {user}@{ip}'
' "exit 0"'
.format(user=self.hal_user,
ip=self.instance_ip,
ssh_key=self.__ssh_key_path))
if retcode == 0:
logging.info('%s is ready', self.instance_ip)
break
time.sleep(1)
def attempt_install(self, script_path, retry):
"""Attempt to the install script on the remote instance.
Bintray is flaky making this not uncommon to fail intermittently.
Therefore, it is intended that this function may be called multiple
times on the same instance.
"""
attempt_decorator = '+%d' % retry if retry > 0 else ''
logging.info('Configuring deployment%s',
' retry=%d' % retry if retry else '')
logfile = os.path.join(
self.options.output_dir,
'install_spinnaker-%d%s.log' % (os.getpid(), attempt_decorator))
try:
command = (
'ssh'
' -i {ssh_key}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {user}@{ip}'
' bash -l -c ./{script_name}'
.format(user=self.hal_user,
ip=self.instance_ip,
ssh_key=self.__ssh_key_path,
script_name=os.path.basename(script_path)))
check_subprocesses_to_logfile('install spinnaker', logfile, [command])
except ExecutionError as error:
scan_logs_for_install_errors(logfile)
return ExecutionError('Halyard deployment failed: %s' % error.message,
program='install')
except Exception as ex:
return UnexpectedError(ex.message)
return None
def do_deploy(self, script, files_to_upload):
"""Implements the BaseBomValidateDeployer interface."""
options = self.options
ensure_empty_ssh_key(self.__ssh_key_path, self.hal_user)
script_parts = []
for path in files_to_upload:
filename = os.path.basename(path)
script_parts.append('sudo chmod 600 {file}'.format(file=filename))
script_parts.append('sudo chown {user}:{user} {file}'
.format(user=self.hal_user, file=filename))
script_parts.extend(script)
script_path = write_script_to_path(script_parts, path=None)
files_to_upload.add(script_path)
try:
self.do_create_vm(options)
self.__upload_files_helper(files_to_upload)
self.__wait_for_ssh_helper()
except Exception as ex:
raise_and_log_error(
ExecutionError('Caught "%s" provisioning vm' % ex.message,
program='provisionVm'))
finally:
shutil.copyfile(script_path,
os.path.join(options.output_dir, 'install-script.sh'))
os.remove(script_path)
files_to_upload.remove(script_path) # in case we need to retry
error = None
max_retries = 10
install_labels = {}
for retry in range(0, max_retries):
error = self.metrics.track_and_time_call(
'InstallSpinnaker',
install_labels,
self.metrics.determine_outcome_labels_from_error_result,
self.attempt_install, script_path, retry)
if not error:
break
logging.warning('Encountered an error during install: %s', error.message)
if retry < (max_retries - 1):
# Re-upload the files because the script may have moved them around,
# so re-running it won't find them anymore.
self.__upload_files_helper(files_to_upload)
logging.debug('Re-uploading install files...')
# Clear halyard history
clear_halyard_command = (
'ssh'
' -i {ssh_key}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {user}@{ip}'
' "hal deploy clean || true; echo "Y" | sudo ~/.hal/uninstall.sh || true;"'
.format(user=self.hal_user,
ip=self.instance_ip,
ssh_key=self.__ssh_key_path))
run_subprocess(clear_halyard_command)
logging.debug('Waiting a minute before retrying...')
time.sleep(60)
if error:
raise_and_log_error(error)
def do_fetch_service_log_file(self, service, log_dir):
"""Implements the BaseBomValidateDeployer interface."""
write_data_to_secure_path('', os.path.join(log_dir, service + '.log'))
retcode, stdout = run_subprocess(
'ssh'
' -i {ssh_key}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {user}@{ip}'
' "if [[ -f /var/log/spinnaker/{service_dir}/{service_name}.log ]];'
' then cat /var/log/spinnaker/{service_dir}/{service_name}.log;'
' else command -v journalctl >/dev/null && journalctl -u {service_name}; fi"'
.format(user=self.hal_user,
ip=self.instance_ip,
ssh_key=self.ssh_key_path,
service_dir=service,
service_name=service))
if retcode != 0:
logging.warning('Failed obtaining %s.log: %s', service, stdout)
write_to_path(stdout, os.path.join(log_dir, service + '.log'))
class AwsValidateBomDeployer(GenericVmValidateBomDeployer):
"""Concrete deployer used to deploy Hal onto Amazon EC2
This class is not intended to be constructed directly. Instead see the
free function make_deployer() in this module.
"""
@classmethod
def init_platform_argument_parser(cls, parser, defaults):
"""Adds custom configuration parameters to argument parser.
This is a helper function for the free function init_argument_parser().
"""
add_parser_argument(
parser, 'deploy_aws_name', defaults, None,
help='Value for name to tag instance with.')
add_parser_argument(
parser, 'deploy_aws_pem_path', defaults, None,
help='Path to the EC2 PEM file.')
add_parser_argument(
parser, 'deploy_aws_security_group', defaults, None,
help='Name of EC2 security group.')
# Make this instead default to a search for the current image.
# https://cloud-images.ubuntu.com/locator/ec2/
add_parser_argument(
# 14.04 east-1 hvm:ebs
parser, 'deploy_aws_ami', defaults, 'ami-0b542c1d',
help='Image ID to run.')
add_parser_argument(
parser, 'deploy_aws_region', defaults, 'us-east-1',
help='Region to deploy aws instance into.'
' Need an aws profile with this name')
@classmethod
def validate_options_helper(cls, options):
"""Adds custom configuration parameters to argument parser.
This is a helper function for make_deployer().
"""
if not options.deploy_aws_name:
return
if not options.deploy_aws_pem_path:
raise_and_log_error(ConfigError('--deploy_aws_pem_path not specified.'))
if not os.path.exists(options.deploy_aws_pem_path):
raise_and_log_error(
ConfigError('File "{path}" does not exist.'
.format(path=options.deploy_aws_pem_path)))
if not options.deploy_aws_security_group:
raise_and_log_error(
ConfigError('--deploy_aws_security_group not specified.'))
if options.deploy_deploy:
logging.debug('Looking for existing EC2 instance.')
retcode, stdout = run_subprocess(
'aws ec2 describe-instances'
' --profile {region}'
' --filters "Name=tag:Name,Values={name}'
',Name=instance-state-name,Values=running"'
.format(region=options.deploy_aws_region,
name=options.deploy_aws_name))
if retcode != 0:
raise_and_log_error(
ExecutionError('Could not probe AWS: {0}'.format(stdout),
program='aws'))
reservations = decode_json(stdout).get('Reservations')
# For some reason aws is ignoring our filter, so check again just to be
# sure the reservations returned are the ones we asked for.
for reservation in reservations or []:
for tags in reservation.get('Tags', []):
if (tags.get('Key') == 'Name'
and tags.get('Value') == options.deploy_aws_name):
raise_and_log_error(
ConfigError(
'Running "{name}" already exists: {info}'
.format(name=options.deploy_aws_name, info=reservation),
cause='VmExists'))
logging.warning('aws returned another instance - ignore: %s',
reservation)
def __init__(self, options, metrics, **kwargs):
super(AwsValidateBomDeployer, self).__init__(options, metrics, **kwargs)
self.__instance_id = None
self.ssh_key_path = options.deploy_aws_pem_path
def __find_instance_with_name(self, response, name):
"""Locate the desired instance in the response."""
if not response:
logging.error('Unexpected empty response.')
return {}
for elem in response:
for instance in elem['Instances']:
for tag in instance.get('Tags', []):
if tag['Key'] == 'Name' and tag['Value'] == name:
return instance
logging.error('No instance tagged %r found in response.', name)
return {}
def do_determine_instance_ip(self):
"""Implements GenericVmValidateBomDeployer interface."""
options = self.options
logging.debug('Looking up EC2 instance IP.')
retcode, stdout = run_subprocess(
'aws ec2 describe-instances'
' --profile {region}'
' --output json'
' --filters "Name=tag:Name,Values={name}'
',Name=instance-state-name,Values=running"'
.format(region=options.deploy_aws_region,
name=options.deploy_aws_name))
if retcode != 0:
raise_and_log_error(
ExecutionError('Could not determine public IP: {0}'.format(stdout),
program='aws'))
found = decode_json(stdout).get('Reservations')
if not found:
raise_and_log_error(
ResponseError(
'"{0}" is not running'.format(options.deploy_aws_name),
server='ec2'))
try:
# Although we filtered, sometimes aws CLI returns others.
instance = self.__find_instance_with_name(
found, options.deploy_aws_name)
public_ip = instance['PublicIpAddress']
except KeyError:
logging.error('**** aws ec2 describe instances returned %r\n'
'expected "PublicIpAddress" for instance named %s',
found, options.deploy_aws_name)
raise
logging.debug('Using public IP=%s', public_ip)
return public_ip
def do_create_vm(self, options):
"""Implements GenericVmValidateBomDeployer interface."""
pem_basename = os.path.basename(options.deploy_aws_pem_path)
key_pair_name = os.path.splitext(pem_basename)[0]
logging.info('Creating "%s" with key-pair "%s"',
options.deploy_aws_name, key_pair_name)
logging.debug('Creating new EC2 VM.')
response = check_subprocess(
'aws ec2 run-instances'
' --profile {region}'
' --output json'
' --count 1'
' --image-id {ami}'
' --instance-type {type}'
' --key-name {key_pair_name}'
' --security-group-ids {sg}'
.format(region=options.deploy_aws_region,
ami=options.deploy_aws_ami,
type='t2.xlarge', # 4 core x 16G
key_pair_name=key_pair_name,
sg=options.deploy_aws_security_group))
doc = decode_json(response)
self.__instance_id = doc["Instances"][0]["InstanceId"]
logging.info('Created instance id=%s to tag as "%s"',
self.__instance_id, options.deploy_aws_name)
# It's slow to start up, and sometimes there is a race condition in which
# describe-instances doesn't know about our id even though create-tags did,
# or create-tags doesn't know about the new id.
time.sleep(5)
end_time = time.time() + 10*60
did_tag = False
while time.time() < end_time:
if not did_tag:
tag_retcode, _ = run_subprocess(
'aws ec2 create-tags'
' --region {region}'
' --resources {instance_id}'
' --tags "Key=Name,Value={name}"'
.format(region=options.deploy_aws_region,
instance_id=self.__instance_id,
name=options.deploy_aws_name))
did_tag = tag_retcode == 0
if self.__is_ready():
return
time.sleep(5)
raise_and_log_error(
TimeoutError('Giving up waiting for deployment.', cause='ec2'))
def __is_ready(self):
retcode, stdout = run_subprocess(
'aws ec2 describe-instances'
' --profile {region}'
' --output json'
' --instance-ids {id}'
' --query "Reservations[*].Instances[*]"'
.format(region=self.options.deploy_aws_region,
id=self.__instance_id))
if retcode != 0:
logging.warning('Could not determine public IP: %s', stdout)
return False
# The result is an array of reservations of arrays of instances, but we
# only expect one, so fish out the first instance's info.
info = decode_json(stdout)[0][0]
state = info.get('State', {}).get('Name')
if state in ['pending', 'initializing']:
logging.info('Waiting for %s to finish initializing (state=%s)',
self.__instance_id, state)
return False
if state in ['shutting-down', 'terminated']:
raise_and_log_error(ResponseError('VM failed: {0}'.format(info),
server='ec2'))
logging.info('%s is in state %s', self.__instance_id, state)
self.set_instance_ip(info.get('PublicIpAddress'))
# Attempt to ssh into it so we know it is accepting connections when we
# return. It takes time to start.
logging.info('Checking if it is ready for ssh...')
retcode, stdout = run_subprocess(
'ssh'
' -i {ssh_key}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {user}@{ip}'
' "exit 0"'
.format(user=self.hal_user,
ip=self.instance_ip,
ssh_key=self.ssh_key_path))
if retcode == 0:
logging.info('%s is ready', self.instance_ip)
return True
# Sometimes ssh accepts connections but authentication still fails for a
# while. If this is the case, try again through the whole loop so we can
# distinguish it from the VM going away.
logging.info('%s\nNot yet ready...', stdout.strip())
return False
def do_undeploy(self):
"""Implements the BaseBomValidateDeployer interface."""
options = self.options
logging.info('Terminating "%s"', options.deploy_aws_name)
if self.__instance_id:
all_ids = [self.__instance_id]
else:
lookup_response = check_subprocess(
'aws ec2 describe-instances'
' --profile {region}'
' --filters "Name=tag:Name,Values={name}'
',Name=instance-state-name,Values=running"'
.format(region=options.deploy_aws_region,
name=options.deploy_aws_name))
exists = decode_json(lookup_response).get('Reservations')
if not exists:
logging.warning('"%s" is not running', options.deploy_aws_name)
return
all_ids = []
for reservation in exists:
all_ids.extend([instance['InstanceId']
for instance in reservation['Instances']])
for instance_id in all_ids:
logging.info('Terminating "%s" instanceId=%s',
options.deploy_aws_name, instance_id)
retcode, _ = run_subprocess(
'aws ec2 terminate-instances'
' --profile {region}'
' --instance-ids {id}'
.format(region=options.deploy_aws_region, id=instance_id))
if retcode != 0:
logging.warning('Failed to delete "%s" instanceId=%s',
options.deploy_aws_name, instance_id)
class AzureValidateBomDeployer(GenericVmValidateBomDeployer):
"""Concrete deployer used to deploy Hal onto Microsoft Azure
This class is not intended to be constructed directly. Instead see the
free function make_deployer() in this module.
"""
@classmethod
def init_platform_argument_parser(cls, parser, defaults):
"""Adds custom configuration parameters to argument parser.
This is a helper function for the free function init_argument_parser().
"""
add_parser_argument(
parser, 'deploy_azure_location', defaults, 'eastus',
help='Azure region to deploy to if --deploy_hal_platform is "azure".')
add_parser_argument(
parser, 'deploy_azure_resource_group', defaults, None,
help='Azure resource group to deploy to'
' if --deploy_hal_platform is "azure".')
add_parser_argument(
parser, 'deploy_azure_name', defaults, None,
help='Azure VM name to deploy to if --deploy_hal_platform is "azure".')
add_parser_argument(
parser, 'deploy_azure_image',
defaults, 'Canonical:UbuntuServer:14.04.5-LTS:latest',
help='Azure image to deploy.')
@classmethod
def validate_options_helper(cls, options):
"""Adds custom configuration parameters to argument parser.
This is a helper function for make_deployer().
"""
if not options.deploy_azure_resource_group:
raise_and_log_error(
ConfigError('--deploy_azure_resource_group not specified.'))
if not options.deploy_azure_name:
raise_and_log_error(
ConfigError('--deploy_azure_name not specified.'))
if options.deploy_deploy:
retcode, _ = run_subprocess(
'az vm show --resource-group {rg} --vm-name {name}'
.format(rg=options.deploy_azure_resource_group,
name=options.deploy_azure_name))
if retcode == 0:
raise_and_log_error(UnexpectedError(
'"{name}" already exists in resource-group={rg}'
.format(name=options.deploy_azure_name,
rg=options.deploy_azure_resource_group)))
def do_create_vm(self, options):
"""Implements GenericVmValidateBomDeployer interface."""
logging.info('Creating "%s" in resource-group "%s"',
options.deploy_azure_name,
options.deploy_azure_resource_group)
response = check_subprocess(
'az vm create'
' --name {name}'
' --resource-group {rg}'
' --location {location}'
' --image {image}'
' --use-unmanaged-disk'
' --storage-sku Standard_LRS'
' --size Standard_D12_v2_Promo'
' --ssh-key-value {ssh_key_path}.pub'
.format(name=options.deploy_azure_name,
rg=options.deploy_azure_resource_group,
location=options.deploy_azure_location,
image=options.deploy_azure_image,
ssh_key_path=self.ssh_key_path))
self.set_instance_ip(decode_json(response)['publicIpAddress'])
def do_undeploy(self):
"""Implements the BaseBomValidateDeployer interface."""
options = self.options
if options.deploy_spinnaker_type == 'distributed':
run_subprocess(
'ssh'
' -i {ssh_key}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {user}@{ip} sudo hal -q --log=info deploy clean'
.format(user=self.hal_user,
ip=self.instance_ip,
ssh_key=self.ssh_key_path))
check_subprocess(
'az vm delete -y'
' --name {name}'
' --resource-group {rg}'
.format(name=options.deploy_azure_name,
rg=options.deploy_azure_resource_group))
def do_determine_instance_ip(self):
"""Implements GenericVmValidateBomDeployer interface."""
options = self.options
retcode, stdout = run_subprocess(
'az vm list-ip-addresses --name {name} --resource-group {group}'.format(
name=options.deploy_azure_name,
group=options.deploy_azure_resource_group))
if retcode != 0:
raise_and_log_error(
ExecutionError('Could not determine public IP: {0}'.format(stdout),
program='az'))
found = decode_json(stdout)[0].get('virtualMachine')
if not found:
raise_and_log_error(
ResponseError(
'"{0}" is not running'.format(options.deploy_azure_name),
server='az'))
return found['network']['publicIpAddresses'][0]['ipAddress']
class GoogleValidateBomDeployer(GenericVmValidateBomDeployer):
"""Concrete deployer used to deploy Hal onto Google Cloud Platform.
This class is not intended to be constructed directly. Instead see the
free function make_deployer() in this module.
"""
def do_determine_instance_ip(self):
"""Implements GenericVmValidateBomDeployer interface."""
options = self.options
# Note: this used to pass dup_stderr_to_stdout=False with an older API;
# presumably this won't return stderr anymore, or it would corrupt the json.
logging.debug('Looking up IP address for "%s"...',
options.deploy_google_instance)
response = check_subprocess(
'gcloud compute instances describe'
' --format json'
' --account {gcloud_account}'
' --project {project} --zone {zone} {instance}'
.format(gcloud_account=options.deploy_hal_google_service_account,
project=options.deploy_google_project,
zone=options.deploy_google_zone,
instance=options.deploy_google_instance))
nic = decode_json(response)['networkInterfaces'][0]
use_internal_ip = options.deploy_google_use_internal_ip
if use_internal_ip:
logging.debug('Using internal IP=%s', nic['networkIP'])
return nic['networkIP']
ip = nic['accessConfigs'][0]['natIP']
logging.debug('Using natIP=%s', ip)
return ip
def __init__(self, options, metrics, **kwargs):
super(GoogleValidateBomDeployer, self).__init__(options, metrics, **kwargs)
@classmethod
def init_platform_argument_parser(cls, parser, defaults):
"""Adds custom configuration parameters to argument parser.
This is a helper function for the free function init_argument_parser().
"""
add_parser_argument(
parser, 'deploy_google_project', defaults, None,
help='Google project to deploy to if --deploy_hal_platform is "gce".')
add_parser_argument(
parser, 'deploy_google_zone', defaults, 'us-central1-f',
help='Google zone to deploy to if --deploy_hal_platform is "gce".')
add_parser_argument(
parser, 'deploy_google_instance', defaults, None,
help='Google instance to deploy to if --deploy_hal_platform is "gce".')
add_parser_argument(
parser, 'deploy_google_machine_type', defaults, 'n1-standard-4',
help='Google machine type if --deploy_hal_platform is "gce".')
add_parser_argument(
parser, 'deploy_google_image_family', defaults, 'ubuntu-1404-lts',
help='Google image family to deploy if --deploy_hal_platform is "gce".')
add_parser_argument(
parser, 'deploy_google_image_project', defaults, 'ubuntu-os-cloud',
help='Project containing image from --deploy_google_image_family.')
add_parser_argument(
parser, 'deploy_google_network', defaults, 'default',
help='The GCP Network to deploy spinnaker into.')
add_parser_argument(
parser, 'deploy_google_use_internal_ip', defaults, True, type=bool,
help='Force the internal IP to connect to the deployed instance.'
' This is only valid when talking within the same project.')
parser.add_argument(
'--deploy_google_use_external_ip',
dest='deploy_google_use_internal_ip', action='store_false',
help='DEPRECATED: Use --deploy_google_use_internal_ip=false')
add_parser_argument(
parser, 'deploy_google_tags',
defaults, 'spinnaker-validation-instance',
help='A comma-delimited list of GCP network tags to tag'
' the deployed instances with.')
add_parser_argument(
parser, 'deploy_hal_google_service_account', defaults, None,
help='When deploying to gce, this is the service account to use'
' for configuring halyard.')
@classmethod
def validate_options_helper(cls, options):
"""Adds custom configuration parameters to argument parser.
This is a helper function for make_deployer().
"""
if not options.deploy_google_project:
raise_and_log_error(
ConfigError('--deploy_google_project not specified.'))
if not options.deploy_google_instance:
raise_and_log_error(
ConfigError('--deploy_google_instance not specified.'))
if not options.deploy_hal_google_service_account:
raise_and_log_error(
ConfigError('--deploy_hal_google_service_account not specified.'))
if options.deploy_deploy:
logging.debug('Checking if "%s" already exists...',
options.deploy_google_instance)
retcode, _ = run_subprocess(
'gcloud compute instances describe'
' --account {gcloud_account}'
' --project {project} --zone {zone} {instance}'
.format(gcloud_account=options.deploy_hal_google_service_account,
project=options.deploy_google_project,
zone=options.deploy_google_zone,
instance=options.deploy_google_instance))
if retcode == 0:
raise_and_log_error(ConfigError(
'"{instance}" already exists in project={project} zone={zone}'
.format(instance=options.deploy_google_instance,
project=options.deploy_google_project,
zone=options.deploy_google_zone),
cause='VmExists'))
def do_create_vm(self, options):
"""Implements the BaseBomValidateDeployer interface."""
logging.info('Creating "%s" in project "%s"',
options.deploy_google_instance,
options.deploy_google_project)
with open(self.ssh_key_path + '.pub', 'r') as f:
ssh_key = f.read().strip()
if ssh_key.startswith('ssh-rsa'):
ssh_key = self.hal_user + ':' + ssh_key
check_subprocess(
'gcloud compute instances create'
' --account {gcloud_account}'
' --machine-type {machine_type}'
' --image-family {image_family}'
' --image-project {image_project}'
' --metadata block-project-ssh-keys=TRUE,ssh-keys="{ssh_key}"'
' --project {project} --zone {zone}'
' --network {network}'
' --tags {network_tags}'
' --scopes {scopes}'
' {instance}'
.format(gcloud_account=options.deploy_hal_google_service_account,
machine_type=options.deploy_google_machine_type,
image_family=options.deploy_google_image_family,
image_project=options.deploy_google_image_project,
project=options.deploy_google_project,
zone=options.deploy_google_zone,
scopes='compute-rw,storage-full,logging-write,monitoring',
network=options.deploy_google_network,
network_tags=options.deploy_google_tags,
ssh_key=ssh_key,
instance=options.deploy_google_instance),
stream=sys.stdout)
def do_undeploy(self):
"""Implements the BaseBomValidateDeployer interface."""
options = self.options
if options.deploy_spinnaker_type == 'distributed':
run_subprocess(
'ssh'
' -i {ssh_key}'
' -o StrictHostKeyChecking=no'
' -o UserKnownHostsFile=/dev/null'
' {user}@{ip} sudo hal -q --log=info deploy clean'
.format(user=self.hal_user,
ip=self.instance_ip,
ssh_key=self.ssh_key_path))
check_subprocess(
'gcloud -q compute instances delete'
' --account {gcloud_account}'
' --project {project} --zone {zone} {instance}'
.format(gcloud_account=options.deploy_hal_google_service_account,
project=options.deploy_google_project,
zone=options.deploy_google_zone,
instance=options.deploy_google_instance))
def make_deployer(options, metrics):
"""Public interface to instantiate the desired Deployer.
Args:
options: [Namespace] from an argument parser given to init_argument_parser
"""
if options.deploy_hal_platform == 'gce':
hal_klass = GoogleValidateBomDeployer
elif options.deploy_hal_platform == 'ec2':
hal_klass = AwsValidateBomDeployer
elif options.deploy_hal_platform == 'azure':
hal_klass = AzureValidateBomDeployer
else:
raise_and_log_error(ConfigError(
'Invalid --deploy_hal_platform=%s' % options.deploy_hal_platform))
if options.deploy_spinnaker_type not in SUPPORTED_DEPLOYMENT_TYPES:
raise_and_log_error(ConfigError(
'Invalid --deploy_spinnaker_type "{0}". Must be one of {1}'
.format(options.deploy_spinnaker_type, SUPPORTED_DEPLOYMENT_TYPES)))
# This is the class for accessing the Spinnaker deployment if other than Hal.
spin_klass = None
if options.deploy_spinnaker_type == 'distributed':
if (options.deploy_distributed_platform
not in SUPPORTED_DISTRIBUTED_PLATFORMS):
raise_and_log_error(ConfigError(
'A "distributed" deployment requires --deploy_distributed_platform'))
if options.deploy_distributed_platform == 'kubernetes':
spin_klass = KubernetesValidateBomDeployer
elif options.deploy_distributed_platform == 'kubernetes_v2':
spin_klass = KubernetesV2ValidateBomDeployer
else:
raise_and_log_error(ConfigError(
'Unknown --deploy_distributed_platform.'
' This must be the value of one of the following parameters: {0}'
.format(SUPPORTED_DISTRIBUTED_PLATFORMS)))
hal_klass.validate_options_helper(options)
if spin_klass:
spin_klass.validate_options_helper(options)
return hal_klass(options, metrics, runtime_class=spin_klass)
def determine_deployment_platform(options):
"""Helper function to determine the deployment platform being tested.
This is used for instrumentation purposes.
"""
platform = options.deploy_hal_platform
if options.deploy_spinnaker_type == 'distributed':
if platform == 'gce':
platform = 'gke'
else:
platform += '+k8s'
return platform
def init_argument_parser(parser, defaults):
"""Initialize the argument parser with deployment and configuration params.
Args:
parser: [ArgumentParser] The argument parser to add the parameters to.
"""
# pylint: disable=line-too-long
add_parser_argument(
parser, 'halyard_install_script', defaults,
'https://raw.githubusercontent.com/spinnaker/halyard/master/install/debian/InstallHalyard.sh',
help='The URL to the InstallHalyard.sh script.')
add_parser_argument(
parser, 'halyard_version', defaults, None,
help='If provided, the specific version of halyard to use.')
add_parser_argument(
parser, 'halyard_bucket_base_url', defaults, None,
help='The base URL for the bucket containing the halyard jar files'
' to override, if any.')
add_parser_argument(
parser, 'halyard_config_bucket', defaults, None,
help='The global halyard configuration bucket to override, if any.')
add_parser_argument(
parser, 'halyard_config_bucket_credentials', defaults, None,
help='If specified, give these credentials to halyard'
' in order to access the global halyard GCS bucket.')
add_parser_argument(
parser, 'spinnaker_repository',
defaults, 'https://dl.bintray.com/spinnaker-releases/debians',
help='The location of the spinnaker debian repository.')
add_parser_argument(
parser, 'spinnaker_registry', defaults, 'gcr.io/spinnaker-marketplace',
help='The location of the spinnaker container registry.')
add_parser_argument(
parser, 'deploy_spinnaker_type', defaults, None,
choices=SUPPORTED_DEPLOYMENT_TYPES,
help='The type of spinnaker deployment to create.')
add_parser_argument(
parser, 'deploy_hal_platform', defaults, None,
choices=['gce', 'ec2', 'azure'],
help='Platform to deploy Halyard onto.'
' Halyard will then deploy Spinnaker.')
add_parser_argument(
parser, 'deploy_hal_user', defaults, os.environ.get('LOGNAME'),
help='User name on deployed hal_platform for deploying hal.'
' This is used to scp and ssh from this machine.')
add_parser_argument(
parser, 'deploy_distributed_platform', defaults, 'kubernetes',
choices=SUPPORTED_DISTRIBUTED_PLATFORMS,
help='The platform to deploy spinnaker to when'
' --deploy_spinnaker_type=distributed')
add_parser_argument(
parser, 'deploy_version', defaults, 'master-latest-unvalidated',
help='Spinnaker version to deploy. The default is "master-latest-unvalidated".')
add_parser_argument(
parser, 'deploy_deploy', defaults, True, type=bool,
help='Actually perform the deployment.'
' This is for facilitating debugging with this script.')
add_parser_argument(
parser, 'deploy_undeploy', defaults, True, type=bool,
help='Actually perform the undeployment.'
' This is for facilitating debugging with this script.')
add_parser_argument(
parser, 'deploy_always_collect_logs', defaults, False, type=bool,
help='Always collect logs.'
' By default logs are only collected when deploy_undeploy is True.')
AwsValidateBomDeployer.init_platform_argument_parser(parser, defaults)
AzureValidateBomDeployer.init_platform_argument_parser(parser, defaults)
GoogleValidateBomDeployer.init_platform_argument_parser(parser, defaults)
KubernetesValidateBomDeployer.init_platform_argument_parser(parser, defaults)
KubernetesV2ValidateBomDeployer.init_platform_argument_parser(parser, defaults)
|
ewiseblatt/spinnaker
|
dev/validate_bom__deploy.py
|
Python
|
apache-2.0
| 62,064
|
[
"ORCA"
] |
68fefb7127602b72ebd1627d2f8460247c0b68bd7bc192d433a352af4218db6b
|
# This file is part of TxtAlert.
#
# TxtAlert is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TxtAlert is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TxtAlert. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, date, timedelta
from django.test import TestCase
from django.contrib.auth.models import User
from django.utils import timezone
from txtalert.core.models import *
from txtalert.apps.therapyedge.importer import Importer, InvalidValueException
from txtalert.apps.therapyedge.tests.utils import create_instance
from txtalert.apps.therapyedge.tests.utils import (PatientUpdate, ComingVisit, MissedVisit,
DoneVisit, DeletedVisit)
def reload_record(record):
return record.__class__.objects.get(pk=record.pk)
class PatientImportTestCase(TestCase):
fixtures = ['clinics.json', 'patients.json',]
def setUp(self):
self.importer = Importer()
self.user = User.objects.get(username='kumbu')
def testInvalidAgeImport(self):
# import an invalid patient record
self.assertRaises(InvalidValueException, # exception
self.importer.update_local_patient, # callable
self.user, # args
create_instance(PatientUpdate, {
'te_id': '01-1235',
'age': '3135',
'sex': 'Female',
'celphone': '082123'
})
)
def testInvalidSexImport(self):
# import an invalid patient record
self.assertRaises(InvalidValueException, # exception
self.importer.update_local_patient, # callable
self.user, # args
create_instance(PatientUpdate, {
'te_id': '01-1235',
'age': '31',
'sex': 'Feale',
'celphone': '082123'
})
)
def testBasicImport(self):
"""basic patient record import"""
patient = self.importer.update_local_patient(self.user,
create_instance(PatientUpdate, {
'te_id': '03-12345',
'age': '25',
'sex': 'Male',
'celphone': '0821231234'
}))
# reload to make sure we have the database values
patient = reload_record(patient)
self.assertEquals(patient.te_id, '03-12345')
self.assertEquals(patient.age, 25)
self.assertEquals(patient.sex, 'm')
self.assertEquals(patient.msisdns.latest('id').msisdn, '27821231234')
self.assertEquals(
patient.history.latest().get_history_type_display(),
'Created'
)
def testAlterDetails(self):
"""duplicate 'te_id' import with altered details"""
original_patient = Patient.objects.get(te_id='02-12345')
original_history_count = original_patient.history.count()
patient = self.importer.update_local_patient(self.user,
create_instance(PatientUpdate, {
'te_id': '02-12345',
'age': '35',
'sex': 'Female',
'celphone':'0821234321'
}))
patient = reload_record(patient)
self.assertEquals(
patient.history.latest().get_history_type_display(),
'Changed'
)
self.assertEquals(patient.age, 35)
self.assertEquals(patient.sex, 'f')
self.assertEquals(patient.msisdns.latest('id').msisdn, '27821234321')
self.assertEquals(patient.history.count(), original_history_count + 1) # this is an update, should have a new history item
def testDuplicateMsisdnImport(self):
"""duplicate 'msisdn' import"""
# new patient, not in fixtures
patientA = self.importer.update_local_patient(self.user,
create_instance(PatientUpdate, {
'te_id': '03-12345',
'age': '30',
'sex': 'Male',
'celphone': '0821111111'
}))
# existing patient, in fixtures
patientB = self.importer.update_local_patient(self.user,
create_instance(PatientUpdate, {
'te_id': '01-12345',
'age': '30',
'sex': 'Male',
'celphone': '0821111111'
}))
patientA = reload_record(patientA)
patientB = reload_record(patientB)
self.assertEquals(
patientA.history.latest().get_history_type_display(),
'Created'
)
self.assertEquals(
patientB.history.latest().get_history_type_display(),
'Changed'
)
# both phone numbers should point to the same MSISDN record
# in the database
self.assertEqual(
patientA.msisdns.latest('id').id,
patientB.msisdns.latest('id').id
)
def testCountryCodeMsisdn(self):
"""country code included in 'msisdn'"""
patient = self.importer.update_local_patient(self.user,
create_instance(PatientUpdate, {
'te_id': '03-12345',
'age': '55',
'sex': 'Male',
'celphone': '+27823211234'
}))
patient = reload_record(patient)
self.assertEquals(patient.history.latest().get_history_type_display(),
'Created')
self.assertEquals(patient.msisdns.latest('id').msisdn, '27823211234')
def testMultipleMsisdn(self):
"""multiple 'msisdn' import (ons country code 'msisdn' without plus)"""
patient = self.importer.update_local_patient(self.user,
create_instance(PatientUpdate, {
'te_id': '03-12345',
'age': '18',
'sex': 'Female',
'celphone': '0821231111/27821232222'
}))
patient = reload_record(patient)
self.assertEquals(patient.history.latest().get_history_type_display(),
'Created')
msisdns = patient.msisdns.all()
self.assertEqual(len(msisdns), 2)
self.assertEqual(msisdns[0].msisdn, '27821232222')
self.assertEqual(msisdns[1].msisdn, '27821231111')
class VisitImportTestCase(TestCase):
fixtures = ['patients.json', 'clinics.json', 'visits.json',]
def setUp(self):
self.clinic = Clinic.objects.get(te_id='01')
self.importer = Importer()
self.user = User.objects.get(username='kumbu')
def testInvalidImport(self):
"""attempt import of an invalid record"""
self.assertRaises(Patient.DoesNotExist, # exception
self.importer.update_local_coming_visit, # callback
self.user, # args
self.clinic,
create_instance(
ComingVisit, {
'key_id': '123456789',
'te_id': '01-1245',
'scheduled_visit_date':'2080-26 00:00:00'
})
)
def testNewVisit(self):
"""import a new visit"""
visit = self.importer.update_local_coming_visit(
self.user,
self.clinic,
create_instance(ComingVisit, {
'key_id': '02-123456789',
'te_id': '01-12345',
'scheduled_visit_date': '2100-06-01 00:00:00'
})
)
visit = reload_record(visit)
self.assertEqual(visit.history.latest().get_history_type_display(),
'Created')
self.assertEquals(visit.te_visit_id, '02-123456789')
self.assertEquals(visit.patient.te_id, '01-12345')
self.assertEquals(visit.date, date(2100, 6, 1))
def testIndicateReschedule(self):
"""reschedule a visit"""
missed_future_date = date(2200,5,1)
# make sure we have a visit to reschedule
original_visit = Visit.objects.get(te_visit_id='01-123456789')
# make sure the updated date is actually in the future
self.assertTrue(original_visit.date < missed_future_date)
visit = self.importer.update_local_missed_visit(
self.user,
self.clinic,
create_instance(MissedVisit, {
'key_id': '01-123456789',
'te_id': '01-12345',
'missed_date': '%s 00:00:00' % missed_future_date # future date should be seen as a reschedule
})
)
visit = reload_record(visit)
self.assertEqual(visit.history.latest().get_history_type_display(),
'Changed')
self.assertEquals(visit.status, 'r')
self.assertEquals(visit.date, date(2200, 5, 1))
def testIndicateMissed(self):
"""indicate a missed visit"""
visit = self.importer.update_local_missed_visit(
self.user,
self.clinic,
create_instance(MissedVisit, {
'key_id': '01-123456799',
'te_id': '01-12345',
'missed_date': date.today().strftime('%Y-%m-%d 00:00:00')
})
)
visit = reload_record(visit)
# event = importMissedVisit(self.event, self.clinic, {'key_id':'01-123456789', 'te_id':'01-12345', 'missed_date':'2100-06-01 00:00:00'})
self.assertEqual(visit.history.latest().get_history_type_display(),
'Created')
self.assertEquals(visit.status, 'm')
self.assertEquals(visit.date, date.today())
def testIndicateAttended(self):
"""indicate an attended visit"""
visit = self.importer.update_local_done_visit(
self.user,
self.clinic,
create_instance(DoneVisit, {
'key_id': '01-123456789',
'te_id': '01-12345',
'done_date': '2100-07-01 00:00:00'
})
)
visit = reload_record(visit)
self.assertEquals(visit.history.latest().get_history_type_display(),
'Changed')
self.assertEquals(visit.status, 'a')
self.assertEquals(visit.date, date(2100, 7, 1))
def testIndicateNewAttended(self):
"""indicate a new attended visit"""
visit = self.importer.update_local_done_visit(
self.user,
self.clinic,
create_instance(DoneVisit, {
'key_id': '02-123456789',
'te_id': '01-12345',
'done_date': '2100-07-01 00:00:00'
})
)
visit = reload_record(visit)
self.assertEqual(visit.history.latest().get_history_type_display(),
'Created')
self.assertEquals(visit.status, 'a')
self.assertEquals(visit.date, date(2100, 7, 1))
def testIndicateNewMissed(self):
# indicate a new missed visit
yesterday = date.today() - timedelta(days=1)
visit = self.importer.update_local_missed_visit(
self.user,
self.clinic,
create_instance(MissedVisit, {
'key_id': '02-123456789',
'te_id': '01-12345',
'missed_date': yesterday.strftime('%Y-%m-%d 00:00:00')
})
)
visit = reload_record(visit)
self.assertEqual(visit.history.latest().get_history_type_display(),
'Created')
self.assertEquals(visit.status, 'm')
self.assertEquals(visit.date, yesterday)
def testDelete(self):
"""delete a visit"""
visit = self.importer.update_local_deleted_visit(self.user, create_instance(DeletedVisit, {
'key_id': '01-123456789',
'te_id': '01-12345'
}))
self.assertEqual(visit.deleted, True)
self.assertEqual(visit.history.latest().get_history_type_display(),
'Changed') # it is Changed because of the soft delete
self.assertRaises(Visit.DoesNotExist, # exception
Visit.objects.get, # callback
pk=visit.pk # args
)
class PatientRiskProfileTestCase(TestCase):
fixtures = ['clinics.json', 'patients.json',]
def setUp(self):
self.patient = Patient.objects.all()[0]
self.importer = Importer()
self.clinic = Clinic.objects.get(te_id='01')
self.user = User.objects.get(username='kumbu')
def reload_patient(self):
return reload_record(self.patient)
def test_risk_profile_calculation(self):
today = timezone.now() - timedelta(days=1)
visit = self.importer.update_local_missed_visit(
self.user,
self.clinic,
create_instance(MissedVisit, {
'key_id':'02-123456789',
'te_id':self.patient.te_id,
'missed_date': today.strftime('%Y-%m-%d 00:00:00')
})
)
self.assertEquals(visit.status, 'm') # make sure it's flagged as missed
# otherwise this won't make sense
self.assertEquals(self.reload_patient().risk_profile, 1.0)
def test_risk_profile_incremental_calculation(self):
yesterday = timezone.now() - timedelta(days=1)
two_days_ago = yesterday - timedelta(days=1)
# attended
visit1 = self.importer.update_local_done_visit(
self.user,
self.clinic,
create_instance(DoneVisit, {
'key_id': '02-123456701',
'te_id': self.patient.te_id,
'done_date': '2100-07-01 00:00:00'
}))
# attended
visit2 = self.importer.update_local_done_visit(
self.user,
self.clinic,
create_instance(DoneVisit, {
'key_id': '02-123456702',
'te_id': self.patient.te_id,
'done_date': '2100-07-02 00:00:00'
}))
# we've attended all our visits, our risk profile should be zero
self.assertEquals(self.reload_patient().risk_profile, 0.0)
# missed
visit3 = self.importer.update_local_missed_visit(
self.user,
self.clinic,
create_instance(MissedVisit, {
'key_id': '02-123456703',
'te_id': self.patient.te_id,
'missed_date': yesterday.strftime('%Y-%m-%d 00:00:00')
}))
# attended two out of three, 33% risk
self.assertAlmostEquals(self.reload_patient().risk_profile, 0.33, places=2)
visit4 = self.importer.update_local_missed_visit(
self.user,
self.clinic,
create_instance(MissedVisit, {
'key_id': '02-123456704',
'te_id': self.patient.te_id,
'missed_date': two_days_ago.strftime('%Y-%m-%d 00:00:00')
}))
# attended two out of 4, 50% risk
self.assertAlmostEquals(self.reload_patient().risk_profile, 0.50, places=2)
|
praekelt/txtalert
|
txtalert/apps/therapyedge/tests/importing.py
|
Python
|
gpl-3.0
| 15,487
|
[
"VisIt"
] |
8a993878177d49948d0107702bcba5adb6dd4f56e9a7f3dde95d8fc08de1e44b
|
"""
IO for LAMMPS
"""
|
gVallverdu/pymatgen
|
pymatgen/io/lammps/__init__.py
|
Python
|
mit
| 22
|
[
"LAMMPS"
] |
79ffbaef5951daa8908d4cae729dcfe4b17222d2e34ed46f5f935154ef0ef3b1
|
"""
Fourier Transform
-Find Fourier Transform of images using OpenCV
-utilize FFT functions in Numpy
-FT applications
functions:
cv2.
dft()
idft()
FT used to analyze freq characteristics of filters
for images
2D Discrete Fourier Transform used to find frequency domain
FFT calculates DFT
sinusoidal signal: x(t)=A * sin(2 * \pi *f * t)
f - freq signal
if freq domain taken, can see a spike at f
if signal sampled to form discrete signal, get same freq domain, but periodic in range:
[- \pi , \pi] or [0, 2 * \pi] (or [0, N] for N-pt DFT)
consider image a signal sampled in 2 directions
taking FT in both X and Y dirs gives freq representation of image
for sinusoidal signal, if ampl varies fast in time -> hi freq signal
for images:
amplitude varies drastically at edge points or noises
therefore edges and noises high freq contents of image
no changes in amplitude: lo freq component
"""
# FT in Numpy
# numpy has FFT package
# np.fft.fft2 prov. freq transform which is complex array
# arguments:
# input image (grayscale)
# size of output array; if greater than size of input image, input image padded w/ 0s before calculation of FFT
# less than input image: input image cropped
# no args passed: output size same as input
# result: zero freq component @ top left corner
# to bring to center: shift result by N/2 in both directions
# done by np.fft.fftshift()
# once find frequency transform -> find magnitude spectrum
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# can see whiter region at center, showing low freq content is prominent
# ^ found freq transform; now, can do ops in freq domain
# hi pass filtering
# image reconstruction (ie find inverse DFT)
# remove lo freqs with rectangular window, size 60x60
# apply inverse shift using np.fft.ifftshift()
# so DC component is again at top left hand corner
# find inverse FFT using np.ifft2()
# result complex #; take its abs value
rows, cols = img.shape
crow, ccol = rows//2, cols//2  # integer division keeps the slice indices valid
fshift[crow-30:crow+30, ccol-30:ccol+30] = 0
f_ishift = np.fft.ifftshift(fshift)
img_back = np.fft.ifft2(f_ishift)
img_back = np.abs(img_back)
plt.subplot(131),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132),plt.imshow(img_back, cmap = 'gray')
plt.title('Image after HPF'), plt.xticks([]), plt.yticks([])
plt.subplot(133),plt.imshow(img_back)
plt.title('Result in JET'), plt.xticks([]), plt.yticks([])
plt.show()
# don't use rectangular filters for masking
# create ripple-like ringing effects
# mask converted to sinc shape, causing problem
# use Gaussian window instead
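# --- Hedged sketch (not part of the original notes): building a Gaussian low-pass
# mask instead of the rectangular window; sigma = 30 is an illustrative choice.
gk_r = cv2.getGaussianKernel(rows, 30)
gk_c = cv2.getGaussianKernel(cols, 30)
gauss_mask = gk_r * gk_c.T                  # outer product -> 2D Gaussian, peak at center
gauss_mask = gauss_mask / gauss_mask.max()  # normalize so low freqs pass with weight 1
fshift_full = np.fft.fftshift(f)            # start again from the unmodified spectrum
img_gauss_lpf = np.abs(np.fft.ifft2(np.fft.ifftshift(fshift_full * gauss_mask)))
# (for a high-pass version, use (1 - gauss_mask) instead)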
# Fourier Transform in OpenCV
# functions: cv2.dft() and cv2.idft()
# same result as before, but in 2 channels
# 1st channel: real part of result
# 2nd channel: imaginary part
# convert input image to np.float32 first
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('messi5.jpg', 0)
dft = cv2.dft(np.float32(img), flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20 * np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))
plt.subplot(121), plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# NOTE: use cv2.cartToPolar(), which returns both magnitude and phase
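# Hedged one-liner (assumption, not in the original notes): the same spectrum via
# cv2.cartToPolar(), which returns magnitude and phase together.
mag, phase = cv2.cartToPolar(dft_shift[:, :, 0], dft_shift[:, :, 1])
magnitude_spectrum_polar = 20 * np.log(mag + 1e-9)   # small epsilon avoids log(0)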
# now, we do inverse DFT
# previously, we created HPF
# now, remove hi freq contents of image
# -> apply LPF
# blurs the image
# create a mask first with high value, 1, @ low freq
# ie pass LF content
# 0 at HF region
rows, cols = img.shape
crow, ccol = rows//2, cols//2  # integer division keeps the slice indices valid
# create mask first, center square is 1, all remaining zeros
mask = np.zeros((rows, cols, 2), np.uint8)
mask[crow-30:crow+30, ccol-30:ccol+30] = 1
# apply mask and iDFT
fshift = dft_shift * mask
f_ishift = np.fft.ifftshift(fshift)
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:,:,0], img_back[:,:,1])
plt.subplot(121), plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img_back, cmap = 'gray')
plt.title('Image after LPF'), plt.xticks([]), plt.yticks([])
plt.show()
|
SSG-DRD-IOT/commercial-iot-security-system
|
opencv/tutorials/imageProcessing/transform/fourier.py
|
Python
|
mit
| 4,853
|
[
"Gaussian"
] |
3b5a6dff59e0d3faaf5419c58e626afa95b4ac6f983e074febe99465148f77a8
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Union
from typing import Callable
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from ORCA.ui.BasePopup import cBasePopup,SettingSpacer
from ORCA.vars.Replace import ReplaceVars
from ORCA.widgets.core.ScrollableLabelLarge import cScrollableLabelLarge
import ORCA.Globals as Globals
__all__ = ['cRaiseQuestion','ShowQuestionPopUp']
class cRaiseQuestion(cBasePopup):
""" Shows a question popup """
def __init__(self):
super(cRaiseQuestion, self).__init__()
self.oBtnlayout:Union[BoxLayout,None] = None
self.oLabel:Union[cScrollableLabelLarge,None] = None
self.oBtn1:Union[Button,None] = None
self.oBtn2:Union[Button,None] = None
self.oBtnDetails:Union[Button,None] = None
self.uMessage:str = u''
self.fktYes:Union[Callable,None] = None
self.fktNo:Union[Callable,None] = None
self.uStringDetails:str = ''
def RaiseQuestion(self,*,uTitle:str='',uMessage:str='',fktYes:Union[Callable,None]=None,fktNo:Union[Callable,None]=None,uStringYes:str='',uStringNo:str='',uStringDetails:str='') -> Popup:
""" Shows the question """
oContent:BoxLayout = BoxLayout(orientation='vertical', spacing='5dp')
self.uMessage = uMessage
self.oPopup = Popup(title=ReplaceVars(uTitle),content=oContent, size=(Globals.iAppWidth*0.9,Globals.iAppHeight*0.9),size_hint=(None, None),auto_dismiss=False)
self.oLabel = cScrollableLabelLarge(text=ReplaceVars(uMessage),size_hint=(1, None),size=(Globals.iAppWidth*0.86, Globals.iAppHeight*0.4),markup = True, noxscroll=True,)
oContent.add_widget(Widget())
oContent.add_widget(self.oLabel)
oContent.add_widget(Widget())
oContent.add_widget(SettingSpacer())
self.fktYes=fktYes
self.fktNo=fktNo
# 2 buttons are created to accept or cancel the current value
self.oBtnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
if uStringYes!='':
self.oBtn1 = Button(text=ReplaceVars(uStringYes))
self.oBtn1.bind(on_release=self.fktYES)
self.oBtnlayout.add_widget(self.oBtn1)
if uStringDetails!='':
self.uStringDetails=uStringDetails
self.oBtnDetails = Button(text=ReplaceVars('$lvar(452)'))
self.oBtnDetails.bind(on_release=self.fktDetails)
self.oBtnlayout.add_widget(self.oBtnDetails)
if uStringNo!='':
self.oBtn2 = Button(text=ReplaceVars(uStringNo))
self.oBtn2.bind(on_release=self.fktNO)
self.oBtnlayout.add_widget(self.oBtn2)
oContent.add_widget(self.oBtnlayout)
self.oPopup.open()
return self.oPopup
# noinspection PyUnusedLocal
def fktDetails(self, *largs) -> None:
""" switch between details and core message """
if self.oLabel.text==self.uMessage:
self.oLabel.text=self.uStringDetails
else:
self.oLabel.text=self.uMessage
# noinspection PyUnusedLocal
def fktYES(self, *largs) -> None:
""" handles pressing the yes button """
cBasePopup.ClosePopup(self)
if self.fktYes:
return self.fktYes()
return None
# noinspection PyUnusedLocal
def fktNO(self, *largs) -> None:
""" handles pressing the no button """
cBasePopup.ClosePopup(self)
if self.fktNo:
return self.fktNo()
return None
def ShowQuestionPopUp(*,uTitle:str='',uMessage:str='',fktYes:Union[Callable,None]=None,fktNo:Union[Callable,None]=None,uStringYes:str='',uStringNo:str='', uSound:str=u'question'):
""" all in a function """
Globals.oSound.PlaySound(uSoundName=uSound)
oRaiseQuestion = cRaiseQuestion()
oRaiseQuestion.RaiseQuestion(uTitle=uTitle, uMessage=uMessage, fktYes=fktYes, fktNo=fktNo, uStringYes=uStringYes, uStringNo=uStringNo)
return oRaiseQuestion
|
thica/ORCA-Remote
|
src/ORCA/ui/RaiseQuestion.py
|
Python
|
gpl-3.0
| 5,312
|
[
"ORCA"
] |
d25aa4ca2da5f19aa715833b6a7c602506bc4e250e9f14f07915d3962466df46
|
"""
Copyright (C) 2018 Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
This file is distributed under the terms of the 3-clause BSD License.
A copy of the license can be found in the root directory or at
https://opensource.org/licenses/BSD-3-Clause.
Author: Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
Date: 02/11/2018
This module implements a printer for the parsed AST
"""
from intrepyd.iec611312py.visitor import Visitor
from intrepyd.iec611312py.statement import Assignment
from intrepyd.iec611312py.expression import VariableOcc, Expression
def termAsString(term):
printer = StmtPrinter()
printer.visit(term)
return printer.result
class StmtPrinter(Visitor):
"""
Visitor for printing Statements on a string
"""
def __init__(self):
self._result = ''
@property
def result(self):
return self._result
def processStatements(self, statements):
for statement in statements:
statement.accept(self)
def _visit_assignment(self, obj):
obj.lhs.accept(self)
self._result += ' := '
obj.rhs.accept(self)
self._result += ';'
def _visit_ifthenelse(self, obj):
first = True
if len(obj.conditions) != len(obj.stmt_blocks):
raise RuntimeError('Wrong number of conditions and statements in if then else')
for i in range(len(obj.conditions)):
if first:
self._result += 'IF '
first = False
else:
self._result += 'ELSIF '
obj.conditions[i].accept(self)
self._result += ' THEN '
for statement in obj.stmt_blocks[i]:
statement.accept(self)
self._result += ' '
self._result += 'END_IF;'
def _visit_case(self, obj):
selections = obj.selections
statements = obj.stmt_blocks
if len(selections) != len(statements):
raise RuntimeError('Wrong number of selections and statements in case')
self._result += 'CASE '
obj.expression.accept(self)
self._result += ' OF '
for i in range(len(selections)):
for selection in selections[i]:
selection.accept(self)
self._result += ': '
for statement in statements[i]:
statement.accept(self)
self._result += ' '
self._result += 'END_CASE;'
def _visit_expression(self, expression):
args = expression.arguments
nargs = len(args)
if nargs == 1:
self._result += expression.operator + '('
expression.arguments[0].accept(self)
self._result += ')'
return
elif nargs == 2:
self._result += '('
args[0].accept(self)
self._result += ' ' + expression.operator + ' '
args[1].accept(self)
self._result += ')'
def _visit_ite(self, ite):
self._result += 'ite('
ite.condition.accept(self)
self._result += ', '
ite.then_term.accept(self)
self._result += ', '
ite.else_term.accept(self)
self._result += ')'
def _visit_variable_occ(self, variableOcc):
self._result += variableOcc.var.name
def _visit_constant_occ(self, constantOcc):
self._result += constantOcc.cst
|
formalmethods/intrepyd
|
intrepyd/iec611312py/stmtprinter.py
|
Python
|
bsd-3-clause
| 3,378
|
[
"VisIt"
] |
1ba272f2df278f74c1b32906c5445e6f0f8ea37e2bd51bac8b9fddfe53b82818
|
from __future__ import division
import numpy as np
import sklearn.pipeline
from mdtraj.testing import eq
from sklearn.externals.joblib import dump as jl_dump
from msmbuilder.decomposition import tICA
from msmbuilder.utils import Subsampler, dump, load
from .test_commands import tempdir
random = np.random.RandomState(2)
def test_subsampler_lag1():
n_traj, n_samples, n_features = 3, 100, 7
lag_time = 1
X_all_0 = [random.normal(size=(n_samples, n_features))
for i in range(n_traj)]
q_0 = np.concatenate(X_all_0)
subsampler = Subsampler(lag_time=lag_time)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(q_0.shape, q_1.shape)
eq(q_0.mean(0), q_1.mean(0))
eq(q_0.std(0), q_1.std(0))
subsampler = Subsampler(lag_time=lag_time, sliding_window=False)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(q_0.shape, q_1.shape)
eq(q_0.mean(0), q_1.mean(0))
eq(q_0.std(0), q_1.std(0))
def test_subsampler_lag2():
n_traj, n_samples, n_features = 3, 100, 7
lag_time = 2
X_all_0 = [random.normal(size=(n_samples, n_features))
for i in range(n_traj)]
q_0 = np.concatenate(X_all_0)
subsampler = Subsampler(lag_time=lag_time)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(((n_samples - lag_time + 2) * n_traj, n_features), q_1.shape)
subsampler = Subsampler(lag_time=lag_time, sliding_window=False)
X_all_1 = subsampler.transform(X_all_0)
q_1 = np.concatenate(X_all_1)
eq(((n_samples / lag_time) * n_traj, n_features), q_1.shape)
def test_subsampler_tica():
n_traj, n_samples, n_features = 1, 500, 4
lag_time = 2
X_all_0 = [random.normal(size=(n_samples, n_features))
for i in range(n_traj)]
tica_0 = tICA(lag_time=lag_time)
tica_0.fit(X_all_0)
subsampler = Subsampler(lag_time=lag_time)
tica_1 = tICA()
pipeline = sklearn.pipeline.Pipeline([
("subsampler", subsampler),
('tica', tica_1)
])
pipeline.fit(X_all_0)
eq(tica_0.n_features, tica_1.n_features) # Obviously true
eq(tica_0.n_observations_, tica_1.n_observations_)
# The eigenvalues should be the same. NOT the timescales,
# as tica_1 has timescales calculated in a different time unit
eq(tica_0.eigenvalues_, tica_1.eigenvalues_)
def test_dump_load():
data = dict(name="Fancy_name", arr=np.random.rand(10, 5))
with tempdir():
dump(data, 'filename')
data2 = load('filename')
eq(data, data2)
def test_load_legacy():
# Used to save joblib files
data = dict(name="Fancy_name", arr=np.random.rand(10, 5))
with tempdir():
jl_dump(data, 'filename', compress=1)
data2 = load('filename')
eq(data, data2)
|
mpharrigan/mixtape
|
msmbuilder/tests/test_utils.py
|
Python
|
lgpl-2.1
| 2,830
|
[
"MDTraj"
] |
a04f0f6e98424efd16c092a1ff36f9ef4407d6436401d889d59067406d20a44c
|
"""Test the dirac-transformation-replication script and helper"""
import unittest
from mock import MagicMock as Mock, patch
from DIRAC import S_OK, S_ERROR
from DIRAC.TransformationSystem.Utilities.ReplicationTransformation import createDataTransformation
from DIRAC.TransformationSystem.Utilities.ReplicationCLIParameters import Params
__RCSID__ = "$Id$"
GET_VOMS = "DIRAC.TransformationSystem.Utilities.ReplicationCLIParameters.getVOMSVOForGroup"
GET_PROXY = "DIRAC.TransformationSystem.Utilities.ReplicationCLIParameters.getProxyInfo"
def getProxyMock(success=True):
""" return value for getProxy """
if success:
return Mock(return_value=S_OK({'groupProperties': ['ProductionManagement'],
'group': 'clic_prod',
}))
return Mock(return_value=S_ERROR("Failed"))
def opMock():
""" return mock for config operations """
opmock = Mock()
opmock.getOptionsDict.return_value = S_OK({'trans': 'ProdID'})
opmock.getValue.return_value = 'ProdID'
return Mock(return_value=opmock)
class TestMoving(unittest.TestCase):
"""Test the creation of moving transformation"""
def setUp(self):
self.tClientMock = Mock()
self.tClientMock.createTransformationInputDataQuery.return_value = S_OK()
self.tMock = Mock(return_value=self.tClientMock)
def tearDown(self):
pass
def test_createRepl_1(self):
""" test creating transformation """
tSE = "Target-SRM"
sSE = "Source-SRM"
prodID = 12345
module_name = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
trmodule = "DIRAC.TransformationSystem.Client.Transformation.Transformation"
with patch(trmodule + ".getTransformation", new=Mock(return_value=S_OK({}))), \
patch(trmodule + ".addTransformation", new=Mock(return_value=S_OK())), \
patch(trmodule + "._Transformation__setSE", new=Mock(return_value=S_OK())), \
patch("%s.TransformationClient" % module_name, new=self.tMock):
ret = createDataTransformation('Moving', tSE, sSE, 'prodID', prodID, enable=True)
self.assertTrue(ret['OK'], ret.get('Message', ""))
def test_createRepl_Dry(self):
""" test creating transformation """
tSE = "Target-SRM"
sSE = "Source-SRM"
prodID = 12345
module_name = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
trmodule = "DIRAC.TransformationSystem.Client.Transformation.Transformation"
with patch(trmodule + ".getTransformation", new=Mock(return_value=S_OK({}))), \
patch(trmodule + ".addTransformation", new=Mock(return_value=S_OK())), \
patch(trmodule + "._Transformation__setSE", new=Mock(return_value=S_OK())), \
patch("%s.TransformationClient" % module_name, new=self.tMock):
ret = createDataTransformation('Moving', tSE, sSE, 'prodID', prodID, enable=False, extraData={})
self.assertTrue(ret['OK'], ret.get('Message', ""))
def test_createRepl_2(self):
""" test creating transformation """
tSE = "Target-SRM"
sSE = "Source-SRM"
prodID = 12345
module_name = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
trmodule = "DIRAC.TransformationSystem.Client.Transformation.Transformation"
with patch(trmodule + ".getTransformation", new=Mock(return_value=S_OK({}))), \
patch(trmodule + ".addTransformation", new=Mock(return_value=S_OK())), \
patch(trmodule + "._Transformation__setSE", new=Mock(return_value=S_OK())), \
patch("%s.TransformationClient" % module_name, new=self.tMock):
ret = createDataTransformation('Moving', tSE, sSE, 'prodID', prodID, extraname="extraName", enable=True)
self.assertTrue(ret['OK'], ret.get('Message', ""))
def test_createRepl_SEFail_1(self):
""" test creating transformation """
tSE = "Target-SRM"
sSE = "Source-SRM"
prodID = 12345
module_name = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
trmodule = "DIRAC.TransformationSystem.Client.Transformation.Transformation"
with patch(trmodule + ".getTransformation", new=Mock(return_value=S_OK({}))), \
patch(trmodule + ".addTransformation", new=Mock(return_value=S_OK())), \
patch(trmodule + "._Transformation__setSE", new=Mock(side_effect=(S_OK(), S_ERROR()))), \
patch("%s.TransformationClient" % module_name, new=self.tMock):
ret = createDataTransformation('Moving', tSE, sSE, 'prodID', prodID, enable=True)
self.assertFalse(ret['OK'], str(ret))
self.assertIn("TargetSE not valid", ret['Message'])
def test_createRepl_SEFail_2(self):
""" test creating transformation """
tSE = "Target-SRM"
sSE = "Source-SRM"
prodID = 12345
module_name = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
trmodule = "DIRAC.TransformationSystem.Client.Transformation.Transformation"
with patch(trmodule + ".getTransformation", new=Mock(return_value=S_OK({}))), \
patch(trmodule + ".addTransformation", new=Mock(return_value=S_OK())), \
patch(trmodule + "._Transformation__setSE", new=Mock(side_effect=(S_ERROR(), S_ERROR()))), \
patch("%s.TransformationClient" % module_name, new=self.tMock):
ret = createDataTransformation('Moving', tSE, sSE, 'prodID', prodID, enable=True)
self.assertFalse(ret['OK'], str(ret))
self.assertIn("SourceSE not valid", ret['Message'])
def test_createRepl_addTrafoFail_(self):
""" test creating transformation """
tSE = "Target-SRM"
sSE = "Source-SRM"
prodID = 12345
module_name = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
trmodule = "DIRAC.TransformationSystem.Client.Transformation.Transformation"
with patch(trmodule + ".getTransformation", new=Mock(return_value=S_OK({}))), \
patch(trmodule + ".addTransformation", new=Mock(return_value=S_ERROR("Cannot add Trafo"))), \
patch(trmodule + "._Transformation__setSE", new=Mock(return_value=S_OK())), \
patch("%s.TransformationClient" % module_name, new=self.tMock):
ret = createDataTransformation('Moving', tSE, sSE, 'prodID', prodID, enable=True)
self.assertFalse(ret['OK'], str(ret))
self.assertIn("Cannot add Trafo", ret['Message'])
def test_createRepl_createTrafoFail_(self):
""" test creating transformation """
tSE = "Target-SRM"
sSE = "Source-SRM"
prodID = 12345
self.tClientMock.createTransformationInputDataQuery.return_value = S_ERROR("Failed to create IDQ")
module_name = "DIRAC.TransformationSystem.Utilities.ReplicationTransformation"
trmodule = "DIRAC.TransformationSystem.Client.Transformation.Transformation"
with patch(trmodule + ".getTransformation", new=Mock(return_value=S_OK({}))), \
patch(trmodule + ".addTransformation", new=Mock(return_value=S_OK())), \
patch(trmodule + "._Transformation__setSE", new=Mock(return_value=S_OK())), \
patch("%s.TransformationClient" % module_name, new=self.tMock):
ret = createDataTransformation('Moving', tSE, sSE, 'prodID', prodID, enable=True)
self.assertFalse(ret['OK'], str(ret))
self.assertIn("Failed to create IDQ", ret['Message'])
class TestParams(unittest.TestCase):
"""Test the parameters for the moving creation script"""
def setUp(self):
self.arguments = []
self.sMock = Mock()
self.sMock.getPositionalArgs.return_value = self.arguments
self.params = Params()
def tearDown(self):
pass
@patch(GET_PROXY, new=getProxyMock())
@patch(GET_VOMS, new=Mock(return_value='clic'))
def test_checkSettings(self):
self.arguments = ['12345', "TargetSE"]
self.sMock.getPositionalArgs.return_value = self.arguments
ret = self.params.checkSettings(self.sMock)
self.assertTrue(ret['OK'], ret.get("Message", ''))
self.assertEqual(self.params.metaValues, ['12345'])
self.assertEqual(self.params.sourceSE, '')
self.assertEqual(self.params.targetSE, ["TargetSE"])
@patch(GET_PROXY, new=getProxyMock())
@patch(GET_VOMS, new=Mock(return_value='clic'))
def test_setMetadata(self):
ret = self.params.setMetadata("Datatype:GEN, Energy: 124")
self.assertTrue(ret['OK'], ret.get("Message", ''))
self.assertEqual(self.params.extraData, {'Datatype': 'GEN',
'Energy': '124'})
@patch(GET_PROXY, new=getProxyMock())
@patch(GET_VOMS, new=Mock(return_value='clic'))
def test_checkSettings_FailArgumentSize(self):
self.arguments = ['12345', "TargetSE", 'Foo']
self.sMock.getPositionalArgs.return_value = self.arguments
ret = self.params.checkSettings(self.sMock)
self.assertFalse(ret['OK'], str(ret))
self.assertTrue(any("ERROR: Wrong number of arguments" in msg for msg in self.params.errorMessages))
@patch(GET_PROXY, new=getProxyMock(False))
@patch(GET_VOMS, new=Mock(return_value='clic'))
def test_FailProxy(self):
self.arguments = ['12345', "TargetSE"]
self.sMock.getPositionalArgs.return_value = self.arguments
ret = self.params.checkSettings(self.sMock)
self.assertFalse(ret['OK'], str(ret))
self.assertTrue(any("ERROR: No Proxy" in msg for msg in self.params.errorMessages), str(self.params.errorMessages))
@patch(GET_PROXY, new=getProxyMock(True))
@patch(GET_VOMS, new=Mock(return_value=''))
def test_FailProxy2(self):
self.arguments = ['12345', "TargetSE"]
self.sMock.getPositionalArgs.return_value = self.arguments
ret = self.params.checkSettings(self.sMock)
self.assertFalse(ret['OK'], str(ret))
self.assertTrue(any("ERROR: ProxyGroup" in msg for msg in self.params.errorMessages),
str(self.params.errorMessages))
def test_setExtraName(self):
ret = self.params.setExtraname("extraName")
self.assertTrue(ret['OK'], ret.get('Message', ""))
self.assertEqual("extraName", self.params.extraname)
|
arrabito/DIRAC
|
TransformationSystem/test/Test_replicationTransformation.py
|
Python
|
gpl-3.0
| 9,922
|
[
"DIRAC"
] |
825fd6f035fd21a39d597d53ffa545810cb35aae1173f73b2e69d85b40c7fa27
|
## Meant to be used with Daniel Lowe's patent database
# reaction SMILES strings only, used for template extraction
from __future__ import print_function
import argparse
from numpy.random import shuffle # for random selection
from numpy.random import random
import rdkit.Chem as Chem # molecule building
from rdkit.Chem import AllChem
from collections import defaultdict
import rdkit.Chem.Draw as Draw
from rdkit import RDLogger
import datetime # for info files
import json # for dumping
import sys # for command line
import os # for file paths
import re
import itertools
from ochem_predict_nn.utils.database import collection_example_reactions_smilesonly
collection = collection_example_reactions_smilesonly()
def main(db_fpath, N = 15):
'''Read reactions from Lowe's patent reaction SMILES'''
# Open file
data_fid = open(db_fpath, 'r')
# Define scoring variables
total_templates = 0 # total reactions simulated (excludes skipped)
total_correct = 0 # actual products predicted
total_precise = 0 # ONLY actual products predicted
try: # to allow breaking
# Look for entries
documents = []
for i, line in enumerate(data_fid):
# Are we done?
if i == N:
break
# Unpack
line = line.strip()
reaction_smiles = line.split('\t')[0]
reference = line.split('\t')[1]
reaction_smiles = reaction_smiles.split(' ')[0]
# Load into database
documents.append(
{
'reaction_smiles': reaction_smiles,
'reference': reference,
'random': random(),
}
)
# Report progress and insert every 1000
if ((i+1) % 1000) == 0:
print('{}/{}'.format(i+1, N))
result = collection.insert(documents)
documents = []
result = collection.insert(documents)
except KeyboardInterrupt:
print('Stopped early!')
except Exception as e:
print(e)
print(line)
print('Created {} database entries'.format(collection.find().count()))
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('data_file', type = str,
help = 'File where each line is an atom-mapped smiles reaction')
parser.add_argument('-n', '--num', type = int, default = 50,
help = 'Maximum number of records to load; defaults to 50')
args = parser.parse_args()
clear = raw_input('Do you want to clear the {} existing examples? '.format(collection.find().count()))
if clear in ['y', 'Y', 'yes', '1', 'Yes']:
result = collection.delete_many({})
print('Cleared {} entries from collection'.format(result.deleted_count))
main(args.data_file, N = args.num)
|
connorcoley/ochem_predict_nn
|
data/load_lowe_examples_into_db_smilesonly.py
|
Python
|
mit
| 2,555
|
[
"RDKit"
] |
02d55a5278d7e61709c882265c858518b62c12d10cb672fa318beb0d2bc721d7
|
'''
Machine learning on SDSS galaxy data
'''
from sklearn import svm
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
import numpy as np
def find_params(model, data, labels, param_grid={}, test_frac=0.6, seed=500):
'''
Use a grid search to determine the optimum parameters for the given model.
'''
X_train, X_test, y_train, y_test = \
train_test_split(data, labels, test_size=test_frac, random_state=seed)
clf = GridSearchCV(model, param_grid)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
pars = clf.best_params_
return pars, score
def find_outliers(model, data, labels, params, train_frac=0.4, seed=520,
out_percent=0.98, verbose=False):
'''
Find outliers using the given model and data. Params should be found using
a grid search.
'''
X_train, X_test, y_train, y_test = \
train_test_split(data, labels, test_size=train_frac, random_state=seed)
mod = model(**params)
mod.fit(X_train)
y_pred = mod.predict(X_test)
thresh = np.percentile(y_pred, out_percent)
if verbose:
print(classification_report(y_test, y_pred))
return y_pred > thresh
if __name__ == "__main__":
model = svm.OneClassSVM
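# Hedged usage sketch (assumption, not part of the original script): with the
# helpers above, a run might look like
#   pars = {'nu': 0.1, 'gamma': 0.1}
#   outlier_mask = find_outliers(model, X, y, pars, verbose=True)
# where X, y are the SDSS feature matrix and labels loaded elsewhere.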
|
e-koch/Phys-595
|
project_code/Machine Learning/learn_solution.py
|
Python
|
mit
| 1,294
|
[
"Galaxy"
] |
657b802ed4e0e8724ac6173ea013e542446a261f293e61d2253144665dc4ba7f
|
#!/usr/bin/env ipython
from pylab import *
from numpy import *
from scipy.io.netcdf import netcdf_file
from datetime import datetime, time, timedelta
#------------ shared libraries:
"""
--- before modifying things, keep in mind the bugs listed in:
'../../shared_lib/COMENTARIOS.txt'
"""
import sys
sys.path.append('../../../../shared_lib')
from shared_funcs_test import * #c_funcs import *
#------------------------------
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
from ShiftTimes import *
import numpy as np
from z_expansion_gulisano import z as z_exp
import console_colors as ccl
import read_NewTable as tb
class boundaries:
def __init__(self):
name = 'name'
HOME = os.environ['HOME']
gral = general()
day = 86400.
#---- input files
gral.fnames = fnames = {}
fnames['ACE'] = '%s/data_ace/64sec_mag-swepam/ace.1998-2014.nc' % HOME
fnames['McMurdo'] = '%s/actividad_solar/neutron_monitors/mcmurdo/mcmurdo_utc_correg.dat' % HOME
fnames['ACE_o7o6'] = '%s/data_ace/1hr_multi/ace.1998-2013.nc' % HOME
#fnames['table_richardson'] = '../../../../data_317events_iii.nc'
#fnames['table_richardson'] = '%s/ASOC_ICME-FD/icmes_richardson/data/data_317events_iii.nc' % HOME
fnames['table_richardson'] = '%s/ASOC_ICME-FD/icmes_richardson/data/rich_events_ace.nc' % HOME
#---- output directories
gral.dirs = dirs = {}
dirs['dir_plots'] = '../plots'
dirs['dir_ascii'] = '../ascii'
dirs['suffix'] = '_luciano_' #'_test_Vmc_' # suffix for the directory where these
# figures will be saved
#-------------------------------------------------------------
#------- select MCs with catalog label (lepping=2, etc)
#MCwant = {'flags': ('0', '1', '2', '2H'),
# 'alias': '0.1.2.2H'} # to "flag" the name/path of the figures
#MCwant = {'flags': ('1', '2', '2H'),
# 'alias': '1.2.2H'} # to "flag" the name/path of the figures
#MCwant = {'flags': ('2', '2H'),
# 'alias': '2.2H'} # to "flag" the name/path of the figures
MCwant = {'flags': ('2',),
'alias': '2'} # to "flag" the name/path of the figures
FILTER = {}
FILTER['Mcmultiple'] = False # True to include multi-MC events
FILTER['CorrShift'] = True
FILTER['wang'] = False #False #True
FILTER['vsw_filter'] = False
FILTER['z_filter_on'] = False
FILTER['MCwant'] = MCwant
FILTER['B_filter'] = False
FILTER['filter_dR.icme'] = False #True
CUTS = {}
CUTS['ThetaThres'] = 90.0 # all events with theta>ThetaThres
CUTS['dTday'] = 0.0
CUTS['v_lo'] = 550.0
CUTS['v_hi'] = 3000.0
CUTS['z_lo'] = -50.0
CUTS['z_hi'] = 0.65
nBin = {}
nBin['before'] = 2
nBin['after'] = 4
nBin['bins_per_utime'] = 50 # bins per unit of time
nBin['total'] = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime']
fgap = 0.2
#--- structure boundaries
bounds = boundaries()
bounds.tini = tb.tini_mc #tb.tini_mc #tb.tshck
bounds.tend = tb.tend_mc #tb.tend_mc #tb.tini_mc
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'ACE'
FILTER['vsw_filter'] = False
emgr = events_mgr(gral, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp)
emgr.run_all()
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#emgr.data_name = 'McMurdo'
#emgr.run_all()
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
emgr.data_name = 'ACE_o7o6'
emgr.run_all()
##
|
jimsrc/seatos
|
mcs/src/histos_centrales/src/c_rebineo_test.py
|
Python
|
mit
| 3,764
|
[
"NetCDF"
] |
f6f37d455061124d0a282022a1217b835e5428cfa815641426e1109238f79be2
|
#!/usr/bin/python
# __*__ coding: utf8 __*__
#
# This file is a part of Siesta Help Scripts
#
# (c) Andrey Sobolev, 2012
#
oneline = "Find nearest neighbors table"
#import os
import numpy as np
from base_numpy import model_base
try:
from voronoi.model_voronoi import model_voronoi as MV
except (ImportError,):
from shs.voronoi.model_voronoi import model_voronoi as MV
# --------------------------------------------------------------------
class Free_class:
pass
#=============================================================================
class model_ngbr(model_base):
def __init__(self,d={}):
model_base.__init__(self,d)
def make_verlet(self, r):
" Make Verlet for the model "
print "Verlet go. r=",r
vc = self.vc
crd = self.atoms['crd']
ver = Free_class()
ver.imax = (vc/r).astype(int) + 1
ver.dr = vc / ver.imax
ver.ind = np.mod((crd/ver.dr).astype(int), ver.imax)
self.verlet=ver
print "Verlet done"
def make_ngbr_short(self,r):
" makes Short Neighbours table "
print "Short NGBR go. r=",r
if not hasattr(self,'verlet'): self.make_verlet(r/2.5)
ng = Free_class()
ng.r = r
ind = self.verlet.ind
dr = self.verlet.dr
imax = self.verlet.imax
m = int(r/np.min(dr)) + 1
print 'm = ', m
ng.ind=[]
for vi in ind:
b0 = (np.abs(ind[:,0]-vi[0]) < m) | ((ind[:,0]-vi[0])%imax[0] < m) | ((vi[0] - ind[:,0])%imax[0] < m)
b1 = (np.abs(ind[:,1]-vi[1]) < m) | ((ind[:,1]-vi[1])%imax[1] < m) | ((vi[1] - ind[:,1])%imax[1] < m)
b2 = (np.abs(ind[:,2]-vi[2]) < m) | ((ind[:,2]-vi[2])%imax[2] < m) | ((vi[2] - ind[:,2])%imax[2] < m)
idn, = np.where(b0 * b1 * b2)
ng.ind.append(idn)
self.ngbr_short=ng
print "Short NGBR done"
def make_ngbr(self,r=None,part=''):
" makes Neighbours table with distances "
if r == None:
r = np.max(self.vc)/3.
print "NGBR numpy go. r=",r
# if not hasattr(self,'ngbr_short'): self.make_ngbr_short(r)
ng=Free_class()
r2=r*r
ng.r=r
# ngsh = self.ngbr_short.ind
# crd - to box, vc - orthogonal
crd, vc = to_orthogonal(self.atoms['crd'], self.vc)
ng.ind = []
vn, r2n = distance(crd, vc)
for iat in range(crd.shape[0]):
ivn = vn[iat]
ir2n = r2n[iat]
idn, = np.nonzero((ir2n < r2) & (ir2n > 0.))
ng.ind.append(np.rec.fromarrays([idn, ivn[idn], ir2n[idn]], names = 'n, vec, r2', formats = 'i4, 3f4, f4'))
self.ngbr=ng
print "NGBR numpy done"
def toMV(self):
''' get model_voronoi instance.
let it be so until we can get voronoi_numpy working somehow
'''
mv = MV()
# legend - to list
legend = list(self.atoms.dtype.names)
crd_index = legend.index('crd')
mv.legend = legend[:crd_index] + ['x','y','z'] + legend[crd_index+1:]
v1 = np.linalg.det(self.vc)
v2 = self.vc[0,0]*self.vc[1,1]*self.vc[2,2]
print 'NGBR.toMV: Vcell = %f, Vorth = %f' % (v1, v2)
mv.vc = [float(self.vc[0,0]),float(self.vc[1,1]),float(self.vc[2,2])]
# self.atoms - to box
self.atoms['crd'], vc = to_orthogonal(self.atoms['crd'], self.vc)
# atoms - to list of lists
mv.atoms = []
atoms_list = list(self.atoms)
for iat, line in enumerate(atoms_list):
mv.atoms.append([])
for el in flatten(line):
if type(el).__name__ == 'float64' or type(el).__name__ == 'float32':
mv.atoms[iat].append(float(el))
elif type(el).__name__ == 'int32':
mv.atoms[iat].append(int(el))
elif type(el).__name__ == 'string_':
mv.atoms[iat].append(str(el))
# ngbr - to dict
mv.ngbr = Free_class()
mv.ngbr.index = [{} for atom in mv.atoms]
for i, ind in enumerate(self.ngbr.ind):
for ng in ind:
mv.ngbr.index[i][ng['n']] = []
for el in flatten(list(ng))[1:]:
if type(el).__name__ == 'float64' or type(el).__name__ == 'float32':
mv.ngbr.index[i][ng['n']].append(float(el))
return mv
def distance(crd, vc):
''' Find distances between atoms based on PBC in a supercell built on vc vectors
In:
-> crd - coordinates array
-> vc - lattice vectors
-> n - a tuple of 2 crd index lists (or None if we need to find all-to-all distances)
'''
vc_inv = np.linalg.inv(vc)
crd_vc = np.dot(crd, vc_inv)
n = len(crd_vc)
sij = crd_vc[None,...]-crd_vc[:, None,...]
# periodic boundary conditions
sij[sij > 0.5] -= 1.0
sij[sij < -0.5] += 1.0
# print sij.shape
sij = sij.reshape(n*n, 3)
rij = np.dot(sij, vc)
r2 = (rij**2.0).sum(axis = 1)
return rij.reshape(n,n,3), r2.reshape(n,n)
def flatten(x):
result = []
for v in x:
if hasattr(v, '__iter__') and not isinstance(v, basestring):
result.extend(flatten(v))
else:
result.append(v)
return result
def to_orthogonal(crd, vc):
vc_inv = np.linalg.inv(vc)
crd = np.dot(crd, vc_inv)
crd[crd < 0.] += 1.
crd[crd > 1.] -= 1.
vc_diag = np.diag(vc)
orth_vc = np.diag(vc_diag)
crd = np.dot(crd, orth_vc)
return crd, orth_vc
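# Hedged usage sketch (not part of the original module): exercising the PBC distance
# helper on a small cubic cell; names and values below are illustrative only.
if __name__ == '__main__':
    _crd = np.random.rand(8, 3) * 5.0
    _vc = np.eye(3) * 5.0
    _rij, _r2 = distance(_crd, _vc)
    print _rij.shape, _r2.shape    # -> (8, 8, 3) (8, 8)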
|
ansobolev/shs
|
shs/voronoi/numpy/ngbr.py
|
Python
|
mit
| 5,555
|
[
"SIESTA"
] |
0f154af26f1791e2a9e41e311899227155683546819fcf6031c14a42a50cbcac
|
import calendar as tcalendar
import re
import binascii
import os
import hashlib
import json
import io
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from calendar import monthrange
from datetime import date, datetime
from math import pi, sqrt
import vobject
import requests
from dateutil.relativedelta import relativedelta, MO
from django.conf import settings
from django.core.mail import send_mail
from django.core.mail import EmailMessage
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.urls import get_resolver, reverse
from django.db.models import Q, Sum, Case, When, IntegerField, Value, Count
from django.template.defaultfilters import pluralize
from django.template.loader import render_to_string
from django.utils import timezone
from jira import JIRA
from jira.exceptions import JIRAError
from django.dispatch import receiver
from dojo.signals import dedupe_signal
from dojo.models import Finding, Engagement, Finding_Template, Product, JIRA_PKey, JIRA_Issue, \
Dojo_User, User, Alerts, System_Settings, Notifications, UserContactInfo, Endpoint, Benchmark_Type, \
Language_Type, Languages, Rule
from asteval import Interpreter
from requests.auth import HTTPBasicAuth
import logging
logger = logging.getLogger(__name__)
deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
"""
Helper functions for DefectDojo
"""
def sync_false_history(new_finding, *args, **kwargs):
if new_finding.endpoints.count() == 0:
eng_findings_cwe = Finding.objects.filter(
test__engagement__product=new_finding.test.engagement.product,
cwe=new_finding.cwe,
test__test_type=new_finding.test.test_type,
false_p=True, hash_code=new_finding.hash_code).exclude(id=new_finding.id).exclude(cwe=None)
eng_findings_title = Finding.objects.filter(
test__engagement__product=new_finding.test.engagement.product,
title=new_finding.title,
test__test_type=new_finding.test.test_type,
false_p=True, hash_code=new_finding.hash_code).exclude(id=new_finding.id)
total_findings = eng_findings_cwe | eng_findings_title
else:
eng_findings_cwe = Finding.objects.filter(
test__engagement__product=new_finding.test.engagement.product,
cwe=new_finding.cwe,
test__test_type=new_finding.test.test_type,
false_p=True).exclude(id=new_finding.id).exclude(cwe=None).exclude(endpoints=None)
eng_findings_title = Finding.objects.filter(
test__engagement__product=new_finding.test.engagement.product,
title=new_finding.title,
test__test_type=new_finding.test.test_type,
false_p=True).exclude(id=new_finding.id).exclude(endpoints=None)
total_findings = eng_findings_cwe | eng_findings_title
if total_findings.count() > 0:
new_finding.false_p = True
new_finding.active = False
new_finding.verified = False
super(Finding, new_finding).save(*args, **kwargs)
def is_deduplication_on_engagement_mismatch(new_finding, to_duplicate_finding):
return not new_finding.test.engagement.deduplication_on_engagement and to_duplicate_finding.test.engagement.deduplication_on_engagement
@receiver(dedupe_signal, sender=Finding)
def sync_dedupe(sender, *args, **kwargs):
system_settings = System_Settings.objects.get()
if system_settings.enable_deduplication:
new_finding = kwargs['new_finding']
deduplicationLogger.debug('sync_dedupe for: ' + str(new_finding.id) +
":" + str(new_finding.title))
# ---------------------------------------------------------
# 1) Collects all the findings that have the same:
# (title and static_finding and dynamic_finding)
# or (CWE and static_finding and dynamic_finding)
# as the new one
# (this is "cond1")
# ---------------------------------------------------------
if new_finding.test.engagement.deduplication_on_engagement:
eng_findings_cwe = Finding.objects.filter(
test__engagement=new_finding.test.engagement,
cwe=new_finding.cwe).exclude(id=new_finding.id).exclude(cwe=0).exclude(duplicate=True)
eng_findings_title = Finding.objects.filter(
test__engagement=new_finding.test.engagement,
title=new_finding.title).exclude(id=new_finding.id).exclude(duplicate=True)
else:
eng_findings_cwe = Finding.objects.filter(
test__engagement__product=new_finding.test.engagement.product,
cwe=new_finding.cwe).exclude(id=new_finding.id).exclude(cwe=0).exclude(duplicate=True)
eng_findings_title = Finding.objects.filter(
test__engagement__product=new_finding.test.engagement.product,
title=new_finding.title).exclude(id=new_finding.id).exclude(duplicate=True)
total_findings = eng_findings_cwe | eng_findings_title
deduplicationLogger.debug("Found " +
str(len(eng_findings_cwe)) + " findings with same cwe, " +
str(len(eng_findings_title)) + " findings with same title: " +
str(len(total_findings)) + " findings with either same title or same cwe")
# total_findings = total_findings.order_by('date')
for find in total_findings:
flag_endpoints = False
flag_line_path = False
flag_hash = False
if is_deduplication_on_engagement_mismatch(new_finding, find):
deduplicationLogger.debug(
'deduplication_on_engagement_mismatch, skipping dedupe.')
continue
# ---------------------------------------------------------
# 2) If existing and new findings have endpoints: compare them all
# Else look at line+file_path
# (if new finding is not static, do not deduplicate)
# ---------------------------------------------------------
if find.endpoints.count() != 0 and new_finding.endpoints.count() != 0:
list1 = [e.host_with_port for e in new_finding.endpoints.all()]
list2 = [e.host_with_port for e in find.endpoints.all()]
if all(x in list1 for x in list2):
flag_endpoints = True
elif new_finding.static_finding and len(new_finding.file_path) > 0:
if str(find.line) == str(new_finding.line) and find.file_path == new_finding.file_path:
flag_line_path = True
else:
deduplicationLogger.debug("no endpoints on one of the findings and file_path doesn't match")
else:
deduplicationLogger.debug("no endpoints on one of the findings and the new finding is either dynamic or doesn't have a file_path; Deduplication will not occur")
if find.hash_code == new_finding.hash_code:
flag_hash = True
deduplicationLogger.debug(
'deduplication flags for new finding ' + str(new_finding.id) + ' and existing finding ' + str(find.id) +
' flag_endpoints: ' + str(flag_endpoints) + ' flag_line_path:' + str(flag_line_path) + ' flag_hash:' + str(flag_hash))
# ---------------------------------------------------------
# 3) Findings are duplicate if (cond1 is true) and they have the same:
# hash
            #    and (endpoints or (line and file_path))
# ---------------------------------------------------------
if ((flag_endpoints or flag_line_path) and flag_hash):
deduplicationLogger.debug('New finding ' + str(new_finding.id) + ' is a duplicate of existing finding ' + str(find.id))
new_finding.duplicate = True
new_finding.active = False
new_finding.verified = False
new_finding.duplicate_finding = find
find.duplicate_list.add(new_finding)
find.found_by.add(new_finding.test.test_type)
super(Finding, new_finding).save()
def sync_rules(new_finding, *args, **kwargs):
rules = Rule.objects.filter(applies_to='Finding', parent_rule=None)
for rule in rules:
child_val = True
child_list = [val for val in rule.child_rules.all()]
while (len(child_list) != 0):
child_val = child_val and child_rule(child_list.pop(), new_finding)
if child_val:
if rule.operator == 'Matches':
if getattr(new_finding, rule.match_field) == rule.match_text:
if rule.application == 'Append':
set_attribute_rule(new_finding, rule, (getattr(
new_finding, rule.applied_field) + rule.text))
else:
set_attribute_rule(new_finding, rule, rule.text)
new_finding.save(dedupe_option=False,
rules_option=False)
else:
if rule.match_text in getattr(new_finding, rule.match_field):
if rule.application == 'Append':
set_attribute_rule(new_finding, rule, (getattr(
new_finding, rule.applied_field) + rule.text))
else:
set_attribute_rule(new_finding, rule, rule.text)
new_finding.save(dedupe_option=False,
rules_option=False)
def set_attribute_rule(new_finding, rule, value):
if rule.text == "True":
setattr(new_finding, rule.applied_field, True)
elif rule.text == "False":
setattr(new_finding, rule.applied_field, False)
else:
setattr(new_finding, rule.applied_field, value)
def child_rule(rule, new_finding):
if rule.operator == 'Matches':
if getattr(new_finding, rule.match_field) == rule.match_text:
return True
else:
return False
else:
if rule.match_text in getattr(new_finding, rule.match_field):
return True
else:
return False
def count_findings(findings):
product_count = {}
finding_count = {'low': 0, 'med': 0, 'high': 0, 'crit': 0}
for f in findings:
product = f.test.engagement.product
if product in product_count:
product_count[product][4] += 1
if f.severity == 'Low':
product_count[product][3] += 1
finding_count['low'] += 1
if f.severity == 'Medium':
product_count[product][2] += 1
finding_count['med'] += 1
if f.severity == 'High':
product_count[product][1] += 1
finding_count['high'] += 1
if f.severity == 'Critical':
product_count[product][0] += 1
finding_count['crit'] += 1
else:
product_count[product] = [0, 0, 0, 0, 0]
product_count[product][4] += 1
if f.severity == 'Low':
product_count[product][3] += 1
finding_count['low'] += 1
if f.severity == 'Medium':
product_count[product][2] += 1
finding_count['med'] += 1
if f.severity == 'High':
product_count[product][1] += 1
finding_count['high'] += 1
if f.severity == 'Critical':
product_count[product][0] += 1
finding_count['crit'] += 1
return product_count, finding_count
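# Illustrative worked example (hypothetical counts): product_count maps each
# product to [critical, high, medium, low, total], so two High and one Low
# finding for a product yield [0, 2, 0, 1, 3], while finding_count
# accumulates {'low': 1, 'med': 0, 'high': 2, 'crit': 0} across all products.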
def findings_this_period(findings, period_type, stuff, o_stuff, a_stuff):
# periodType: 0 - weeks
# 1 - months
now = timezone.now()
for i in range(6):
counts = []
# Weeks start on Monday
if period_type == 0:
curr = now - relativedelta(weeks=i)
start_of_period = curr - relativedelta(
weeks=1, weekday=0, hour=0, minute=0, second=0)
end_of_period = curr + relativedelta(
weeks=0, weekday=0, hour=0, minute=0, second=0)
else:
curr = now - relativedelta(months=i)
start_of_period = curr - relativedelta(
day=1, hour=0, minute=0, second=0)
end_of_period = curr + relativedelta(
day=31, hour=23, minute=59, second=59)
o_count = {
'closed': 0,
'zero': 0,
'one': 0,
'two': 0,
'three': 0,
'total': 0
}
a_count = {
'closed': 0,
'zero': 0,
'one': 0,
'two': 0,
'three': 0,
'total': 0
}
for f in findings:
if f.mitigated is not None and end_of_period >= f.mitigated >= start_of_period:
o_count['closed'] += 1
elif f.mitigated is not None and f.mitigated > end_of_period and f.date <= end_of_period.date(
):
if f.severity == 'Critical':
o_count['zero'] += 1
elif f.severity == 'High':
o_count['one'] += 1
elif f.severity == 'Medium':
o_count['two'] += 1
elif f.severity == 'Low':
o_count['three'] += 1
elif f.mitigated is None and f.date <= end_of_period.date():
if f.severity == 'Critical':
o_count['zero'] += 1
elif f.severity == 'High':
o_count['one'] += 1
elif f.severity == 'Medium':
o_count['two'] += 1
elif f.severity == 'Low':
o_count['three'] += 1
            # NOTE: this condition repeats the branch above, so it is never
            # reached and a_count is never incremented here.
            elif f.mitigated is None and f.date <= end_of_period.date():
if f.severity == 'Critical':
a_count['zero'] += 1
elif f.severity == 'High':
a_count['one'] += 1
elif f.severity == 'Medium':
a_count['two'] += 1
elif f.severity == 'Low':
a_count['three'] += 1
total = sum(o_count.values()) - o_count['closed']
if period_type == 0:
counts.append(
start_of_period.strftime("%b %d") + " - " +
end_of_period.strftime("%b %d"))
else:
counts.append(start_of_period.strftime("%b %Y"))
counts.append(o_count['zero'])
counts.append(o_count['one'])
counts.append(o_count['two'])
counts.append(o_count['three'])
counts.append(total)
counts.append(o_count['closed'])
stuff.append(counts)
o_stuff.append(counts[:-1])
a_counts = []
a_total = sum(a_count.values())
if period_type == 0:
a_counts.append(
start_of_period.strftime("%b %d") + " - " +
end_of_period.strftime("%b %d"))
else:
a_counts.append(start_of_period.strftime("%b %Y"))
a_counts.append(a_count['zero'])
a_counts.append(a_count['one'])
a_counts.append(a_count['two'])
a_counts.append(a_count['three'])
a_counts.append(a_total)
a_stuff.append(a_counts)
def add_breadcrumb(parent=None,
title=None,
top_level=True,
url=None,
request=None,
clear=False):
title_done = False
if clear:
request.session['dojo_breadcrumbs'] = None
return
else:
crumbs = request.session.get('dojo_breadcrumbs', None)
if top_level or crumbs is None:
crumbs = [
{
'title': 'Home',
'url': reverse('home')
},
]
if parent is not None and getattr(parent, "get_breadcrumbs", None):
crumbs += parent.get_breadcrumbs()
else:
title_done = True
crumbs += [{
'title': title,
'url': request.get_full_path() if url is None else url
}]
else:
resolver = get_resolver(None).resolve
if parent is not None and getattr(parent, "get_breadcrumbs", None):
obj_crumbs = parent.get_breadcrumbs()
if title is not None:
obj_crumbs += [{
'title':
title,
'url':
request.get_full_path() if url is None else url
}]
else:
title_done = True
obj_crumbs = [{
'title':
title,
'url':
request.get_full_path() if url is None else url
}]
for crumb in crumbs:
crumb_to_resolve = crumb['url'] if '?' not in crumb[
'url'] else crumb['url'][:crumb['url'].index('?')]
crumb_view = resolver(crumb_to_resolve)
for obj_crumb in obj_crumbs:
obj_crumb_to_resolve = obj_crumb[
'url'] if '?' not in obj_crumb['url'] else obj_crumb[
'url'][:obj_crumb['url'].index('?')]
obj_crumb_view = resolver(obj_crumb_to_resolve)
if crumb_view.view_name == obj_crumb_view.view_name:
if crumb_view.kwargs == obj_crumb_view.kwargs:
if len(obj_crumbs) == 1 and crumb in crumbs:
crumbs = crumbs[:crumbs.index(crumb)]
else:
obj_crumbs.remove(obj_crumb)
else:
if crumb in crumbs:
crumbs = crumbs[:crumbs.index(crumb)]
crumbs += obj_crumbs
request.session['dojo_breadcrumbs'] = crumbs
def get_punchcard_data(findings, weeks_between, start_date):
punchcard = list()
ticks = list()
highest_count = 0
tick = 0
week_count = 1
# mon 0, tues 1, wed 2, thurs 3, fri 4, sat 5, sun 6
# sat 0, sun 6, mon 5, tue 4, wed 3, thur 2, fri 1
day_offset = {0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0, 6: 6}
for x in range(-1, weeks_between):
# week starts the monday before
new_date = start_date + relativedelta(weeks=x, weekday=MO(1))
end_date = new_date + relativedelta(weeks=1)
append_tick = True
days = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
for finding in findings:
try:
if new_date < datetime.combine(finding.date, datetime.min.time(
)).replace(tzinfo=timezone.get_current_timezone()) <= end_date:
# [0,0,(20*.02)]
# [week, day, weight]
days[day_offset[finding.date.weekday()]] += 1
if days[day_offset[finding.date.weekday()]] > highest_count:
highest_count = days[day_offset[
finding.date.weekday()]]
except:
if new_date < finding.date <= end_date:
# [0,0,(20*.02)]
# [week, day, weight]
days[day_offset[finding.date.weekday()]] += 1
if days[day_offset[finding.date.weekday()]] > highest_count:
highest_count = days[day_offset[
finding.date.weekday()]]
pass
if sum(days.values()) > 0:
for day, count in list(days.items()):
punchcard.append([tick, day, count])
if append_tick:
ticks.append([
tick,
new_date.strftime(
"<span class='small'>%m/%d<br/>%Y</span>")
])
append_tick = False
tick += 1
week_count += 1
# adjust the size
ratio = (sqrt(highest_count / pi))
for punch in punchcard:
punch[2] = (sqrt(punch[2] / pi)) / ratio
return punchcard, ticks, highest_count
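# Illustrative note on the scaling above: punch[2] = sqrt(count / pi) / ratio
# with ratio = sqrt(highest_count / pi), so the bubble radius grows with the
# square root of the count and the busiest day is normalized to 1.0.
# For example, with highest_count = 16, a day with 4 findings gets
#   sqrt(4 / pi) / sqrt(16 / pi) = 0.5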
# 5 params
def get_period_counts_legacy(findings,
findings_closed,
accepted_findings,
period_interval,
start_date,
relative_delta='months'):
opened_in_period = list()
accepted_in_period = list()
opened_in_period.append(
['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'])
accepted_in_period.append(
['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'])
for x in range(-1, period_interval):
if relative_delta == 'months':
# make interval the first through last of month
end_date = (start_date + relativedelta(months=x)) + relativedelta(
day=1, months=+1, days=-1)
new_date = (
start_date + relativedelta(months=x)) + relativedelta(day=1)
else:
# week starts the monday before
new_date = start_date + relativedelta(weeks=x, weekday=MO(1))
end_date = new_date + relativedelta(weeks=1, weekday=MO(1))
closed_in_range_count = findings_closed.filter(
mitigated__range=[new_date, end_date]).count()
if accepted_findings:
risks_a = accepted_findings.filter(
risk_acceptance__created__range=[
datetime(
new_date.year,
new_date.month,
1,
tzinfo=timezone.get_current_timezone()),
datetime(
new_date.year,
new_date.month,
monthrange(new_date.year, new_date.month)[1],
tzinfo=timezone.get_current_timezone())
])
else:
risks_a = None
crit_count, high_count, med_count, low_count, closed_count = [
0, 0, 0, 0, 0
]
for finding in findings:
if new_date <= datetime.combine(finding.date, datetime.min.time(
)).replace(tzinfo=timezone.get_current_timezone()) <= end_date:
if finding.severity == 'Critical':
crit_count += 1
elif finding.severity == 'High':
high_count += 1
elif finding.severity == 'Medium':
med_count += 1
elif finding.severity == 'Low':
low_count += 1
total = crit_count + high_count + med_count + low_count
opened_in_period.append(
[(tcalendar.timegm(new_date.timetuple()) * 1000), new_date,
crit_count, high_count, med_count, low_count, total,
closed_in_range_count])
crit_count, high_count, med_count, low_count, closed_count = [
0, 0, 0, 0, 0
]
if risks_a is not None:
for finding in risks_a:
if finding.severity == 'Critical':
crit_count += 1
elif finding.severity == 'High':
high_count += 1
elif finding.severity == 'Medium':
med_count += 1
elif finding.severity == 'Low':
low_count += 1
total = crit_count + high_count + med_count + low_count
accepted_in_period.append(
[(tcalendar.timegm(new_date.timetuple()) * 1000), new_date,
crit_count, high_count, med_count, low_count, total])
return {
'opened_per_period': opened_in_period,
'accepted_per_period': accepted_in_period
}
def get_period_counts(active_findings,
findings,
findings_closed,
accepted_findings,
period_interval,
start_date,
relative_delta='months'):
start_date = datetime(
start_date.year,
start_date.month,
start_date.day,
tzinfo=timezone.get_current_timezone())
opened_in_period = list()
active_in_period = list()
accepted_in_period = list()
opened_in_period.append(
['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'])
active_in_period.append(
['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'])
accepted_in_period.append(
['Timestamp', 'Date', 'S0', 'S1', 'S2', 'S3', 'Total', 'Closed'])
for x in range(-1, period_interval):
if relative_delta == 'months':
# make interval the first through last of month
end_date = (start_date + relativedelta(months=x)) + relativedelta(
day=1, months=+1, days=-1)
new_date = (
start_date + relativedelta(months=x)) + relativedelta(day=1)
else:
# week starts the monday before
new_date = start_date + relativedelta(weeks=x, weekday=MO(1))
end_date = new_date + relativedelta(weeks=1, weekday=MO(1))
closed_in_range_count = findings_closed.filter(
mitigated__range=[new_date, end_date]).count()
if accepted_findings:
risks_a = accepted_findings.filter(
risk_acceptance__created__range=[
datetime(
new_date.year,
new_date.month,
1,
tzinfo=timezone.get_current_timezone()),
datetime(
new_date.year,
new_date.month,
monthrange(new_date.year, new_date.month)[1],
tzinfo=timezone.get_current_timezone())
])
else:
risks_a = None
crit_count, high_count, med_count, low_count, closed_count = [
0, 0, 0, 0, 0
]
for finding in findings:
try:
if new_date <= datetime.combine(
finding.date, datetime.min.time()
).replace(tzinfo=timezone.get_current_timezone()) <= end_date:
if finding.severity == 'Critical':
crit_count += 1
elif finding.severity == 'High':
high_count += 1
elif finding.severity == 'Medium':
med_count += 1
elif finding.severity == 'Low':
low_count += 1
except:
if new_date <= finding.date <= end_date:
if finding.severity == 'Critical':
crit_count += 1
elif finding.severity == 'High':
high_count += 1
elif finding.severity == 'Medium':
med_count += 1
elif finding.severity == 'Low':
low_count += 1
pass
total = crit_count + high_count + med_count + low_count
opened_in_period.append(
[(tcalendar.timegm(new_date.timetuple()) * 1000), new_date,
crit_count, high_count, med_count, low_count, total,
closed_in_range_count])
crit_count, high_count, med_count, low_count, closed_count = [
0, 0, 0, 0, 0
]
if risks_a is not None:
for finding in risks_a:
if finding.severity == 'Critical':
crit_count += 1
elif finding.severity == 'High':
high_count += 1
elif finding.severity == 'Medium':
med_count += 1
elif finding.severity == 'Low':
low_count += 1
total = crit_count + high_count + med_count + low_count
accepted_in_period.append(
[(tcalendar.timegm(new_date.timetuple()) * 1000), new_date,
crit_count, high_count, med_count, low_count, total])
crit_count, high_count, med_count, low_count, closed_count = [
0, 0, 0, 0, 0
]
for finding in active_findings:
try:
if datetime.combine(finding.date, datetime.min.time()).replace(
tzinfo=timezone.get_current_timezone()) <= end_date:
if finding.severity == 'Critical':
crit_count += 1
elif finding.severity == 'High':
high_count += 1
elif finding.severity == 'Medium':
med_count += 1
elif finding.severity == 'Low':
low_count += 1
except:
if finding.date <= end_date:
if finding.severity == 'Critical':
crit_count += 1
elif finding.severity == 'High':
high_count += 1
elif finding.severity == 'Medium':
med_count += 1
elif finding.severity == 'Low':
low_count += 1
pass
total = crit_count + high_count + med_count + low_count
active_in_period.append(
[(tcalendar.timegm(new_date.timetuple()) * 1000), new_date,
crit_count, high_count, med_count, low_count, total])
return {
'opened_per_period': opened_in_period,
'accepted_per_period': accepted_in_period,
'active_per_period': active_in_period
}
def opened_in_period(start_date, end_date, pt):
start_date = datetime(
start_date.year,
start_date.month,
start_date.day,
tzinfo=timezone.get_current_timezone())
end_date = datetime(
end_date.year,
end_date.month,
end_date.day,
tzinfo=timezone.get_current_timezone())
opened_in_period = Finding.objects.filter(
date__range=[start_date, end_date],
test__engagement__product__prod_type=pt,
verified=True,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated__isnull=True,
severity__in=(
'Critical', 'High', 'Medium',
'Low')).values('numerical_severity').annotate(
Count('numerical_severity')).order_by('numerical_severity')
total_opened_in_period = Finding.objects.filter(
date__range=[start_date, end_date],
test__engagement__product__prod_type=pt,
verified=True,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated__isnull=True,
severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate(
total=Sum(
Case(
When(
severity__in=('Critical', 'High', 'Medium', 'Low'),
then=Value(1)),
output_field=IntegerField())))['total']
oip = {
'S0':
0,
'S1':
0,
'S2':
0,
'S3':
0,
'Total':
total_opened_in_period,
'start_date':
start_date,
'end_date':
end_date,
'closed':
Finding.objects.filter(
mitigated__range=[start_date, end_date],
test__engagement__product__prod_type=pt,
severity__in=('Critical', 'High', 'Medium', 'Low')).aggregate(
total=Sum(
Case(
When(
severity__in=('Critical', 'High', 'Medium', 'Low'),
then=Value(1)),
output_field=IntegerField())))['total'],
'to_date_total':
Finding.objects.filter(
date__lte=end_date.date(),
verified=True,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated__isnull=True,
test__engagement__product__prod_type=pt,
severity__in=('Critical', 'High', 'Medium', 'Low')).count()
}
for o in opened_in_period:
oip[o['numerical_severity']] = o['numerical_severity__count']
return oip
def message(count, noun, verb):
return ('{} ' + noun + '{} {} ' + verb).format(
count, pluralize(count), pluralize(count, 'was,were'))
class FileIterWrapper(object):
def __init__(self, flo, chunk_size=1024**2):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
data = self.flo.read(self.chunk_size)
if data:
return data
else:
raise StopIteration
def __iter__(self):
return self
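# Illustrative sketch (hypothetical path): FileIterWrapper yields fixed-size
# chunks until the file is exhausted, which suits streaming file responses.
def _example_stream_chunks(path='/tmp/report.pdf'):
    with open(path, 'rb') as flo:
        return [len(chunk) for chunk in FileIterWrapper(flo, chunk_size=1024)]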
def get_cal_event(start_date, end_date, summary, description, uid):
cal = vobject.iCalendar()
cal.add('vevent')
cal.vevent.add('summary').value = summary
cal.vevent.add('description').value = description
start = cal.vevent.add('dtstart')
start.value = start_date
end = cal.vevent.add('dtend')
end.value = end_date
cal.vevent.add('uid').value = uid
return cal
def named_month(month_number):
"""
Return the name of the month, given the number.
"""
return date(1900, month_number, 1).strftime("%B")
def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
return [
normspace(' ', (t[0] or t[1]).strip()) for t in findterms(query_string)
]
def build_query(query_string, search_fields):
""" Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search fields.
"""
query = None # Query to search for every search term
terms = normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query:
or_query = or_query | q
else:
or_query = q
if query:
query = query & or_query
else:
query = or_query
return query
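# Illustrative sketch (hypothetical query): quoted phrases survive
# normalize_query intact, and build_query ANDs one OR-of-fields Q object per
# term, e.g. (title OR description contains 'sql') AND
# (title OR description contains 'remote code').
def _example_search_query():
    terms = normalize_query('sql "remote code"')        # ['sql', 'remote code']
    return terms, build_query('sql "remote code"', ['title', 'description'])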
def template_search_helper(fields=None, query_string=None):
if not fields:
fields = [
'title',
'description',
]
findings = Finding_Template.objects.all()
if not query_string:
return findings
entry_query = build_query(query_string, fields)
found_entries = findings.filter(entry_query)
return found_entries
def get_page_items(request, items, page_size, param_name='page'):
size = request.GET.get('page_size', page_size)
paginator = Paginator(items, size)
page = request.GET.get(param_name)
try:
page = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
page = paginator.page(paginator.num_pages)
return page
def handle_uploaded_threat(f, eng):
name, extension = os.path.splitext(f.name)
with open(settings.MEDIA_ROOT + '/threat/%s%s' % (eng.id, extension),
'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
eng.tmodel_path = settings.MEDIA_ROOT + '/threat/%s%s' % (eng.id,
extension)
eng.save()
def handle_uploaded_selenium(f, cred):
name, extension = os.path.splitext(f.name)
with open(settings.MEDIA_ROOT + '/selenium/%s%s' % (cred.id, extension),
'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
cred.selenium_script = settings.MEDIA_ROOT + '/selenium/%s%s' % (cred.id,
extension)
cred.save()
# Gets a connection to a Jira server based on the finding
def get_jira_connection(finding):
jira = None
prod = Product.objects.get(
engagement=Engagement.objects.get(test=finding.test))
try:
jpkey = JIRA_PKey.objects.get(product=prod)
jira_conf = jpkey.conf
if jira_conf is not None:
jira = JIRA(
server=jira_conf.url,
basic_auth=(jira_conf.username, jira_conf.password))
except JIRA_PKey.DoesNotExist:
pass
return jira
def jira_get_resolution_id(jira, issue, status):
transitions = jira.transitions(issue)
resolution_id = None
for t in transitions:
if t['name'] == "Resolve Issue":
resolution_id = t['id']
break
if t['name'] == "Reopen Issue":
resolution_id = t['id']
break
return resolution_id
def jira_change_resolution_id(jira, issue, id):
jira.transition_issue(issue, id)
# Logs the error to the alerts table, which appears in the notification toolbar
def log_jira_generic_alert(title, description):
create_notification(
event='jira_update',
title=title,
description=description,
icon='bullseye',
source='Jira')
# Logs the error to the alerts table, which appears in the notification toolbar
def log_jira_alert(error, finding):
create_notification(
event='jira_update',
title='Jira update issue',
description='Finding: ' + str(finding.id) + ', ' + error,
icon='bullseye',
source='Jira')
# Displays an alert for Jira notifications
def log_jira_message(text, finding):
create_notification(
event='jira_update',
title='Jira update message',
description=text + " Finding: " + str(finding.id),
url=reverse('view_finding', args=(finding.id, )),
icon='bullseye',
source='Jira')
# Adds labels to a Jira issue
def add_labels(find, issue):
    # Update the label with the system settings label
system_settings = System_Settings.objects.get()
labels = system_settings.jira_labels
if labels is None:
return
else:
labels = labels.split()
if len(labels) > 0:
for label in labels:
issue.fields.labels.append(label)
# Update the label with the product name (underscore)
prod_name = find.test.engagement.product.name.replace(" ", "_")
issue.fields.labels.append(prod_name)
issue.update(fields={"labels": issue.fields.labels})
def jira_long_description(find_description, find_id, jira_conf_finding_text):
return find_description + "\n\n*Dojo ID:* " + str(
find_id) + "\n\n" + jira_conf_finding_text
def add_issue(find, push_to_jira):
logger.debug('adding issue: ' + str(find))
eng = Engagement.objects.get(test=find.test)
prod = Product.objects.get(engagement=eng)
if JIRA_PKey.objects.filter(product=prod).count() == 0:
log_jira_alert(
'Finding cannot be pushed to jira as there is no jira configuration for this product.', find)
return
jpkey = JIRA_PKey.objects.get(product=prod)
jira_conf = jpkey.conf
if push_to_jira:
if 'Active' in find.status() and 'Verified' in find.status():
if ((jpkey.push_all_issues and Finding.get_number_severity(
System_Settings.objects.get().jira_minimum_severity) >=
Finding.get_number_severity(find.severity))):
log_jira_alert(
'Finding below jira_minimum_severity threshold.', find)
else:
logger.debug('Trying to create a new JIRA issue')
try:
JIRAError.log_to_tempfile = False
jira = JIRA(
server=jira_conf.url,
basic_auth=(jira_conf.username, jira_conf.password))
if jpkey.component:
new_issue = jira.create_issue(
project=jpkey.project_key,
summary=find.title,
components=[
{
'name': jpkey.component
},
],
description=jira_long_description(
find.long_desc(), find.id,
jira_conf.finding_text),
issuetype={'name': jira_conf.default_issue_type},
priority={
'name': jira_conf.get_priority(find.severity)
})
else:
new_issue = jira.create_issue(
project=jpkey.project_key,
summary=find.title,
description=jira_long_description(
find.long_desc(), find.id,
jira_conf.finding_text),
issuetype={'name': jira_conf.default_issue_type},
priority={
'name': jira_conf.get_priority(find.severity)
})
j_issue = JIRA_Issue(
jira_id=new_issue.id, jira_key=new_issue, finding=find)
j_issue.save()
find.jira_creation = timezone.now()
find.jira_change = find.jira_creation
find.save()
issue = jira.issue(new_issue.id)
# Add labels (security & product)
add_labels(find, new_issue)
# Upload dojo finding screenshots to Jira
for pic in find.images.all():
jira_attachment(
jira, issue,
settings.MEDIA_ROOT + pic.image_large.name)
# if jpkey.enable_engagement_epic_mapping:
# epic = JIRA_Issue.objects.get(engagement=eng)
# issue_list = [j_issue.jira_id,]
# jira.add_issues_to_epic(epic_id=epic.jira_id, issue_keys=[str(j_issue.jira_id)], ignore_epics=True)
except JIRAError as e:
log_jira_alert(e.text, find)
else:
log_jira_alert("Finding not active or not verified.",
find)
def jira_attachment(jira, issue, file, jira_filename=None):
basename = file
if jira_filename is None:
basename = os.path.basename(file)
# Check to see if the file has been uploaded to Jira
if jira_check_attachment(issue, basename) is False:
try:
if jira_filename is not None:
attachment = io.StringIO()
attachment.write(jira_filename)
jira.add_attachment(
issue=issue, attachment=attachment, filename=jira_filename)
else:
# read and upload a file
with open(file, 'rb') as f:
jira.add_attachment(issue=issue, attachment=f)
        except JIRAError as e:
            # log_jira_alert() needs a finding, which is not available here,
            # so fall back to the generic Jira alert helper.
            log_jira_generic_alert("Jira Attachment Error", e.text)
def jira_check_attachment(issue, source_file_name):
file_exists = False
for attachment in issue.fields.attachment:
filename = attachment.filename
if filename == source_file_name:
file_exists = True
break
return file_exists
def update_issue(find, old_status, push_to_jira):
prod = Product.objects.get(
engagement=Engagement.objects.get(test=find.test))
jpkey = JIRA_PKey.objects.get(product=prod)
jira_conf = jpkey.conf
if push_to_jira:
j_issue = JIRA_Issue.objects.get(finding=find)
try:
JIRAError.log_to_tempfile = False
jira = JIRA(
server=jira_conf.url,
basic_auth=(jira_conf.username, jira_conf.password))
issue = jira.issue(j_issue.jira_id)
fields = {}
# Only update the component if it didn't exist earlier in Jira, this is to avoid assigning multiple components to an item
if issue.fields.components:
log_jira_alert(
"Component not updated, exists in Jira already. Update from Jira instead.",
find)
elif jpkey.component:
# Add component to the Jira issue
component = [
{
'name': jpkey.component
},
]
fields = {"components": component}
# Upload dojo finding screenshots to Jira
for pic in find.images.all():
jira_attachment(jira, issue,
settings.MEDIA_ROOT + pic.image_large.name)
issue.update(
summary=find.title,
description=jira_long_description(find.long_desc(), find.id,
jira_conf.finding_text),
priority={'name': jira_conf.get_priority(find.severity)},
fields=fields)
print('\n\nSaving jira_change\n\n')
find.jira_change = timezone.now()
find.save()
# Add labels(security & product)
add_labels(find, issue)
except JIRAError as e:
log_jira_alert(e.text, find)
req_url = jira_conf.url + '/rest/api/latest/issue/' + \
j_issue.jira_id + '/transitions'
if 'Inactive' in find.status() or 'Mitigated' in find.status(
) or 'False Positive' in find.status(
) or 'Out of Scope' in find.status() or 'Duplicate' in find.status():
if 'Active' in old_status:
json_data = {'transition': {'id': jira_conf.close_status_key}}
r = requests.post(
url=req_url,
auth=HTTPBasicAuth(jira_conf.username, jira_conf.password),
json=json_data)
find.jira_change = timezone.now()
find.save()
elif 'Active' in find.status() and 'Verified' in find.status():
if 'Inactive' in old_status:
json_data = {'transition': {'id': jira_conf.open_status_key}}
r = requests.post(
url=req_url,
auth=HTTPBasicAuth(jira_conf.username, jira_conf.password),
json=json_data)
find.jira_change = timezone.now()
find.save()
def close_epic(eng, push_to_jira):
engagement = eng
prod = Product.objects.get(engagement=engagement)
jpkey = JIRA_PKey.objects.get(product=prod)
jira_conf = jpkey.conf
if jpkey.enable_engagement_epic_mapping and push_to_jira:
try:
j_issue = JIRA_Issue.objects.get(engagement=eng)
req_url = jira_conf.url + '/rest/api/latest/issue/' + \
j_issue.jira_id + '/transitions'
j_issue = JIRA_Issue.objects.get(engagement=eng)
json_data = {'transition': {'id': jira_conf.close_status_key}}
r = requests.post(
url=req_url,
auth=HTTPBasicAuth(jira_conf.username, jira_conf.password),
json=json_data)
except Exception as e:
log_jira_generic_alert('Jira Engagement/Epic Close Error', str(e))
pass
def update_epic(eng, push_to_jira):
engagement = eng
prod = Product.objects.get(engagement=engagement)
jpkey = JIRA_PKey.objects.get(product=prod)
jira_conf = jpkey.conf
if jpkey.enable_engagement_epic_mapping and push_to_jira:
try:
jira = JIRA(
server=jira_conf.url,
basic_auth=(jira_conf.username, jira_conf.password))
j_issue = JIRA_Issue.objects.get(engagement=eng)
issue = jira.issue(j_issue.jira_id)
issue.update(summary=eng.name, description=eng.name)
except Exception as e:
log_jira_generic_alert('Jira Engagement/Epic Update Error', str(e))
pass
def add_epic(eng, push_to_jira):
engagement = eng
prod = Product.objects.get(engagement=engagement)
jpkey = JIRA_PKey.objects.get(product=prod)
jira_conf = jpkey.conf
if jpkey.enable_engagement_epic_mapping and push_to_jira:
issue_dict = {
'project': {
'key': jpkey.project_key
},
'summary': engagement.name,
'description': engagement.name,
'issuetype': {
'name': 'Epic'
},
'customfield_' + str(jira_conf.epic_name_id): engagement.name,
}
try:
jira = JIRA(
server=jira_conf.url,
basic_auth=(jira_conf.username, jira_conf.password))
new_issue = jira.create_issue(fields=issue_dict)
j_issue = JIRA_Issue(
jira_id=new_issue.id,
jira_key=new_issue,
engagement=engagement)
j_issue.save()
except Exception as e:
error = str(e)
message = ""
if "customfield" in error:
message = "The 'Epic name id' in your DefectDojo Jira Configuration does not appear to be correct. Please visit, " + jira_conf.url + \
"/rest/api/2/field and search for Epic Name. Copy the number out of cf[number] and place in your DefectDojo settings for Jira and try again. For example, if your results are cf[100001] then copy 100001 and place it in 'Epic name id'. (Your Epic Id will be different.) \n\n"
log_jira_generic_alert('Jira Engagement/Epic Creation Error',
message + error)
pass
def add_comment(find, note, force_push=False):
prod = Product.objects.get(
engagement=Engagement.objects.get(test=find.test))
try:
jpkey = JIRA_PKey.objects.get(product=prod)
jira_conf = jpkey.conf
if jpkey.push_notes or force_push is True:
try:
jira = JIRA(
server=jira_conf.url,
basic_auth=(jira_conf.username, jira_conf.password))
j_issue = JIRA_Issue.objects.get(finding=find)
jira.add_comment(
j_issue.jira_id,
'(%s): %s' % (note.author.get_full_name(), note.entry))
except Exception as e:
log_jira_generic_alert('Jira Add Comment Error', str(e))
pass
except JIRA_PKey.DoesNotExist:
pass
def send_review_email(request, user, finding, users, new_note):
# TODO remove apparent dead code
recipients = [u.email for u in users]
msg = "\nGreetings, \n\n"
msg += "{0} has requested that you please review ".format(str(user))
msg += "the following finding for accuracy:"
msg += "\n\n" + finding.title
msg += "\n\nIt can be reviewed at " + request.build_absolute_uri(
reverse("view_finding", args=(finding.id, )))
msg += "\n\n{0} provided the following details:".format(str(user))
msg += "\n\n" + new_note.entry
msg += "\n\nThanks\n"
send_mail(
'DefectDojo Finding Review Request',
msg,
user.email,
recipients,
fail_silently=False)
pass
def process_notifications(request, note, parent_url, parent_title):
regex = re.compile(r'(?:\A|\s)@(\w+)\b')
usernames_to_check = set([un.lower() for un in regex.findall(note.entry)])
users_to_notify = [
User.objects.filter(username=username).get()
for username in usernames_to_check
if User.objects.filter(is_active=True, username=username).exists()
] # is_staff also?
user_posting = request.user
if len(note.entry) > 20:
note.entry = note.entry[:20]
note.entry += "..."
create_notification(
event='user_mentioned',
section=parent_title,
note=note,
user=request.user,
title='%s jotted a note' % request.user,
url=parent_url,
icon='commenting',
recipients=users_to_notify)
def send_atmention_email(user, users, parent_url, parent_title, new_note):
recipients = [u.email for u in users]
msg = "\nGreetings, \n\n"
msg += "User {0} mentioned you in a note on {1}".format(
str(user), parent_title)
msg += "\n\n" + new_note.entry
msg += "\n\nIt can be reviewed at " + parent_url
msg += "\n\nThanks\n"
send_mail(
'DefectDojo - {0} @mentioned you in a note'.format(str(user)),
msg,
user.email,
recipients,
fail_silently=False)
def encrypt(key, iv, plaintext):
text = ""
if plaintext and plaintext is not None:
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
encryptor = cipher.encryptor()
plaintext = _pad_string(plaintext)
encrypted_text = encryptor.update(plaintext) + encryptor.finalize()
text = binascii.b2a_hex(encrypted_text).rstrip()
return text
def decrypt(key, iv, encrypted_text):
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
encrypted_text_bytes = binascii.a2b_hex(encrypted_text)
decryptor = cipher.decryptor()
decrypted_text = decryptor.update(encrypted_text_bytes) + decryptor.finalize()
decrypted_text = _unpad_string(decrypted_text)
return decrypted_text
def _pad_string(value):
length = len(value)
pad_size = 16 - (length % 16)
return value.ljust(length + pad_size, b'\x00')
def _unpad_string(value):
if value and value is not None:
value = value.rstrip(b'\x00')
return value
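# Illustrative sketch (hypothetical secret): round trip through the AES-CBC
# helpers above. encrypt() zero-pads to the 16-byte block size and returns a
# hex-encoded ciphertext, which decrypt() decodes before stripping the padding.
def _example_crypto_roundtrip():
    key = os.urandom(32)             # AES-256 key
    iv = os.urandom(16)              # one IV per encryption
    ciphertext_hex = encrypt(key, iv, b'secret value')
    return decrypt(key, iv, ciphertext_hex)   # b'secret value'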
def dojo_crypto_encrypt(plaintext):
data = None
if plaintext:
key = None
key = get_db_key()
iv = os.urandom(16)
data = prepare_for_save(
iv, encrypt(key, iv, plaintext.encode('utf-8')))
return data
def prepare_for_save(iv, encrypted_value):
stored_value = None
if encrypted_value and encrypted_value is not None:
binascii.b2a_hex(encrypted_value).rstrip()
stored_value = "AES.1:" + binascii.b2a_hex(iv).decode('utf-8') + ":" + encrypted_value.decode('utf-8')
return stored_value
def get_db_key():
db_key = None
if hasattr(settings, 'DB_KEY'):
db_key = settings.DB_KEY
db_key = binascii.b2a_hex(
hashlib.sha256(db_key.encode('utf-8')).digest().rstrip())[:32]
return db_key
def prepare_for_view(encrypted_value):
key = None
decrypted_value = ""
    if encrypted_value and encrypted_value is not None:
key = get_db_key()
encrypted_values = encrypted_value.split(":")
if len(encrypted_values) > 1:
type = encrypted_values[0]
iv = binascii.a2b_hex(encrypted_values[1])
value = encrypted_values[2]
try:
decrypted_value = decrypt(key, iv, value)
decrypted_value = decrypted_value.decode('utf-8')
except UnicodeDecodeError:
decrypted_value = ""
return decrypted_value
def get_system_setting(setting):
try:
system_settings = System_Settings.objects.get()
except:
system_settings = System_Settings()
return getattr(system_settings, setting, None)
def get_slack_user_id(user_email):
user_id = None
res = requests.request(
method='POST',
url='https://slack.com/api/users.list',
data={'token': get_system_setting('slack_token')})
users = json.loads(res.text)
if users:
for member in users["members"]:
if "email" in member["profile"]:
if user_email == member["profile"]["email"]:
if "id" in member:
user_id = member["id"]
break
return user_id
def create_notification(event=None, **kwargs):
def create_description(event):
if "description" not in kwargs.keys():
if event == 'product_added':
kwargs["description"] = "Product " + kwargs['title'] + " has been created successfully."
else:
kwargs["description"] = "Event " + str(event) + " has occured."
def create_notification_message(event, notification_type):
template = 'notifications/%s.tpl' % event.replace('/', '')
kwargs.update({'type': notification_type})
try:
notification = render_to_string(template, kwargs)
except:
create_description(event)
notification = render_to_string('notifications/other.tpl', kwargs)
return notification
def send_slack_notification(channel):
try:
res = requests.request(
method='POST',
url='https://slack.com/api/chat.postMessage',
data={
'token': get_system_setting('slack_token'),
'channel': channel,
'username': get_system_setting('slack_username'),
'text': create_notification_message(event, 'slack')
})
except Exception as e:
log_alert(e)
pass
def send_hipchat_notification(channel):
try:
# We use same template for HipChat as for slack
res = requests.request(
method='POST',
url='https://%s/v2/room/%s/notification?auth_token=%s' %
(get_system_setting('hipchat_site'), channel,
get_system_setting('hipchat_token')),
data={
'message': create_notification_message(event, 'slack'),
'message_format': 'text'
})
except Exception as e:
log_alert(e)
pass
def send_mail_notification(address):
subject = '%s notification' % get_system_setting('team_name')
if 'title' in kwargs:
subject += ': %s' % kwargs['title']
try:
email = EmailMessage(
subject,
create_notification_message(event, 'mail'),
get_system_setting('mail_notifications_from'),
[address],
headers={"From": "{}".format(get_system_setting('mail_notifications_from'))}
)
email.send(fail_silently=False)
except Exception as e:
log_alert(e)
pass
def send_alert_notification(user=None):
icon = kwargs.get('icon', 'info-circle')
alert = Alerts(
user_id=user,
title=kwargs.get('title'),
description=create_notification_message(event, 'alert'),
url=kwargs.get('url', reverse('alerts')),
icon=icon,
source=Notifications._meta.get_field(event).verbose_name.title())
alert.save()
def log_alert(e):
users = Dojo_User.objects.filter(is_superuser=True)
for user in users:
alert = Alerts(
user_id=user,
url=kwargs.get('url', reverse('alerts')),
title='Notification issue',
description="%s" % e,
icon="exclamation-triangle",
source="Notifications")
alert.save()
# Global notifications
try:
notifications = Notifications.objects.get(user=None)
except Exception as e:
notifications = Notifications()
slack_enabled = get_system_setting('enable_slack_notifications')
hipchat_enabled = get_system_setting('enable_hipchat_notifications')
mail_enabled = get_system_setting('enable_mail_notifications')
if slack_enabled and 'slack' in getattr(notifications, event):
send_slack_notification(get_system_setting('slack_channel'))
if hipchat_enabled and 'hipchat' in getattr(notifications, event):
send_hipchat_notification(get_system_setting('hipchat_channel'))
if mail_enabled and 'mail' in getattr(notifications, event):
send_mail_notification(get_system_setting('mail_notifications_to'))
if 'alert' in getattr(notifications, event, None):
send_alert_notification()
# Personal notifications
if 'recipients' in kwargs:
users = User.objects.filter(username__in=kwargs['recipients'])
else:
users = User.objects.filter(is_superuser=True)
for user in users:
try:
notifications = Notifications.objects.get(user=user)
except Exception as e:
notifications = Notifications()
if slack_enabled and 'slack' in getattr(
notifications,
event) and user.usercontactinfo.slack_username is not None:
slack_user_id = user.usercontactinfo.slack_user_id
if user.usercontactinfo.slack_user_id is None:
# Lookup the slack userid
slack_user_id = get_slack_user_id(
user.usercontactinfo.slack_username)
slack_user_save = UserContactInfo.objects.get(user_id=user.id)
slack_user_save.slack_user_id = slack_user_id
slack_user_save.save()
send_slack_notification('@%s' % slack_user_id)
# HipChat doesn't seem to offer direct message functionality, so no HipChat PM functionality here...
if mail_enabled and 'mail' in getattr(notifications, event):
send_mail_notification(user.email)
if 'alert' in getattr(notifications, event):
send_alert_notification(user)
def calculate_grade(product):
system_settings = System_Settings.objects.get()
if system_settings.enable_product_grade:
severity_values = Finding.objects.filter(
~Q(severity='Info'),
active=True,
duplicate=False,
verified=True,
false_p=False,
test__engagement__product=product).values('severity').annotate(
Count('numerical_severity')).order_by()
low = 0
medium = 0
high = 0
critical = 0
for severity_count in severity_values:
if severity_count['severity'] == "Critical":
critical = severity_count['numerical_severity__count']
elif severity_count['severity'] == "High":
high = severity_count['numerical_severity__count']
elif severity_count['severity'] == "Medium":
medium = severity_count['numerical_severity__count']
elif severity_count['severity'] == "Low":
low = severity_count['numerical_severity__count']
aeval = Interpreter()
aeval(system_settings.product_grade)
grade_product = "grade_product(%s, %s, %s, %s)" % (
critical, high, medium, low)
product.prod_numeric_grade = aeval(grade_product)
product.save()
def get_celery_worker_status():
from .tasks import celery_status
res = celery_status.apply_async()
# Wait 15 seconds for a response from Celery
try:
return res.get(timeout=15)
except:
return False
# Used to display the counts and enabled tabs in the product view
class Product_Tab():
def __init__(self, product_id, title=None, tab=None):
self.product = Product.objects.get(id=product_id)
self.title = title
self.tab = tab
self.engagement_count = Engagement.objects.filter(
product=self.product, active=True).count()
self.open_findings_count = Finding.objects.filter(test__engagement__product=self.product,
false_p=False,
verified=True,
duplicate=False,
out_of_scope=False,
active=True,
mitigated__isnull=True).count()
self.endpoints_count = Endpoint.objects.filter(
product=self.product).count()
self.benchmark_type = Benchmark_Type.objects.filter(
enabled=True).order_by('name')
self.engagement = None
def setTab(self, tab):
self.tab = tab
def setEngagement(self, engagement):
self.engagement = engagement
def engagement(self):
return self.engagement
def tab(self):
return self.tab
def setTitle(self, title):
self.title = title
def title(self):
return self.title
def product(self):
return self.product
def engagements(self):
return self.engagement_count
def findings(self):
return self.open_findings_count
def endpoints(self):
return self.endpoints_count
def benchmark_type(self):
return self.benchmark_type
# Used to display the counts and enabled tabs in the product view
def tab_view_count(product_id):
product = Product.objects.get(id=product_id)
engagements = Engagement.objects.filter(
product=product, active=True).count()
open_findings = Finding.objects.filter(test__engagement__product=product,
false_p=False,
verified=True,
duplicate=False,
out_of_scope=False,
active=True,
mitigated__isnull=True).count()
endpoints = Endpoint.objects.filter(product=product).count()
# benchmarks = Benchmark_Product_Summary.objects.filter(product=product, publish=True, benchmark_type__enabled=True).order_by('benchmark_type__name')
benchmark_type = Benchmark_Type.objects.filter(
enabled=True).order_by('name')
return product, engagements, open_findings, endpoints, benchmark_type
# Add a language to a product
def add_language(product, language):
prod_language = Languages.objects.filter(
language__language__iexact=language, product=product)
if not prod_language:
try:
language_type = Language_Type.objects.get(
language__iexact=language)
if language_type:
lang = Languages(language=language_type, product=product)
lang.save()
except Language_Type.DoesNotExist:
pass
# Apply finding template data by matching CWE + Title or CWE
def apply_cwe_to_template(finding, override=False):
if System_Settings.objects.get().enable_template_match or override:
        # Attempt to match on CWE and title first
        template = Finding_Template.objects.filter(
            cwe=finding.cwe, title__icontains=finding.title, template_match=True).first()
        # If no match, fall back to matching on CWE alone
        if not template:
            template = Finding_Template.objects.filter(
                cwe=finding.cwe, template_match=True).first()
if template:
finding.mitigation = template.mitigation
finding.impact = template.impact
finding.references = template.references
template.last_used = timezone.now()
template.save()
return finding
|
OWASP/django-DefectDojo
|
dojo/utils.py
|
Python
|
bsd-3-clause
| 68,009
|
[
"VisIt"
] |
72b0008b20914cffbaefa2ff27553c086f1a6bfd3553711f94d8fbf6d24ae098
|
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tensorflow import keras
from .. import initializers
from .. import layers
from ..utils.anchors import AnchorParameters
from . import assert_training_model
def default_classification_model(
num_classes,
num_anchors,
pyramid_feature_size=256,
prior_probability=0.01,
classification_feature_size=256,
name='classification_submodel'
):
""" Creates the default classification submodel.
Args
num_classes : Number of classes to predict a score for at each feature level.
num_anchors : Number of anchors to predict classification scores for at each feature level.
        pyramid_feature_size : The number of filters to expect from the feature pyramid levels.
        prior_probability : Prior probability used to initialise the bias of the final classification layer.
classification_feature_size : The number of filters to use in the layers in the classification submodel.
name : The name of the submodel.
Returns
A keras.models.Model that predicts classes for each anchor.
"""
options = {
'kernel_size' : 3,
'strides' : 1,
'padding' : 'same',
}
if keras.backend.image_data_format() == 'channels_first':
inputs = keras.layers.Input(shape=(pyramid_feature_size, None, None))
else:
inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size))
outputs = inputs
for i in range(4):
outputs = keras.layers.Conv2D(
filters=classification_feature_size,
activation='relu',
name='pyramid_classification_{}'.format(i),
kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None),
bias_initializer='zeros',
**options
)(outputs)
outputs = keras.layers.Conv2D(
filters=num_classes * num_anchors,
kernel_initializer=keras.initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None),
bias_initializer=initializers.PriorProbability(probability=prior_probability),
name='pyramid_classification',
**options
)(outputs)
# reshape output and apply sigmoid
if keras.backend.image_data_format() == 'channels_first':
outputs = keras.layers.Permute((2, 3, 1), name='pyramid_classification_permute')(outputs)
outputs = keras.layers.Reshape((-1, num_classes), name='pyramid_classification_reshape')(outputs)
outputs = keras.layers.Activation('sigmoid', name='pyramid_classification_sigmoid')(outputs)
return keras.models.Model(inputs=inputs, outputs=outputs, name=name)
def default_regression_model(num_values, num_anchors, pyramid_feature_size=256, regression_feature_size=256, name='regression_submodel'):
""" Creates the default regression submodel.
Args
num_values : Number of values to regress.
num_anchors : Number of anchors to regress for each feature level.
pyramid_feature_size : The number of filters to expect from the feature pyramid levels.
regression_feature_size : The number of filters to use in the layers in the regression submodel.
name : The name of the submodel.
Returns
A keras.models.Model that predicts regression values for each anchor.
"""
# All new conv layers except the final one in the
# RetinaNet (classification) subnets are initialized
# with bias b = 0 and a Gaussian weight fill with stddev = 0.01.
options = {
'kernel_size' : 3,
'strides' : 1,
'padding' : 'same',
'kernel_initializer' : keras.initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None),
'bias_initializer' : 'zeros'
}
if keras.backend.image_data_format() == 'channels_first':
inputs = keras.layers.Input(shape=(pyramid_feature_size, None, None))
else:
inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size))
outputs = inputs
for i in range(4):
outputs = keras.layers.Conv2D(
filters=regression_feature_size,
activation='relu',
name='pyramid_regression_{}'.format(i),
**options
)(outputs)
outputs = keras.layers.Conv2D(num_anchors * num_values, name='pyramid_regression', **options)(outputs)
if keras.backend.image_data_format() == 'channels_first':
outputs = keras.layers.Permute((2, 3, 1), name='pyramid_regression_permute')(outputs)
outputs = keras.layers.Reshape((-1, num_values), name='pyramid_regression_reshape')(outputs)
return keras.models.Model(inputs=inputs, outputs=outputs, name=name)
def __create_pyramid_features(backbone_layers, pyramid_levels, feature_size=256):
""" Creates the FPN layers on top of the backbone features.
Args
backbone_layers: a dictionary containing feature stages C3, C4, C5 from the backbone. Also contains C2 if provided.
pyramid_levels: Pyramid levels in use.
feature_size : The feature size to use for the resulting feature levels.
Returns
        output_layers : A dict of feature levels. P3, P4 and P5 are always included; P2, P6 and P7 are included if in use.
"""
output_layers = {}
# upsample C5 to get P5 from the FPN paper
P5 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C5_reduced')(backbone_layers['C5'])
P5_upsampled = layers.UpsampleLike(name='P5_upsampled')([P5, backbone_layers['C4']])
P5 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P5')(P5)
output_layers["P5"] = P5
# add P5 elementwise to C4
P4 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced')(backbone_layers['C4'])
P4 = keras.layers.Add(name='P4_merged')([P5_upsampled, P4])
P4_upsampled = layers.UpsampleLike(name='P4_upsampled')([P4, backbone_layers['C3']])
P4 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P4')(P4)
output_layers["P4"] = P4
# add P4 elementwise to C3
P3 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced')(backbone_layers['C3'])
P3 = keras.layers.Add(name='P3_merged')([P4_upsampled, P3])
if 'C2' in backbone_layers and 2 in pyramid_levels:
P3_upsampled = layers.UpsampleLike(name='P3_upsampled')([P3, backbone_layers['C2']])
P3 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3')(P3)
output_layers["P3"] = P3
if 'C2' in backbone_layers and 2 in pyramid_levels:
P2 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C2_reduced')(backbone_layers['C2'])
P2 = keras.layers.Add(name='P2_merged')([P3_upsampled, P2])
P2 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P2')(P2)
output_layers["P2"] = P2
# "P6 is obtained via a 3x3 stride-2 conv on C5"
if 6 in pyramid_levels:
P6 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P6')(backbone_layers['C5'])
output_layers["P6"] = P6
# "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
if 7 in pyramid_levels:
if 6 not in pyramid_levels:
raise ValueError("P6 is required to use P7")
P7 = keras.layers.Activation('relu', name='C6_relu')(P6)
P7 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P7')(P7)
output_layers["P7"] = P7
return output_layers
def default_submodels(num_classes, num_anchors):
""" Create a list of default submodels used for object detection.
The default submodels contains a regression submodel and a classification submodel.
Args
num_classes : Number of classes to use.
num_anchors : Number of base anchors.
Returns
A list of tuple, where the first element is the name of the submodel and the second element is the submodel itself.
"""
return [
('regression', default_regression_model(4, num_anchors)),
('classification', default_classification_model(num_classes, num_anchors))
]
def __build_model_pyramid(name, model, features):
""" Applies a single submodel to each FPN level.
Args
name : Name of the submodel.
model : The submodel to evaluate.
features : The FPN features.
Returns
A tensor containing the response from the submodel on the FPN features.
"""
return keras.layers.Concatenate(axis=1, name=name)([model(f) for f in features])
def __build_pyramid(models, features):
""" Applies all submodels to each FPN level.
Args
        models : List of submodels to run on each pyramid level (by default only regression, classification).
features : The FPN features.
Returns
A list of tensors, one for each submodel.
"""
return [__build_model_pyramid(n, m, features) for n, m in models]
def __build_anchors(anchor_parameters, features):
""" Builds anchors for the shape of the features from FPN.
Args
        anchor_parameters : Parameters that determine how anchors are generated.
features : The FPN features.
Returns
A tensor containing the anchors for the FPN features.
The shape is:
```
(batch_size, num_anchors, 4)
```
"""
anchors = [
layers.Anchors(
size=anchor_parameters.sizes[i],
stride=anchor_parameters.strides[i],
ratios=anchor_parameters.ratios,
scales=anchor_parameters.scales,
name='anchors_{}'.format(i)
)(f) for i, f in enumerate(features)
]
return keras.layers.Concatenate(axis=1, name='anchors')(anchors)
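# Rough anchor-count arithmetic, assuming the library's usual defaults of
# strides 8 to 128 and 9 anchors per location, e.g. for an 800x800 input (the
# exact count depends on how the backbone rounds its feature-map shapes):
#
#     sum(((800 + s - 1) // s) ** 2 * 9 for s in (8, 16, 32, 64, 128))  # -> 120087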
def retinanet(
inputs,
backbone_layers,
num_classes,
num_anchors = None,
create_pyramid_features = __create_pyramid_features,
pyramid_levels = None,
submodels = None,
name = 'retinanet'
):
""" Construct a RetinaNet model on top of a backbone.
    This model is the minimum model necessary for training.
Args
        inputs : keras.layers.Input (or list of) for the input to the model.
        backbone_layers : Dictionary of backbone feature stages (C3, C4, C5, and optionally C2) used to build the pyramid.
        num_classes : Number of classes to classify.
num_anchors : Number of base anchors.
create_pyramid_features : Functor for creating pyramid features given the features C3, C4, C5, and possibly C2 from the backbone.
pyramid_levels : pyramid levels to use.
submodels : Submodels to run on each feature map (default is regression and classification submodels).
name : Name of the model.
Returns
        A keras.models.Model which takes an image as input and outputs the result from each submodel on every pyramid level.
The order of the outputs is as defined in submodels:
```
[
regression, classification, other[0], other[1], ...
]
```
"""
if num_anchors is None:
num_anchors = AnchorParameters.default.num_anchors()
if submodels is None:
submodels = default_submodels(num_classes, num_anchors)
if pyramid_levels is None:
pyramid_levels = [3, 4, 5, 6, 7]
if 2 in pyramid_levels and 'C2' not in backbone_layers:
raise ValueError("C2 not provided by backbone model. Cannot create P2 layers.")
if 3 not in pyramid_levels or 4 not in pyramid_levels or 5 not in pyramid_levels:
raise ValueError("pyramid levels 3, 4, and 5 required for functionality")
# compute pyramid features as per https://arxiv.org/abs/1708.02002
features = create_pyramid_features(backbone_layers, pyramid_levels)
feature_list = [features['P{}'.format(p)] for p in pyramid_levels]
# for all pyramid levels, run available submodels
pyramids = __build_pyramid(submodels, feature_list)
return keras.models.Model(inputs=inputs, outputs=pyramids, name=name)
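# Minimal construction sketch: a RetinaNet built on a dummy backbone made of
# strided convolutions. The placeholder layers below only illustrate the
# expected backbone_layers contract; a real application would use one of the
# proper backbones (ResNet, VGG, ...) instead.
def _toy_retinanet(num_classes=80):
    """ Illustrative sketch with a placeholder backbone. """
    inputs = keras.layers.Input(shape=(None, None, 3))
    # stand-ins for the backbone stages at strides 8, 16 and 32
    C3 = keras.layers.Conv2D(128, kernel_size=3, strides=8, padding='same', name='C3')(inputs)
    C4 = keras.layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name='C4')(C3)
    C5 = keras.layers.Conv2D(512, kernel_size=3, strides=2, padding='same', name='C5')(C4)
    return retinanet(
        inputs          = inputs,
        backbone_layers = {'C3': C3, 'C4': C4, 'C5': C5},
        num_classes     = num_classes
    )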
def retinanet_bbox(
model = None,
nms = True,
class_specific_filter = True,
name = 'retinanet-bbox',
anchor_params = None,
pyramid_levels = None,
nms_threshold = 0.5,
score_threshold = 0.05,
max_detections = 300,
parallel_iterations = 32,
**kwargs
):
""" Construct a RetinaNet model on top of a backbone and adds convenience functions to output boxes directly.
This model uses the minimum retinanet model and appends a few layers to compute boxes within the graph.
These layers include applying the regression values to the anchors and performing NMS.
Args
model : RetinaNet model to append bbox layers to. If None, it will create a RetinaNet model using **kwargs.
nms : Whether to use non-maximum suppression for the filtering step.
class_specific_filter : Whether to use class specific filtering or filter for the best scoring class only.
name : Name of the model.
anchor_params : Struct containing anchor parameters. If None, default values are used.
pyramid_levels : pyramid levels to use.
nms_threshold : Threshold for the IoU value to determine when a box should be suppressed.
score_threshold : Threshold used to prefilter the boxes with.
max_detections : Maximum number of detections to keep.
parallel_iterations : Number of batch items to process in parallel.
**kwargs : Additional kwargs to pass to the minimal retinanet model.
Returns
A keras.models.Model which takes an image as input and outputs the detections on the image.
The order is defined as follows:
```
[
boxes, scores, labels, other[0], other[1], ...
]
```
"""
# if no anchor parameters are passed, use default values
if anchor_params is None:
anchor_params = AnchorParameters.default
# create RetinaNet model
if model is None:
model = retinanet(num_anchors=anchor_params.num_anchors(), **kwargs)
else:
assert_training_model(model)
if pyramid_levels is None:
pyramid_levels = [3, 4, 5, 6, 7]
assert len(pyramid_levels) == len(anchor_params.sizes), \
"number of pyramid levels {} should match number of anchor parameter sizes {}".format(len(pyramid_levels),
len(anchor_params.sizes))
pyramid_layer_names = ['P{}'.format(p) for p in pyramid_levels]
# compute the anchors
features = [model.get_layer(p_name).output for p_name in pyramid_layer_names]
anchors = __build_anchors(anchor_params, features)
# we expect the anchors, regression and classification values as first output
regression = model.outputs[0]
classification = model.outputs[1]
# "other" can be any additional output from custom submodels, by default this will be []
other = model.outputs[2:]
# apply predicted regression to anchors
boxes = layers.RegressBoxes(name='boxes')([anchors, regression])
boxes = layers.ClipBoxes(name='clipped_boxes')([model.inputs[0], boxes])
# filter detections (apply NMS / score threshold / select top-k)
detections = layers.FilterDetections(
nms = nms,
class_specific_filter = class_specific_filter,
name = 'filtered_detections',
nms_threshold = nms_threshold,
score_threshold = score_threshold,
max_detections = max_detections,
parallel_iterations = parallel_iterations
)([boxes, classification] + other)
# construct the model
return keras.models.Model(inputs=model.inputs, outputs=detections, name=name)
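# Converting a trained model to an inference model is then a one-liner; the
# `training_model` and `image_batch` names below are placeholders (the model is
# assumed to already hold trained weights, and the batch to be preprocessed):
#
#     inference_model = retinanet_bbox(model=training_model)
#     boxes, scores, labels = inference_model.predict(image_batch)[:3]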
| delftrobotics/keras-retinanet | keras_retinanet/models/retinanet.py | Python | apache-2.0 | 16,776 | ["Gaussian"] | 2682d627fcc472f203140a31faf256ce2d6eefa25d334e676bf9ae2f8bcec0c3 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# nest_plot.py
# This file is part of the NNGT module
# Distributed as a free software, in the hope that it will be useful, under the
# terms of the GNU General Public License.
""" Utility functions to plot NEST simulated activity """
import itertools
import logging
from matplotlib.colors import ColorConverter
import numpy as np
import nest
import nngt
from nngt.analysis import total_firing_rate
from nngt.lib import InvalidArgument, nonstring_container, is_integer
from nngt.lib.sorting import _sort_groups, _sort_neurons
from nngt.lib.logger import _log_message
from nngt.plot import palette_discrete, markers
from nngt.plot.plt_properties import _set_new_plot, _set_ax_lims
logger = logging.getLogger(__name__)
# --------------------- #
# Plotting the activity #
# --------------------- #
def plot_activity(gid_recorder=None, record=None, network=None, gids=None,
axis=None, show=False, limits=None, histogram=False,
title=None, fignum=None, label=None, sort=None,
average=False, normalize=1., decimate=None, transparent=True,
kernel_center=0., kernel_std=None, resolution=None,
cut_gaussian=5., **kwargs):
'''
Plot the monitored activity.
.. versionchanged:: 1.2
Switched `hist` to `histogram` and default value to False.
.. versionchanged:: 1.0.1
Added `axis` parameter, restored missing `fignum` parameter.
Parameters
----------
gid_recorder : tuple or list of tuples, optional (default: None)
The gids of the recording devices. If None, then all existing
"spike_detector"s are used.
record : tuple or list, optional (default: None)
List of the monitored variables for each device. If `gid_recorder` is
None, record can also be None and only spikes are considered.
network : :class:`~nngt.Network` or subclass, optional (default: None)
Network which activity will be monitored.
gids : tuple, optional (default: None)
NEST gids of the neurons which should be monitored.
axis : matplotlib axis object, optional (default: new one)
Axis that should be use to plot the activity. This takes precedence
over `fignum`.
show : bool, optional (default: False)
Whether to show the plot right away or to wait for the next plt.show().
histogram : bool, optional (default: False)
        Whether to display the histogram when plotting spike rasters.
limits : tuple, optional (default: None)
Time limits of the plot (if not specified, times of first and last
spike for raster plots).
title : str, optional (default: None)
Title of the plot.
fignum : int, or dict, optional (default: None)
Plot the activity on an existing figure (from ``figure.number``). This
parameter is ignored if `axis` is provided.
label : str or list, optional (default: None)
Add labels to the plot (one per recorder).
sort : str or list, optional (default: None)
Sort neurons using a topological property ("in-degree", "out-degree",
"total-degree" or "betweenness"), an activity-related property
("firing_rate" or neuronal property) or a user-defined list of sorted
neuron ids. Sorting is performed by increasing value of the `sort`
property from bottom to top inside each group.
    normalize : float or list, optional (default: 1.)
Normalize the recorded results by a given float. If a list is provided,
there should be one entry per voltmeter or multimeter in the recorders.
If the recording was done through `monitor_groups`, the population can
        be passed to normalize the data by the number of nodes in each group.
decimate : int or list of ints, optional (default: None)
Represent only a fraction of the spiking neurons; only one neuron in
`decimate` will be represented (e.g. setting `decimate` to 5 will lead
to only 20% of the neurons being represented). If a list is provided,
it must have one entry per NeuralGroup in the population.
kernel_center : float, optional (default: 0.)
Temporal shift of the Gaussian kernel, in ms (for the histogram).
kernel_std : float, optional (default: 0.5% of simulation time)
Characteristic width of the Gaussian kernel (standard deviation) in ms
(for the histogram).
resolution : float or array, optional (default: `0.1*kernel_std`)
The resolution at which the firing rate values will be computed.
Choosing a value smaller than `kernel_std` is strongly advised.
        If `resolution` is an array, it will be considered as the times at which
        the firing rate should be computed (for the histogram).
cut_gaussian : float, optional (default: 5.)
Range over which the Gaussian will be computed (for the histogram).
By default, we consider the 5-sigma range. Decreasing this value will
        increase speed at the cost of lower fidelity; increasing it will
        increase the fidelity at the cost of speed.
**kwargs : dict
"color" and "alpha" values can be overriden here.
Warning
-------
Sorting with "firing_rate" only works if NEST gids form a continuous
integer range.
Returns
-------
    lines : dict of lists of :class:`matplotlib.lines.Line2D`
        Lines containing the data that was plotted, grouped by recorder model.
'''
import matplotlib.pyplot as plt
lst_rec, lst_labels, lines, axes, labels = [], [], {}, {}, {}
# normalize recorders and recordables
if gid_recorder is not None:
assert record is not None, "`record` must also be provided."
if len(record) != len(gid_recorder):
raise InvalidArgument('`record` must either be the same for all '
'recorders, or contain one entry per '
'recorder in `gid_recorder`')
for rec in gid_recorder:
if isinstance(gid_recorder[0], tuple):
lst_rec.append(rec[0])
else:
lst_rec.append(rec)
else:
lst_rec = nest.GetNodes(
(0,), properties={'model': 'spike_detector'})[0]
record = tuple("spikes" for _ in range(len(lst_rec)))
# get gids and groups
gids = network.nest_gids if (gids is None and network is not None) \
else gids
if gids is None:
gids = []
for rec in lst_rec:
gids.extend(nest.GetStatus([rec])[0]["events"]["senders"])
gids = np.unique(gids)
num_group = 1 if network is None else len(network.population)
num_lines = max(num_group, len(lst_rec))
# sorting
sorted_neurons = np.array([])
if len(gids):
sorted_neurons = np.arange(
np.max(gids) + 1).astype(int) - np.min(gids) + 1
attr = None
if sort is not None:
assert network is not None, "`network` is required for sorting."
if nonstring_container(sort):
attr = sort
sorted_neurons = _sort_neurons(attr, gids, network)
sort = "user defined sort"
else:
data = None
if sort.lower() in ("firing_rate", "b2"): # get senders
data = [[], []]
for rec in lst_rec:
info = nest.GetStatus([rec])[0]
if str(info["model"]) == "spike_detector":
data[0].extend(info["events"]["senders"])
data[1].extend(info["events"]["times"])
data = np.array(data).T
sorted_neurons, attr = _sort_neurons(
sort, gids, network, data=data, return_attr=True)
elif network is not None and network.is_spatial():
sorted_neurons, attr = _sort_neurons(
"space", gids, network, data=None, return_attr=True)
# spikes plotting
colors = palette_discrete(np.linspace(0, 1, num_lines))
num_raster, num_detec, num_meter = 0, 0, 0
fignums = fignum if isinstance(fignum, dict) else {}
decim = []
if decimate is None:
decim = [None for _ in range(num_lines)]
elif is_integer(decimate):
decim = [decimate for _ in range(num_lines)]
elif nonstring_container(decimate):
assert len(decimate) == num_lines, "`decimate` should have one " +\
"entry per plot."
decim = decimate
else:
raise AttributeError(
"`decimate` must be either an int or a list of `int`.")
# set labels
if label is None:
lst_labels = [None for _ in range(len(lst_rec))]
else:
if isinstance(label, str):
lst_labels = [label]
else:
lst_labels = label
if len(label) != len(lst_rec):
_log_message(logger, "WARNING",
'Incorrect length for `label`: expecting {} but got '
'{}.\nIgnoring.'.format(len(lst_rec), len(label)))
lst_labels = [None for _ in range(len(lst_rec))]
datasets = []
max_time = 0.
for rec in lst_rec:
info = nest.GetStatus([rec])[0]
if len(info["events"]["times"]):
max_time = max(max_time, np.max(info["events"]["times"]))
datasets.append(info)
if kernel_std is None:
kernel_std = max_time*0.005
if resolution is None:
resolution = 0.5*kernel_std
# plot
for info, var, lbl in zip(datasets, record, lst_labels):
fnum = fignums.get(info["model"], fignum)
if info["model"] not in labels:
labels[info["model"]] = []
lines[info["model"]] = []
if str(info["model"]) == "spike_detector":
if "spike_detector" in axes:
axis = axes["spike_detector"]
c = colors[num_raster]
times, senders = info["events"]["times"], info["events"]["senders"]
sorted_ids = sorted_neurons[senders]
l = raster_plot(times, sorted_ids, color=c, show=False,
limits=limits, sort=sort, fignum=fnum, axis=axis,
decimate=decim[num_raster], sort_attribute=attr,
network=network, histogram=histogram,
transparent=transparent,
hist_ax=axes.get('histogram', None),
kernel_center=kernel_center,
kernel_std=kernel_std, resolution=resolution,
cut_gaussian=cut_gaussian)
num_raster += 1
if l:
fig_raster = l[0].figure.number
fignums['spike_detector'] = fig_raster
axes['spike_detector'] = l[0].axes
labels["spike_detector"].append(lbl)
lines["spike_detector"].extend(l)
if histogram:
axes['histogram'] = l[1].axes
elif "detector" in str(info["model"]):
c = colors[num_detec]
times, senders = info["events"]["times"], info["events"]["senders"]
sorted_ids = sorted_neurons[senders]
l = raster_plot(times, sorted_ids, fignum=fnum, color=c, axis=axis,
show=False, histogram=histogram, limits=limits,
kernel_center=kernel_center,
kernel_std=kernel_std, resolution=resolution,
cut_gaussian=cut_gaussian)
if l:
fig_detect = l[0].figure.number
num_detec += 1
fignums[info["model"]] = fig_detect
labels[info["model"]].append(lbl)
lines[info["model"]].extend(l)
if histogram:
axes['histogram'] = l[1].axes
else:
da_time = info["events"]["times"]
# prepare axis setup
fig = None
if axis is None:
fig = plt.figure(fnum)
fignums[info["model"]] = fig.number
else:
fig = axis.get_figure()
lines_tmp, labels_tmp = [], []
if nonstring_container(var):
m_colors = palette_discrete(np.linspace(0, 1, len(var)))
axes = fig.axes
if axis is not None:
# multiple y axes on a single subplot, adapted from
# https://matplotlib.org/examples/pylab_examples/
# multiple_yaxis_with_spines.html
axes = [axis]
axis.name = var[0]
if len(var) > 1:
axes.append(axis.twinx())
axes[-1].name = var[1]
if len(var) > 2:
fig.subplots_adjust(right=0.75)
for i, name in zip(range(len(var)-2), var[2:]):
new_ax = axis.twinx()
new_ax.spines["right"].set_position(
("axes", 1.2*(i+1)))
axes.append(new_ax)
_make_patch_spines_invisible(new_ax)
new_ax.spines["right"].set_visible(True)
axes[-1].name = name
if not axes:
axes = _set_new_plot(fig.number, names=var)[1]
labels_tmp = [lbl for _ in range(len(var))]
for subvar, c in zip(var, m_colors):
c = kwargs.get('color', c)
alpha = kwargs.get('alpha', 1)
for ax in axes:
if ax.name == subvar:
da_subvar = info["events"][subvar]
if isinstance(normalize, nngt.NeuralPop):
da_subvar /= normalize[num_meter].size
elif nonstring_container(normalize):
da_subvar /= normalize[num_meter]
elif normalize is not None:
da_subvar /= normalize
lines_tmp.extend(
ax.plot(da_time, da_subvar, color=c,
alpha=alpha))
ax.set_ylabel(subvar)
ax.set_xlabel("time")
if limits is not None:
ax.set_xlim(limits[0], limits[1])
else:
num_axes, ax = len(fig.axes), axis
if axis is None:
ax = fig.add_subplot(num_axes + 1, 1, num_axes + 1)
da_var = info["events"][var]
c = kwargs.get('color', None)
alpha = kwargs.get('alpha', 1)
lines_tmp.extend(ax.plot(da_time, da_var/normalize, color=c,
alpha=alpha))
labels_tmp.append(lbl)
ax.set_ylabel(var)
ax.set_xlabel("time")
labels[info["model"]].extend(labels_tmp)
lines[info["model"]].extend(lines_tmp)
num_meter += 1
if "spike_detector" in axes:
ax = axes['spike_detector']
if limits is not None:
ax.set_xlim(limits[0], limits[1])
else:
t_min, t_max, idx_min, idx_max = np.inf, -np.inf, np.inf, -np.inf
for l in ax.lines:
t_max = max(np.max(l.get_xdata()), t_max)
t_min = min(np.min(l.get_xdata()), t_max)
idx_min = min(np.min(l.get_ydata()), idx_min)
idx_max = max(np.max(l.get_ydata()), idx_max)
dt = t_max - t_min
didx = idx_max - idx_min
pc = 0.02
if not np.any(np.isinf((t_max, t_min))):
ax.set_xlim([t_min - pc*dt, t_max + pc*dt])
if not np.any(np.isinf((idx_min, idx_max))):
ax.set_ylim([idx_min - pc*didx, idx_max + pc*didx])
for recorder in fignums:
fig = plt.figure(fignums[recorder])
if title is not None:
fig.suptitle(title)
if label is not None:
fig.legend(lines[recorder], labels[recorder])
if show:
plt.show()
return lines
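# Typical usage sketch once NEST recorders were created through NNGT (the
# network `net` and the simulation time are placeholders; see `monitor_groups`
# for the exact signature and return order):
#
#     recorders, recordables = nngt.simulation.monitor_groups(
#         list(net.population.keys()), net)
#     nest.Simulate(1000.)
#     plot_activity(recorders, recordables, network=net, histogram=True,
#                   show=True)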
def raster_plot(times, senders, limits=None, title="Spike raster",
histogram=False, num_bins=1000, color="b", decimate=None,
axis=None, fignum=None, label=None, show=True, sort=None,
sort_attribute=None, network=None, transparent=True,
kernel_center=0., kernel_std=30., resolution=None,
cut_gaussian=5., **kwargs):
"""
Plotting routine that constructs a raster plot along with
an optional histogram.
.. versionchanged:: 1.2
Switched `hist` to `histogram`.
.. versionchanged:: 1.0.1
Added `axis` parameter.
Parameters
----------
times : list or :class:`numpy.ndarray`
Spike times.
senders : list or :class:`numpy.ndarray`
Index for the spiking neuron for each time in `times`.
limits : tuple, optional (default: None)
Time limits of the plot (if not specified, times of first and last
spike).
title : string, optional (default: 'Spike raster')
Title of the raster plot.
    histogram : bool, optional (default: False)
Whether to plot the raster's histogram.
num_bins : int, optional (default: 1000)
Number of bins for the histogram.
color : string or float, optional (default: 'b')
Color of the plot lines and markers.
decimate : int, optional (default: None)
Represent only a fraction of the spiking neurons; only one neuron in
`decimate` will be represented (e.g. setting `decimate` to 10 will lead
to only 10% of the neurons being represented).
axis : matplotlib axis object, optional (default: new one)
Axis that should be use to plot the activity.
fignum : int, optional (default: None)
Id of another raster plot to which the new data should be added.
label : str, optional (default: None)
Label the current data.
show : bool, optional (default: True)
Whether to show the plot right away or to wait for the next plt.show().
kernel_center : float, optional (default: 0.)
Temporal shift of the Gaussian kernel, in ms.
kernel_std : float, optional (default: 30.)
Characteristic width of the Gaussian kernel (standard deviation) in ms.
resolution : float or array, optional (default: `0.1*kernel_std`)
The resolution at which the firing rate values will be computed.
Choosing a value smaller than `kernel_std` is strongly advised.
        If `resolution` is an array, it will be considered as the times at which
        the firing rate should be computed.
cut_gaussian : float, optional (default: 5.)
Range over which the Gaussian will be computed (for the histogram).
By default, we consider the 5-sigma range. Decreasing this value will
        increase speed at the cost of lower fidelity; increasing it will
        increase the fidelity at the cost of speed.
Returns
-------
lines : list of :class:`matplotlib.lines.Line2D`
Lines containing the data that was plotted.
"""
import matplotlib.pyplot as plt
lines = []
mpl_kwargs = {k: v for k, v in kwargs.items() if k != 'hist_ax'}
    if label is not None:
        mpl_kwargs['label'] = label
# decimate if necessary
if decimate is not None:
idx_keep = np.where(np.mod(senders, decimate) == 0)[0]
senders = senders[idx_keep]
times = times[idx_keep]
if len(times):
if axis is not None:
fig = axis.get_figure()
else:
fig = plt.figure(fignum)
if transparent:
fig.patch.set_visible(False)
ylabel = "Neuron ID"
xlabel = "Time (ms)"
delta_t = 0.01*(times[-1]-times[0])
if histogram:
ax1, ax2 = None, None
if kwargs.get("hist_ax", None) is None:
num_axes = len(fig.axes)
for i, old_ax in enumerate(fig.axes):
old_ax.change_geometry(num_axes + 2, 1, i+1)
ax1 = fig.add_subplot(num_axes + 2, 1, num_axes + 1)
ax2 = fig.add_subplot(num_axes + 2, 1, num_axes + 2,
sharex=ax1)
else:
ax1 = axis
ax2 = kwargs["hist_ax"]
if limits is not None:
start, stop = limits
keep = (times >= start)&(times <= stop)
times = times[keep]
senders = senders[keep]
lines.extend(ax1.plot(
times, senders, c=color, marker="o", linestyle='None',
mec="k", mew=0.5, ms=4, **mpl_kwargs))
ax1_lines = ax1.lines
if len(ax1_lines) > 1:
t_max = max(ax1_lines[0].get_xdata().max(),times[-1])
ax1.set_xlim([-delta_t, t_max+delta_t])
ax1.set_ylabel(ylabel)
if limits is not None:
ax1.set_xlim(*limits)
fr, fr_times = total_firing_rate(
data=np.array([senders, times]).T, kernel_center=kernel_center,
kernel_std=kernel_std, resolution=resolution,
cut_gaussian=cut_gaussian)
hist_lines = ax2.get_lines()
if hist_lines:
data = hist_lines[-1].get_data()
bottom = data[1]
if limits is None:
dt = fr_times[1] - fr_times[0]
old_times = data[0]
old_start = int(old_times[0] / dt)
new_start = int(fr_times[0] / dt)
old_end = int(old_times[-1] / dt)
new_end = int(fr_times[-1] / dt)
diff_start = new_start-old_start
diff_end = new_end-old_end
if diff_start > 0:
bottom = bottom[diff_start:]
else:
bottom = np.concatenate(
(np.zeros(-diff_start), bottom))
if diff_end > 0:
bottom = np.concatenate((bottom, np.zeros(diff_end)))
else:
bottom = bottom[:diff_end-1]
b_len, h_len = len(bottom), len(fr)
if b_len > h_len:
bottom = bottom[:h_len]
elif b_len < h_len:
bottom = np.concatenate(
(bottom, np.zeros(h_len-b_len)))
else:
bottom = bottom[:-1]
ax2.fill_between(fr_times, fr + bottom, bottom, color=color)
lines.extend(ax2.plot(fr_times, fr + bottom, ls="", marker=""))
else:
ax2.fill_between(fr_times, fr, 0., color=color)
lines.extend(ax2.plot(fr_times, fr, ls="", marker=""))
ax2.set_ylabel("Rate (Hz)")
ax2.set_xlabel(xlabel)
ax2.set_xlim(ax1.get_xlim())
_second_axis(sort, sort_attribute, ax1)
else:
if axis is not None:
ax = axis
else:
num_axes = len(fig.axes)
for i, old_ax in enumerate(fig.axes):
old_ax.change_geometry(num_axes + 1, 1, i+1)
ax = fig.add_subplot(num_axes + 1, 1, num_axes + 1)
if limits is not None:
start, stop = limits
keep = (times >= start)&(times <= stop)
times = times[keep]
senders = senders[keep]
if network is not None:
pop = network.population
colors = palette_discrete(np.linspace(0, 1, len(pop)))
mm = itertools.cycle(markers)
for m, (k, v), c in zip(mm, pop.items(), colors):
keep = np.where(
np.in1d(senders, network.nest_gids[v.ids]))[0]
if len(keep):
if label is None:
mpl_kwargs['label'] = k
lines.extend(ax.plot(
times[keep], senders[keep], c=c, marker=m,
ls='None', mec='k', mew=0.5, ms=4, **mpl_kwargs))
else:
lines.extend(ax.plot(
times, senders, c=color, marker="o", linestyle='None',
mec="k", mew=0.5, ms=4, **mpl_kwargs))
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if limits is not None:
ax.set_xlim(limits)
else:
_set_ax_lims(ax, np.max(times), np.min(times), np.max(senders),
np.min(senders))
if label is not None:
ax.legend(bbox_to_anchor=(1.1, 1.2))
_second_axis(sort, sort_attribute, ax)
fig.suptitle(title)
if show:
plt.show()
else:
_log_message(logger, "WARNING",
"No activity was detected during the simulation.")
return lines
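# Self-contained sketch with synthetic spikes (no NEST data required):
#
#     spike_times = np.sort(np.random.uniform(0., 1000., 2000))
#     spike_senders = np.random.randint(1, 101, 2000)
#     raster_plot(spike_times, spike_senders, histogram=True, kernel_std=20.,
#                 show=True)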
#-----------------------------------------------------------------------------
# Tools
#------------------------
#
def _fill_between_steps(x, y1, y2=0, h_align='mid'):
'''
Fills a hole in matplotlib: fill_between for step plots.
    Parameters
    ----------
    x : array-like
        Array/vector of index values. These are assumed to be equally spaced;
        if not, the result will probably look weird.
    y1 : array-like
        Array/vector of values to be filled under.
    y2 : array-like or float, optional (default: 0)
        Array/vector of bottom values for the filled area.
    h_align : str, optional (default: 'mid')
        Horizontal alignment of the steps: 'mid', 'right', or any other value
        for left alignment.
'''
# First, duplicate the x values
xx = np.repeat(x,2)
# Now: the average x binwidth
xstep = np.repeat((x[1:] - x[:-1]), 2)
xstep = np.concatenate(([xstep[0]], xstep, [xstep[-1]]))
# Now: add one step at end of row.
#~ xx = np.append(xx, xx.max() + xstep[-1])
# Make it possible to change step alignment.
if h_align == 'mid':
xx -= xstep / 2.
elif h_align == 'right':
xx -= xstep
# Also, duplicate each y coordinate in both arrays
y1 = np.repeat(y1,2)#[:-1]
if type(y2) == np.ndarray:
y2 = np.repeat(y2,2)#[:-1]
return xx, y1, y2
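# Example pairing with matplotlib (`ax`, `bin_centers` and `rate` are
# placeholders for an existing Axes and precomputed step data):
#
#     xx, yy1, yy2 = _fill_between_steps(bin_centers, rate)
#     ax.fill_between(xx, yy1, yy2)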
def _moving_average(values, window):
weights = np.repeat(1.0, window)/window
sma = np.convolve(values, weights, 'same')
return sma
def _second_axis(sort, sort_attribute, ax):
import matplotlib.pyplot as plt
if sort is not None:
fig = ax.get_figure()
twin = None
for axis in fig.axes:
if axis.get_ylabel() == sort:
twin = axis
break
if twin is None:
asort = np.argsort(sort_attribute)
twin = ax.twinx()
twin.grid(False)
twin.set_ylabel(sort)
plt.draw()
old_ticks = ax.get_yticks()
twin.set_yticks(old_ticks)
twin.set_ylim(ax.get_ylim())
labels = ['' for _ in range(len(old_ticks))]
idx_max = len(sort_attribute) - 1
for i, t in enumerate(old_ticks):
if t >= 0:
idx = min(int(t), idx_max)
labels[i] = _sci_format(sort_attribute[asort[idx]])
twin.set_yticklabels(labels)
def _sci_format(n):
label = ''
if np.abs(n) < 0.01 or np.abs(n) >= 1000:
a = '{:.1E}'.format(n)
label = '$' + a.split('E')[0].rstrip('0').rstrip('.') + '\\cdot 10^{'
exponent = a.split('E')[1].lstrip('0')
if exponent[0] == '-':
exponent = exponent[0] + exponent[1:].lstrip('0')
elif exponent[0] == '+':
exponent = exponent[1:].lstrip('0')
label += exponent + '}$'
elif np.abs(n) >= 100:
label = '{:.0f}'.format(n)
elif np.abs(n) >= 10:
label = '{:.1f}'.format(n)
else:
label = '{:.2f}'.format(n)
return label
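# Sample outputs (the returned strings are ready for matplotlib's mathtext):
#     _sci_format(0.0012)  -> '$1.2\cdot 10^{-3}$'
#     _sci_format(42.0)    -> '42.0'
#     _sci_format(123.4)   -> '123'
#     _sci_format(56000.)  -> '$5.6\cdot 10^{4}$'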
def _make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
| Silmathoron/NNGT | nngt/simulation/nest_plot.py | Python | gpl-3.0 | 29,385 | ["Gaussian", "NEURON"] | a704c3b171ae1e5506aef099b3b7e9d410a1768926b40dfda432c4fe1ab73d39 |
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.