gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import unittest
from decimal import Decimal
import iso8601
from lxml import etree
from pythonic_testcase import assert_equals, assert_raises
from soapfish import xsd, xsdspec
class Aircraft(xsd.ComplexType):
    # Minimal complex type with a single XML attribute; exercised by the
    # attribute rendering/parsing tests in ComplexTest.
    tail_number = xsd.Attribute(xsd.String)
class Airport(xsd.ComplexType):
    """Complex type with two string child elements; ``type`` names the code
    system (IATA/ICAO/...) and ``code`` the airport code itself."""

    type = xsd.Element(xsd.String)  # NOTE: shadows builtin `type`; name kept, it is the XML tag
    code = xsd.Element(xsd.String)

    @classmethod
    def create(cls, type, code):
        """Alternate constructor building a fully populated instance.

        Uses ``cls()`` rather than hard-coding ``Airport()`` so subclasses
        inheriting this factory get instances of their own type.
        """
        airport = cls()
        airport.type = type
        airport.code = code
        return airport
class Pilot(xsd.String):
    # Restricted string type: only the enumerated values are accepted.
    enumeration = ['CAPTAIN', 'FIRST_OFFICER']
class Flight(xsd.ComplexType):
    # Aggregate type exercising nested complex types, optional (minOccurs=0)
    # elements, enumerated simple types and a bounded list element.
    tail_number = xsd.Element(xsd.String)
    takeoff_datetime = xsd.Element(xsd.DateTime, minOccurs=0)  # optional
    takeoff_airport = xsd.Element(Airport)
    landing_airport = xsd.Element(Airport)
    takeoff_pilot = xsd.Element(Pilot, minOccurs=0)  # restricted to Pilot enumeration
    landing_pilot = xsd.Element(Pilot, minOccurs=0)
    passengers = xsd.ListElement(xsd.String, 'passenger', maxOccurs=10, minOccurs=0)
class ElementTest(unittest.TestCase):
    """Rendering and parsing behaviour of ``xsd.Element``.

    Uses ``assertEqual`` throughout: ``assertEquals`` is a deprecated alias
    that was removed in Python 3.12.
    """

    # This logic has been moved to post-rendering validation; uncomment when implemented.
    # def test_required(self):
    #     tail_number = xsd.Element(xsd.String)
    #     try:
    #         xmlelement = etree.Element('aircraft')
    #         tail_number.render(xmlelement, 'tail_number', None)
    #     except ValueError:
    #         pass
    #     else:
    #         raise AssertionError('Should get here')

    def test_string_element(self):
        tail_number = xsd.Element(xsd.String())
        xmlelement = etree.Element('aircraft')
        tail_number.render(xmlelement, 'tail_number', 'LN-KKU')
        self.assertEqual(b'''<aircraft>
  <tail_number>LN-KKU</tail_number>
</aircraft>
''',
                         etree.tostring(xmlelement, pretty_print=True))

    def test_complex_type_element(self):
        airport = Airport()
        airport.type = 'IATA'
        airport.code = 'WAW'
        xmlelement = etree.Element('takeoff_airport')
        airport.render(xmlelement, airport)
        expected_xml = b'''<takeoff_airport>
  <type>IATA</type>
  <code>WAW</code>
</takeoff_airport>
'''
        xml = etree.tostring(xmlelement, pretty_print=True)
        self.assertEqual(expected_xml, xml)

    def test_tagname_parsexml(self):
        # `tagname` decouples the Python attribute name from the XML tag.
        class TestType(xsd.ComplexType):
            foo = xsd.Element(xsd.String, tagname='bar')
        xml = b'<T><bar>coucou</bar></T>'
        obj = TestType.parsexml(xml)
        self.assertEqual('coucou', obj.foo)

    def test_tagname_parse_xmlelement(self):
        class TestType(xsd.ComplexType):
            foo = xsd.Element(xsd.String, tagname='bar')
        xml = b'<T><bar>coucou</bar></T>'
        xmlelement = etree.fromstring(xml)
        obj = TestType.parse_xmlelement(xmlelement)
        self.assertEqual('coucou', obj.foo)

    def test_tagname_render(self):
        class TestType(xsd.ComplexType):
            foo = xsd.Element(xsd.String, tagname='bar')
        obj = TestType(foo='coucou')
        xmlelement = etree.Element('T')
        obj.render(xmlelement, obj)
        xml = etree.tostring(xmlelement)
        self.assertEqual(b'<T><bar>coucou</bar></T>', xml)

    def test_stringify_complextype(self):
        # Smoke test: str() on a partially initialised instance must not raise.
        flight = Flight(takeoff_airport=Airport())
        str(flight)
class ListElementTest(unittest.TestCase):
    """Tests for xsd.ListElement rendering, parsing and occurrence limits."""

    def test_rendering_simple_type(self):
        passengers = xsd.ListElement(xsd.String, 'passenger', maxOccurs=10, minOccurs=0)
        passengers_list = ['abc', '123']
        xmlelement = etree.Element('flight')
        passengers.render(xmlelement, 'passenger', passengers_list)
        # one <passenger> child per list item, in order
        expected_xml = b'''<flight>
  <passenger>abc</passenger>
  <passenger>123</passenger>
</flight>
'''
        xml = etree.tostring(xmlelement, pretty_print=True)
        self.assertEqual(expected_xml, xml)

    def test_parsing(self):
        class Test(xsd.ComplexType):
            values = xsd.ListElement(xsd.Int, 'value')
        # surrounding whitespace in the document is ignored by the parser
        XML = b'''
        <test>
            <value>1</value>
            <value>2</value>
        </test>'''
        test = Test.parsexml(XML)
        self.assertEqual(2, len(test.values))
        self.assertEqual(1, test.values[0])

    def test_append_restriction(self):
        # maxOccurs=1: appending a second item must raise with a helpful message.
        l = xsd.ListElement(xsd.String, maxOccurs=1, tagname='toto').empty_value()
        l.append('a')
        e = assert_raises(ValueError, lambda: l.append('a'))
        assert_equals('You must not add more than 1 items to this list.', str(e))

    def test_append_with_max_occurs_unbounded(self):
        # UNBOUNDED lists accept any number of items.
        l = xsd.ListElement(xsd.String, maxOccurs=xsd.UNBOUNDED, tagname='toto').empty_value()
        l.append('a')
        l.append('a')
class BooleanTypeTest(unittest.TestCase):
    """Rendering of xsd.Boolean as element text and as attribute values."""

    def test_element_true(self):
        mixed = xsd.Element(xsd.Boolean,)
        xmlelement = etree.Element('complexType')
        mixed.render(xmlelement, 'mixed', True)
        expected_xml = b'''<complexType>
  <mixed>true</mixed>
</complexType>
'''
        xml = etree.tostring(xmlelement, pretty_print=True)
        self.assertEqual(expected_xml, xml)

    def test_boolean_correctly_renders_false_value_in_xml(self):
        # regression test for http://code.google.com/p/soapfish/issues/detail?id=3
        # before xsd.Boolean would render [true, false] Python values *both*
        # to as 'true' in the xml.
        parent = etree.Element('parent')
        xsd.Element(xsd.Boolean).render(parent, 'b', True)
        self.assertEqual(b'<parent><b>true</b></parent>', etree.tostring(parent))
        parent = etree.Element('parent')
        xsd.Element(xsd.Boolean).render(parent, 'b', False)
        self.assertEqual(b'<parent><b>false</b></parent>', etree.tostring(parent))

    def test_attribute_false(self):
        # NOTE(review): despite the method name this renders True and expects
        # "true"; a False-valued attribute is not covered here — confirm intent.
        mixed = xsd.Attribute(xsd.Boolean)
        xmlelement = etree.Element('complexType')
        mixed.render(xmlelement, 'mixed', True)
        expected_xml = b'<complexType mixed="true"/>\n'
        xml = etree.tostring(xmlelement, pretty_print=True)
        self.assertEqual(expected_xml, xml)

    def test_attribute_nil(self):
        # NIL-valued attributes render as the literal string "nil"
        mixed = xsd.Attribute(xsd.Boolean, nillable=True, use=xsd.Use.OPTIONAL)
        xmlelement = etree.Element('complexType')
        mixed.render(xmlelement, 'mixed', xsd.NIL)
        expected_xml = b'<complexType mixed="nil"/>\n'
        xml = etree.tostring(xmlelement, pretty_print=True)
        self.assertEqual(expected_xml, xml)
class DecimalTypeTest(unittest.TestCase):
def test_python_decimal(self):
class Test(xsd.ComplexType):
float = xsd.Element(xsd.Decimal())
test = Test()
test.float = Decimal('2.2')
def test_enumeration(self):
class Test(xsd.ComplexType):
integer = xsd.Element(xsd.Decimal(enumeration=[1, 2, 3]))
test = Test()
try:
test.integer = 4
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_fractionDigits(self):
class Test(xsd.ComplexType):
float = xsd.Element(xsd.Decimal(fractionDigits=2))
test = Test()
test.float = 2.22
try:
test.float = 2.2
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_Inclusive(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Decimal(minInclusive=0, maxInclusive=100))
test = Test()
test.value = 0
test.value = 50
test.value = 100
try:
test.value = -1
except ValueError:
pass
else:
self.fail('Should not get here.')
try:
test.value = 101
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_Exclusive(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Decimal(minExclusive=-100, maxExclusive=0))
test = Test()
test.value = -99
test.value = -50
test.value = -1
try:
test.value = -100
except ValueError:
pass
else:
self.fail('Should not get here.')
try:
test.value = 0
except ValueError:
pass
else:
self.fail('Should not get here.')
try:
test.value = 1
except ValueError:
pass
else:
self.fail('Should not get here.')
try:
test.value = -101
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_pattern(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Decimal(pattern=r'1+'))
test = Test()
test.value = 11
test.value = 111
try:
test.value = 2
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_totalDigits(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Decimal(totalDigits=4))
test = Test()
test.value = 1.2
test.value = 22.22
test.value = 1.234
try:
test.value = 12.345
except ValueError:
pass
else:
self.fail('Should not get here.')
try:
test.value = 12345
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_rendring(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Decimal)
test = Test()
test.value = 4.13
xml = test.xml('test')
self.assertEqual(b'<test>\n <value>4.13</value>\n</test>\n', xml)
def test_parsing(self):
xml = b'<test><value>3.14</value></test>'
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Decimal)
test = Test.parsexml(xml)
self.assertEqual(test.value, 3.14, 'PI value is wrong OMG!')
class IntegerTypeTest(unittest.TestCase):
def test_rendering_and_parsing(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Integer(totalDigits=2))
test = Test()
test.value = 22
xml = test.xml('test')
XML = b'<test>\n <value>22</value>\n</test>\n'
self.assertEqual(XML, xml)
test1 = Test.parsexml(XML)
self.assertEqual(22, test1.value)
def test_Int(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Int)
test = Test()
test.value = 1
self.assertEqual(1, test.value)
try:
test.value = 2147483648
except ValueError:
pass
else:
self.fail('Should not get here.')
try:
test.value = -2147483649
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_Long(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Long)
test = Test()
test.value = 1
self.assertEqual(1, test.value)
try:
test.value = 9223372036854775807 + 1
except ValueError:
pass
else:
self.fail('Should not get here.')
try:
test.value = -9223372036854775808 - 1
except ValueError:
pass
else:
self.fail('Should not get here.')
class ComplexTest(unittest.TestCase):
    """Rendering/parsing of complex types: attributes, nesting, lists,
    inheritance and validation against a soapfish-level schema."""

    def test_rendering(self):
        airport = Airport()
        airport.type = 'IATA'
        airport.code = 'WAW'
        xmlelement = etree.Element('airport')
        airport.render(xmlelement, airport)
        xml = etree.tostring(xmlelement, pretty_print=True)
        expected_xml = b'''<airport>
  <type>IATA</type>
  <code>WAW</code>
</airport>
'''
        self.assertEqual(expected_xml, xml)

    def test_attribute_rendering(self):
        aircraft = Aircraft()
        aircraft.tail_number = 'LN-KKX'
        xmlelement = etree.Element('aircraft')
        aircraft.render(xmlelement, aircraft)
        expected_xml = b'<aircraft tail_number="LN-KKX"/>\n'
        xml = etree.tostring(xmlelement, pretty_print=True)
        self.assertEqual(expected_xml, xml)

    def test_attribute_parsing(self):
        XML = b'<aircraft tail_number="LN-KKX"/>\n'
        aircraft = Aircraft.parsexml(XML)
        self.assertEqual('LN-KKX', aircraft.tail_number)

    def test_multilayer_complex(self):
        flight = Flight()
        flight.tail_number = 'LN-KKA'
        flight.takeoff_airport = Airport.create('IATA', 'WAW')
        flight.landing_airport = Airport.create('ICAO', 'EGLL')
        # Pilot is an enumerated string type: values outside the enumeration
        # must be rejected on assignment.
        with self.assertRaises(ValueError):
            flight.takeoff_pilot = 'ABC'
        flight.takeoff_pilot = 'CAPTAIN'

        xmlelement = etree.Element('flight')
        flight.render(xmlelement, flight)
        xml = etree.tostring(xmlelement, pretty_print=True)
        expected_xml = b'''<flight>
  <tail_number>LN-KKA</tail_number>
  <takeoff_airport>
    <type>IATA</type>
    <code>WAW</code>
  </takeoff_airport>
  <landing_airport>
    <type>ICAO</type>
    <code>EGLL</code>
  </landing_airport>
  <takeoff_pilot>CAPTAIN</takeoff_pilot>
</flight>
'''
        self.assertEqual(expected_xml, xml)

    def test_complex_with_list(self):
        flight = Flight()
        flight.tail_number = 'LN-KKA'
        flight.takeoff_airport = Airport.create('IATA', 'WAW')
        flight.landing_airport = Airport.create('ICAO', 'EGLL')
        flight.passengers.append('abc')
        flight.passengers.append('123')
        xmlelement = etree.Element('flight')
        flight.render(xmlelement, flight)
        xml = etree.tostring(xmlelement, pretty_print=True)
        expected_xml = b'''<flight>
  <tail_number>LN-KKA</tail_number>
  <takeoff_airport>
    <type>IATA</type>
    <code>WAW</code>
  </takeoff_airport>
  <landing_airport>
    <type>ICAO</type>
    <code>EGLL</code>
  </landing_airport>
  <passenger>abc</passenger>
  <passenger>123</passenger>
</flight>
'''
        self.assertEqual(expected_xml, xml)

    def test_inheritance_rendering(self):
        class A(xsd.ComplexType):
            name = xsd.Attribute(xsd.String)

        class B(A):
            type = xsd.Attribute(xsd.String)

        b = B()
        b.name = 'b'
        b.type = 'B'
        xml = b.xml('inheritance')
        EXPECTED_XML = b'<inheritance name="b" type="B"/>\n'
        self.assertEqual(EXPECTED_XML, xml)

    def test_inheritance_parsing(self):
        class A(xsd.ComplexType):
            name = xsd.Attribute(xsd.String)

        class B(A):
            type = xsd.Element(xsd.String)

        XML = b'''<inheritance name="b">
  <type>B</type>
</inheritance>\n'''
        b = B.parsexml(XML)
        self.assertEqual(b.name, 'b')
        self.assertEqual(b.type, 'B')

    def test_parsexml_with_soapfish_schema(self):
        # sometimes it comes handy that soapfish can validate some XML against a
        # provided soapfish schema (instead of an etree.XMLSchema) especially in
        # testing.
        class A(xsd.ComplexType):
            name = xsd.Element(xsd.String, nillable=True)

        ns = 'http://foo.example'
        soapfish_schema = xsd.Schema(ns,
            imports=[],
            elementFormDefault=xsd.ElementFormDefault.UNQUALIFIED,
            simpleTypes=[],
            attributeGroups=[],
            groups=[],
            complexTypes=[A],
            elements={'foo': xsd.Element(A)},
        )
        xml = '<test:foo xmlns:test="%s"><name>bar</name></test:foo>' % ns
        foo = A.parsexml(xml, schema=soapfish_schema)
        assert_equals('bar', foo.name)
class XMLParsingTest(unittest.TestCase):
    """End-to-end parsing of Flight documents.

    The fixtures deliberately list elements in a different order than the
    schema declares them; parsing must still succeed.
    """

    SIMPLE_XML = b'''<flight>
    <landing_airport>
        <code>EGLL</code>
        <type>ICAO</type>
    </landing_airport>
    <tail_number>LN-KKA</tail_number>
    <takeoff_datetime>2001-10-26T21:32:52</takeoff_datetime>
    <takeoff_airport>
        <code>WAW</code>
        <type>IATA</type>
    </takeoff_airport>
</flight>
'''

    def test_simple_parsing(self):
        flight = Flight.parse_xmlelement(etree.fromstring(self.SIMPLE_XML))
        self.assertEqual('LN-KKA', flight.tail_number)
        self.assertEqual('WAW', flight.takeoff_airport.code)
        self.assertEqual('IATA', flight.takeoff_airport.type)
        self.assertEqual('EGLL', flight.landing_airport.code)
        self.assertEqual('ICAO', flight.landing_airport.type)
        # timestamp without an explicit offset compares equal to the UTC value
        self.assertEqual(iso8601.parse_date('2001-10-26T21:32:52Z'), flight.takeoff_datetime)

    LIST_XML = b'''<flight>
    <landing_airport>
        <code>EGLL</code>
        <type>ICAO</type>
    </landing_airport>
    <passenger>abc</passenger>
    <passenger>123</passenger>
    <tail_number>LN-KKA</tail_number>
    <takeoff_airport>
        <code>WAW</code>
        <type>IATA</type>
    </takeoff_airport>
</flight>
'''

    def test_list_parsing(self):
        flight = Flight.parse_xmlelement(etree.fromstring(self.LIST_XML))
        self.assertEqual('LN-KKA', flight.tail_number)
        self.assertEqual('WAW', flight.takeoff_airport.code)
        self.assertEqual('IATA', flight.takeoff_airport.type)
        self.assertEqual('EGLL', flight.landing_airport.code)
        self.assertEqual('ICAO', flight.landing_airport.type)
        self.assertEqual(['abc', '123'], flight.passengers)
class XSD_Spec_Test(unittest.TestCase):
    """Parsing raw XSD markup into xsdspec model objects."""

    AIRPORT_XML = '''
    <xs:complexType name="airport" xmlns:xs="http://www.w3.org/2001/XMLSchema">
        <xs:sequence>
            <xs:element name="code_type">
                <xs:simpleType>
                    <xs:restriction base="xs:string">
                        <xs:enumeration value="ICAO"/>
                        <xs:enumeration value="IATA"/>
                        <xs:enumeration value="FAA"/>
                    </xs:restriction>
                </xs:simpleType>
            </xs:element>
            <xs:element name="code" type="xs:string"/>
        </xs:sequence>
    </xs:complexType>'''

    def test_complexType(self):
        airport = xsdspec.XSDComplexType.parse_xmlelement(etree.fromstring(self.AIRPORT_XML))
        self.assertEqual('airport', airport.name)
        code_type_element = airport.sequence.elements[0]
        code_element = airport.sequence.elements[1]
        self.assertEqual('code_type', code_type_element.name)
        # inline simpleType with a restriction and its enumeration values
        self.assertEqual('xs:string', code_type_element.simpleType.restriction.base)
        self.assertEqual(3, len(code_type_element.simpleType.restriction.enumerations))
        self.assertEqual('ICAO', code_type_element.simpleType.restriction.enumerations[0].value)
        self.assertEqual('code', code_element.name)
SCHEMA_XML = '''
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" targetNamespace="http://flightdataservices.com/ops.xsd">
<xs:complexType name="airport">
<xs:sequence>
<xs:element name="code_type">
<xs:simpleType>
<xs:restriction base="xs:string">
<xs:enumeration value="ICAO"/>
<xs:enumeration value="IATA"/>
<xs:enumeration value="FAA"/>
</xs:restriction>
</xs:simpleType>
</xs:element>
<xs:element name="code" type="xs:string"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="weight">
<xs:sequence>
<xs:element name="value" type="xs:integer"/>
<xs:element name="unit">
<xs:simpleType>
<xs:restriction base="xs:string">
<xs:enumeration value="kg"/>
<xs:enumeration value="lb"/>
</xs:restriction>
</xs:simpleType>
</xs:element>
</xs:sequence>
</xs:complexType>
<xs:simpleType name="pilot">
<xs:restriction base="xs:string">
<xs:enumeration value="CAPTAIN"/>
<xs:enumeration value="FIRST_OFFICER"/>
</xs:restriction>
</xs:simpleType>
<xs:complexType name="ops">
<xs:sequence>
<xs:element name="aircraft" type="xs:string"/>
<xs:element name="flight_number" type="xs:string"/>
<xs:element name="type">
<xs:simpleType>
<xs:restriction base="xs:string">
<xs:enumeration value="COMMERCIAL"/>
<xs:enumeration value="INCOMPLETE"/>
<xs:enumeration value="ENGINE_RUN_UP"/>
<xs:enumeration value="TEST"/>
<xs:enumeration value="TRAINING"/>
<xs:enumeration value="FERRY"/>
<xs:enumeration value="POSITIONING"/>
<xs:enumeration value="LINE_TRAINING"/>
</xs:restriction>
</xs:simpleType>
</xs:element>
<xs:element name="takeoff_airport" type="fds:airport"/>
<xs:element name="takeoff_gate_datetime" type="xs:dateTime" minOccurs="0"/>
<xs:element name="takeoff_datetime" type="xs:dateTime"/>
<xs:element name="takeoff_fuel" minOccurs="0" type="fds:weight"/>
<xs:element name="takeoff_gross_weight" minOccurs="0" type="fds:weight"/>
<xs:element name="takeoff_pilot" minOccurs="0" type="fds:pilot"/>
<xs:element name="landing_airport" type="fds:airport"/>
<xs:element name="landing_gate_datetime" type="xs:dateTime" minOccurs="0"/>
<xs:element name="landing_datetime" type="xs:dateTime"/>
<xs:element name="landing_fuel" minOccurs="0" type="fds:weight"/>
<xs:element name="landing_pilot" minOccurs="0" type="fds:pilot"/>
<xs:element name="destination_airport" minOccurs="0" type="fds:airport"/>
<xs:element name="captain_code" minOccurs="0" type="xs:string"/>
<xs:element name="first_officer_code" minOccurs="0" type="xs:string"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="status">
<xs:sequence>
<xs:element name="action">
<xs:simpleType>
<xs:restriction base="xs:string">
<xs:enumeration value="INSERTED"/>
<xs:enumeration value="UPDATED"/>
<xs:enumeration value="EXISTS"/>
</xs:restriction>
</xs:simpleType>
</xs:element>
<xs:element name="id" type="xs:long"/>
</xs:sequence>
</xs:complexType>
<xs:element name="ops" type="fds:ops"/>
<xs:element name="status" type="fds:status"/>
</xs:schema>'''
class SchemaTest(unittest.TestCase):

    def test_schema_parsing(self):
        """Top-level element/type counts plus a spot check of the 'ops' type."""
        schema = xsdspec.Schema.parse_xmlelement(etree.fromstring(SCHEMA_XML))
        self.assertEqual(4, len(schema.complexTypes))
        self.assertEqual(1, len(schema.simpleTypes))
        self.assertEqual(2, len(schema.elements))
        self.assertEqual('ops', schema.elements[0].name)
        self.assertEqual('fds:ops', schema.elements[0].type)
        # complexTypes preserve document order: airport, weight, ops, status
        ops_type = schema.complexTypes[2]
        self.assertEqual('ops', ops_type.name)
        self.assertEqual('aircraft', ops_type.sequence.elements[0].name)
        self.assertEqual('xs:string', ops_type.sequence.elements[0].type)
class RequestResponseOperation(xsd.Group):
    # Reusable group of optional elements, included elsewhere via xsd.Ref.
    input = xsd.Element(xsd.String, minOccurs=0)
    output = xsd.Element(xsd.String, minOccurs=0)
class Operation(xsd.ComplexType):
    # Complex type embedding the RequestResponseOperation group by reference.
    name = xsd.Element(xsd.String)
    requestResponseOperation = xsd.Ref(RequestResponseOperation)
class GroupTest(unittest.TestCase):
    """Rendering/parsing of xsd.Group members referenced through xsd.Ref."""

    XML = b'''<operation>
  <name>TEST-Operation</name>
  <input>IN</input>
  <output>OUT</output>
</operation>\n'''

    def test_rendering(self):
        operation = Operation()
        operation.name = 'TEST-Operation'
        operation.requestResponseOperation.input = 'IN'
        operation.requestResponseOperation.output = 'OUT'
        xml = operation.xml('operation')
        self.assertEqual(self.XML, xml)

    def test_parsing(self):
        operation = Operation.parsexml(self.XML)
        self.assertEqual(operation.name, 'TEST-Operation')
        self.assertEqual(operation.requestResponseOperation.input, 'IN')
        self.assertEqual(operation.requestResponseOperation.output, 'OUT')

    def test_rendering_empty_group(self):
        # group members that were never set are simply omitted from the output
        operation = Operation()
        operation.name = 'TEST-Operation'
        xml = operation.xml('operation')
        expected_xml = b'''<operation>
  <name>TEST-Operation</name>
</operation>\n'''
        self.assertEqual(expected_xml, xml)
# <xs:attributeGroup name="tHeaderAttributes">
# <xs:attribute name="message" type="xs:QName" use="required"/>
# <xs:attribute name="part" type="xs:NMTOKEN" use="required"/>
# <xs:attribute name="use" type="soap:useChoice" use="required"/>
# <xs:attribute name="encodingStyle" type="soap:encodingStyle" use="optional"/>
# <xs:attribute name="namespace" type="xs:anyURI" use="optional"/>
# </xs:attributeGroup>
class TBodyAttributes(xsd.AttributeGroup):
    # Attribute group modelled after the WSDL/SOAP tBodyAttributes definition
    # quoted in the comment above.
    encodingStyle = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
    use = xsd.Attribute(xsd.String)
    namespace = xsd.Attribute(xsd.String)
class TBody(xsd.ComplexType):
    # Complex type pulling in an attribute group by reference.
    parts = xsd.Attribute(xsd.String)
    tBodyAttributes = xsd.Ref(TBodyAttributes)
class AttributeGroupTest(unittest.TestCase):
    """Attributes contributed via xsd.AttributeGroup render flat on the
    owning element and parse back into the nested ref object."""

    def test_rendering(self):
        body = TBody()
        body.parts = 'Parts'
        body.tBodyAttributes.use = 'required'
        body.tBodyAttributes.namespace = 'xs'
        expected_xml = b'<body parts="Parts" use="required" namespace="xs"/>\n'
        xml = body.xml('body')
        self.assertEqual(expected_xml, xml)

    def test_parsing(self):
        xml = b'<body parts="Parts" use="required" namespace="xs"/>\n'
        body = TBody.parsexml(xml)
        self.assertEqual(body.parts, 'Parts')
        self.assertEqual(body.tBodyAttributes.use, 'required')
        self.assertEqual(body.tBodyAttributes.namespace, 'xs')
        # attributes absent from the document parse as None
        self.assertEqual(body.tBodyAttributes.encodingStyle, None)
class AirporttDocument(xsd.Document):
    # NOTE(review): class name has a doubled 't' (typo); kept as-is because
    # DocumentTest below refers to it by this name.
    airport = xsd.Element(Airport)
class DocumentTest(unittest.TestCase):

    def test_document_rendering(self):
        """xsd.Document.render() produces the root element directly, without
        a caller-supplied parent element."""
        document = AirporttDocument()
        document.airport = Airport(code='XXX', type='IATA')
        xml = document.render()
        expected_xml = b'''<airport>
  <type>IATA</type>
  <code>XXX</code>
</airport>\n'''
        self.assertEqual(xml, expected_xml)
class NillableTest(unittest.TestCase):
def test_nilable_element_rendering(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Integer, nillable=True)
notnillable = xsd.Element(xsd.Integer, minOccurs=0)
test = Test()
test.value = xsd.NIL
xml = test.xml('test')
EXPECTED_XML = b'''<test>
<value xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
</test>\n'''
self.assertEqual(xml, EXPECTED_XML)
try:
test.notnillable = xsd.NIL
except ValueError:
pass
else:
self.fail('Should not get here.')
def test_nillable_element_parsing(self):
class Test(xsd.ComplexType):
value = xsd.Element(xsd.Long, nillable=True)
xml = b'<test><value xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/></test>'
test = Test.parsexml(xml)
self.assertEqual(test.value, xsd.NIL)
def test_nillable_list_rendering(self):
class Test(xsd.ComplexType):
values = xsd.ListElement(xsd.String, 'value', nillable=True)
notnillable = xsd.ListElement(xsd.String, 'notnillable', minOccurs=0)
test = Test()
test.values.append('XXX')
test.values.append(xsd.NIL)
xml = test.xml('test')
EXPECTED_XML = b'''<test>
<value>XXX</value>
<value xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
</test>\n'''
self.assertEqual(xml, EXPECTED_XML)
self.assertRaises(Exception, lambda: test.notnillable.append(xsd.NIL))
def test_nillable_list_parsing(self):
class Test(xsd.ComplexType):
values = xsd.ListElement(xsd.Int, 'value', nillable=True)
XML = b'''<test>
<value>1</value>
<value xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
<value xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
<value>2</value>
</test>
'''
test = Test.parsexml(XML)
self.assertEqual(test.values[0], 1)
self.assertEqual(test.values[1], xsd.NIL)
self.assertEqual(test.values[2], xsd.NIL)
self.assertEqual(test.values[3], 2)
def test_nillable_attribute(self):
class Test(xsd.ComplexType):
value = xsd.Attribute(xsd.String, nillable=True, use=xsd.Use.OPTIONAL)
test = Test()
self.assertEqual(test.xml('test'), b'<test/>\n')
test.value = xsd.NIL
self.assertEqual(b'<test value="nil"/>\n', test.xml('test'))
class ElementTypeEvaluation(unittest.TestCase):

    def test_string_type_evalutation(self):  # (sic: method name typo preserved)
        """Element types given as dotted-path strings still accept and
        validate assigned values."""
        class B1(xsd.ComplexType):
            a = xsd.Element('soapfish.xsd.String')
            b = xsd.Element('soapfish.xsd.Integer')

        b = B1()
        b.a = 'test'
        b.b = 123
class PatternTest(unittest.TestCase):
    """Regex `pattern` facet on xsd.String."""

    def test_string_pattern(self):
        class Test(xsd.ComplexType):
            value = xsd.Element(xsd.String(pattern=r'^a*$'))

        test = Test()
        test.value = 'a'
        test.value = 'aaa'
        # assertRaises replaces the try/except/else/self.fail() boilerplate
        with self.assertRaises(ValueError):
            test.value = 'b'
class MaxOccursTest(unittest.TestCase):
    """xsd.MaxOccurs converts ints <-> numeric strings and the UNBOUNDED
    sentinel <-> the literal 'unbounded'."""

    def test_xmlvalue_simple(self):
        converter = xsd.MaxOccurs()
        for number, expected in ((1, '1'), (5, '5')):
            self.assertEqual(expected, converter.xmlvalue(number))

    def test_xmlvalue_unbounded(self):
        self.assertEqual('unbounded', xsd.MaxOccurs().xmlvalue(xsd.UNBOUNDED))

    def test_pythonvalue_simple(self):
        converter = xsd.MaxOccurs()
        for text, expected in (('1', 1), ('5', 5)):
            self.assertEqual(expected, converter.pythonvalue(text))

    def test_pythonvalue_unbounded(self):
        self.assertEqual(xsd.UNBOUNDED, xsd.MaxOccurs().pythonvalue('unbounded'))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
|
import difflib
import pprint
import pickle
import re
import sys
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult,
ResultWithNoStartTestRunStopTestRun
)
class Test(object):
    "Keep these TestCase classes out of the main namespace"

    class Foo(unittest.TestCase):
        # Has both a default runTest and a named test method, so instances can
        # be constructed either way by the tests below.
        def runTest(self): pass
        def test1(self): pass

    class Bar(Foo):
        def test2(self): pass

    class LoggingTestCase(unittest.TestCase):
        """A test case which logs its calls."""

        def __init__(self, events):
            # Always runs the fixed 'test' method; `events` is a caller-owned
            # list that setUp/test/tearDown append to, recording call order.
            super(Test.LoggingTestCase, self).__init__('test')
            self.events = events

        def setUp(self):
            self.events.append('setUp')

        def test(self):
            self.events.append('test')

        def tearDown(self):
            self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]

# Used by TestEquality.test_ne: pairs differ by method name, by class, or both.
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
            (Test.Foo('test1'), Test.Bar('test1')),
            (Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
    """methodName is optional and defaults to 'runTest'."""
    class Test(unittest.TestCase):
        # MyException is never raised here: runTest is never invoked below.
        def runTest(self): raise MyException()
        def test(self): pass

    self.assertEqual(Test().id()[-13:], '.Test.runTest')

    # test that TestCase can be instantiated with no args
    # primarily for use at the interactive interpreter
    test = unittest.TestCase()
    test.assertEqual(3, 3)
    with test.assertRaises(test.failureException):
        test.assertEqual(3, 2)

    # ...but such an instance has no runnable test method
    with self.assertRaises(AttributeError):
        test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
    """A TestCase runs exactly the method named by methodName."""
    class Test(unittest.TestCase):
        def runTest(self): raise MyException()
        def test(self): pass

    self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
    """Constructing a TestCase with an unknown method name raises ValueError.

    Uses the assertRaises context manager instead of the original
    try/except/else/self.fail() boilerplate — same semantics.
    """
    class Test(unittest.TestCase):
        def runTest(self): raise MyException()
        def test(self): pass

    with self.assertRaises(ValueError):
        Test('testfoo')
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
    """A single TestCase instance always counts as exactly one test."""
    class Foo(unittest.TestCase):
        def test(self): pass

    self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
    """defaultTestResult() returns a plain unittest.TestResult by default."""
    class Foo(unittest.TestCase):
        def runTest(self):
            pass

    result = Foo().defaultTestResult()
    self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
    """If setUp() raises, the test method and tearDown are skipped and the
    error is reported via addError."""
    events = []
    result = LoggingResult(events)

    class Foo(Test.LoggingTestCase):
        def setUp(self):
            super(Foo, self).setUp()
            raise RuntimeError('raised by Foo.setUp')

    Foo(events).run(result)
    # note: no 'test' and no 'tearDown' entries
    expected = ['startTest', 'setUp', 'addError', 'stopTest']
    self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
    """Running with the default (temporary) result wraps the run in
    startTestRun/stopTestRun even when setUp errors."""
    events = []

    class Foo(Test.LoggingTestCase):
        def defaultTestResult(self):
            return LoggingResult(self.events)

        def setUp(self):
            super(Foo, self).setUp()
            raise RuntimeError('raised by Foo.setUp')

    Foo(events).run()
    expected = ['startTestRun', 'startTest', 'setUp', 'addError',
                'stopTest', 'stopTestRun']
    self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
    """An exception raised by the test method still runs tearDown; addError
    is recorded after tearDown."""
    events = []
    result = LoggingResult(events)

    class Foo(Test.LoggingTestCase):
        def test(self):
            super(Foo, self).test()
            raise RuntimeError('raised by Foo.test')

    expected = ['startTest', 'setUp', 'test', 'tearDown',
                'addError', 'stopTest']
    Foo(events).run(result)
    self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
    """With a default result, an error in the test still produces the full
    startTestRun/.../stopTestRun sequence."""
    events = []

    class Foo(Test.LoggingTestCase):
        def defaultTestResult(self):
            return LoggingResult(self.events)

        def test(self):
            super(Foo, self).test()
            raise RuntimeError('raised by Foo.test')

    expected = ['startTestRun', 'startTest', 'setUp', 'test',
                'tearDown', 'addError', 'stopTest', 'stopTestRun']
    Foo(events).run()
    self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
    """A failure (self.fail) in the test method runs tearDown and is
    recorded via addFailure rather than addError."""
    events = []
    result = LoggingResult(events)

    class Foo(Test.LoggingTestCase):
        def test(self):
            super(Foo, self).test()
            self.fail('raised by Foo.test')

    expected = ['startTest', 'setUp', 'test', 'tearDown',
                'addFailure', 'stopTest']
    Foo(events).run(result)
    self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
    """A failure with a default result still calls stopTestRun at the end."""
    class Foo(Test.LoggingTestCase):
        def defaultTestResult(self):
            return LoggingResult(self.events)

        def test(self):
            super(Foo, self).test()
            self.fail('raised by Foo.test')

    expected = ['startTestRun', 'startTest', 'setUp', 'test',
                'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
    events = []
    Foo(events).run()
    self.assertEqual(events, expected)
    # "When a setUp() method is defined, the test runner will run that method
    # prior to each test. Likewise, if a tearDown() method is defined, the
    # test runner will invoke that method after each test. In the example,
    # setUp() was used to create a fresh sequence for each test."
    #
    # Make sure the proper call order is maintained, even if tearDown() raises
    # an exception.
    def test_run_call_order__error_in_tearDown(self):
        # An exception escaping tearDown() is recorded as an error, after the
        # test body has already completed successfully.
        events = []
        result = LoggingResult(events)
        class Foo(Test.LoggingTestCase):
            def tearDown(self):
                super(Foo, self).tearDown()
                raise RuntimeError('raised by Foo.tearDown')
        Foo(events).run(result)
        expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
                    'stopTest']
        self.assertEqual(events, expected)
    # "When tearDown errors with a default result stopTestRun is still called."
    def test_run_call_order__error_in_tearDown_default_result(self):
        class Foo(Test.LoggingTestCase):
            def defaultTestResult(self):
                return LoggingResult(self.events)
            def tearDown(self):
                super(Foo, self).tearDown()
                raise RuntimeError('raised by Foo.tearDown')
        events = []
        Foo(events).run()
        expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
                    'addError', 'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)
    # "TestCase.run() still works when the defaultTestResult is a TestResult
    # that does not support startTestRun and stopTestRun.
    def test_run_call_order_default_result(self):
        # Merely running without raising is the assertion here: run() must
        # tolerate results lacking the (start|stop)TestRun protocol.
        class Foo(unittest.TestCase):
            def defaultTestResult(self):
                return ResultWithNoStartTestRunStopTestRun()
            def test(self):
                pass
        Foo('test').run()
    # "This class attribute gives the exception raised by the test() method.
    # If a test framework needs to use a specialized exception, possibly to
    # carry additional information, it must subclass this exception in
    # order to ``play fair'' with the framework. The initial value of this
    # attribute is AssertionError"
    def test_failureException__default(self):
        class Foo(unittest.TestCase):
            def test(self):
                pass
        self.assertIs(Foo('test').failureException, AssertionError)
    # "This class attribute gives the exception raised by the test() method.
    # If a test framework needs to use a specialized exception, possibly to
    # carry additional information, it must subclass this exception in
    # order to ``play fair'' with the framework."
    #
    # Make sure TestCase.run() respects the designated failureException
    def test_failureException__subclassing__explicit_raise(self):
        # Raising the designated failureException directly must be reported
        # as a failure, not an error.
        events = []
        result = LoggingResult(events)
        class Foo(unittest.TestCase):
            def test(self):
                raise RuntimeError()
            failureException = RuntimeError
        self.assertIs(Foo('test').failureException, RuntimeError)
        Foo('test').run(result)
        expected = ['startTest', 'addFailure', 'stopTest']
        self.assertEqual(events, expected)
    # "This class attribute gives the exception raised by the test() method.
    # If a test framework needs to use a specialized exception, possibly to
    # carry additional information, it must subclass this exception in
    # order to ``play fair'' with the framework."
    #
    # Make sure TestCase.run() respects the designated failureException
    def test_failureException__subclassing__implicit_raise(self):
        # self.fail() raises whatever failureException is set to; run() must
        # classify it as a failure.
        events = []
        result = LoggingResult(events)
        class Foo(unittest.TestCase):
            def test(self):
                self.fail("foo")
            failureException = RuntimeError
        self.assertIs(Foo('test').failureException, RuntimeError)
        Foo('test').run(result)
        expected = ['startTest', 'addFailure', 'stopTest']
        self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
    # "Return a string identifying the specific test case."
    #
    # Because of the vague nature of the docs, I'm not going to lock this
    # test down too much. Really all that can be asserted is that the id()
    # will be a string (either 8-byte or unicode -- again, because the docs
    # just say "string")
    def test_id(self):
        class Foo(unittest.TestCase):
            def runTest(self):
                pass
        self.assertIsInstance(Foo().id(), str)
    # "If result is omitted or None, a temporary result object is created,
    # used, and is made available to the caller. As TestCase owns the
    # temporary result startTestRun and stopTestRun are called.
    def test_run__uses_defaultTestResult(self):
        events = []
        defaultResult = LoggingResult(events)
        class Foo(unittest.TestCase):
            def test(self):
                events.append('test')
            def defaultTestResult(self):
                return defaultResult
        # Make run() find a result object on its own
        result = Foo('test').run()
        # run() must both use and *return* the default result it created.
        self.assertIs(result, defaultResult)
        expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
                    'stopTest', 'stopTestRun']
        self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
    # "The same effect [as method run] may be had by simply calling the
    # TestCase instance."
    def test_call__invoking_an_instance_delegates_to_run(self):
        resultIn = unittest.TestResult()
        resultOut = unittest.TestResult()
        class Foo(unittest.TestCase):
            def test(self):
                pass
            # Override run() to prove __call__ forwards both the argument
            # and the return value unchanged.
            def run(self, result):
                self.assertIs(result, resultIn)
                return resultOut
        retval = Foo('test')(resultIn)
        self.assertIs(retval, resultOut)
    def testShortDescriptionWithoutDocstring(self):
        # NB: this method deliberately has *no* docstring -- shortDescription()
        # must return None in that case.
        self.assertIsNone(self.shortDescription())
    # The docstring below is the fixture under test: shortDescription()
    # should return it verbatim (it is a single line).
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testShortDescriptionWithOneLineDocstring(self):
        """Tests shortDescription() for a method with a docstring."""
        self.assertEqual(
                self.shortDescription(),
                'Tests shortDescription() for a method with a docstring.')
    # Only the first docstring line should survive into shortDescription().
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testShortDescriptionWithMultiLineDocstring(self):
        """Tests shortDescription() for a method with a longer docstring.

        This method ensures that only the first line of a docstring is
        returned used in the short description, no matter how long the
        whole thing is.
        """
        self.assertEqual(
                self.shortDescription(),
                 'Tests shortDescription() for a method with a longer '
                 'docstring.')
    def testAddTypeEqualityFunc(self):
        # A registered type-equality function replaces default == comparison
        # for assertEqual on that exact type.
        class SadSnake(object):
            """Dummy class for test_addTypeEqualityFunc."""
        s1, s2 = SadSnake(), SadSnake()
        self.assertFalse(s1 == s2)
        def AllSnakesCreatedEqual(a, b, msg=None):
            return type(a) == type(b) == SadSnake
        self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
        self.assertEqual(s1, s2)
        # No this doesn't clean up and remove the SadSnake equality func
        # from this TestCase instance but since its a local nothing else
        # will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
    def testAssertIsInstance(self):
        # Correct class passes; an unrelated class raises failureException.
        thing = []
        self.assertIsInstance(thing, list)
        self.assertRaises(self.failureException, self.assertIsInstance,
                          thing, dict)
    def testAssertNotIsInstance(self):
        # The negated form: wrong class passes, the actual class fails.
        thing = []
        self.assertNotIsInstance(thing, dict)
        self.assertRaises(self.failureException, self.assertNotIsInstance,
                          thing, list)
    def testAssertIn(self):
        # assertIn/assertNotIn work across strings, lists and dict keys.
        animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
        self.assertIn('a', 'abc')
        self.assertIn(2, [1, 2, 3])
        self.assertIn('monkey', animals)
        self.assertNotIn('d', 'abc')
        self.assertNotIn(0, [1, 2, 3])
        self.assertNotIn('otter', animals)
        self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
        self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
        self.assertRaises(self.failureException, self.assertIn, 'elephant',
                          animals)
        self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
        self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
        self.assertRaises(self.failureException, self.assertNotIn, 'cow',
                          animals)
    def testAssertDictContainsSubset(self):
        # assertDictContainsSubset is deprecated, so silence the warning for
        # the duration of the positive/negative checks.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.assertDictContainsSubset({}, {})
            self.assertDictContainsSubset({}, {'a': 1})
            self.assertDictContainsSubset({'a': 1}, {'a': 1})
            self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
            self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})

            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({1: "one"}, {})

            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'a': 2}, {'a': 1})

            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'c': 1}, {'a': 1})

            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})

            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})

            one = ''.join(chr(i) for i in range(255))
            # this used to cause a UnicodeDecodeError constructing the failure msg
            with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
    def testAssertEqual(self):
        # Exercise assertEqual across empty containers of each builtin kind,
        # with and without a custom message, in both argument forms.
        equal_pairs = [
                ((), ()),
                ({}, {}),
                ([], []),
                (set(), set()),
                (frozenset(), frozenset())]
        for a, b in equal_pairs:
            # This mess of try excepts is to test the assertEqual behavior
            # itself.
            try:
                self.assertEqual(a, b)
            except self.failureException:
                self.fail('assertEqual(%r, %r) failed' % (a, b))
            try:
                self.assertEqual(a, b, msg='foo')
            except self.failureException:
                self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
            try:
                self.assertEqual(a, b, 'foo')
            except self.failureException:
                self.fail('assertEqual(%r, %r) with third parameter failed' %
                          (a, b))

        # Cross-type and genuinely unequal pairs must raise in all three forms.
        unequal_pairs = [
               ((), []),
               ({}, set()),
               (set([4,1]), frozenset([4,2])),
               (frozenset([4,5]), set([2,3])),
               (set([3,4]), set([5,4]))]
        for a, b in unequal_pairs:
            self.assertRaises(self.failureException, self.assertEqual, a, b)
            self.assertRaises(self.failureException, self.assertEqual, a, b,
                              'foo')
            self.assertRaises(self.failureException, self.assertEqual, a, b,
                              msg='foo')
    def testEquality(self):
        # assertListEqual/assertTupleEqual are type-strict; assertSequenceEqual
        # only compares contents, so mixed list/tuple pairs pass there.
        self.assertListEqual([], [])
        self.assertTupleEqual((), ())
        self.assertSequenceEqual([], ())

        a = [0, 'a', []]
        b = []
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertListEqual, a, b)
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertListEqual, tuple(a), tuple(b))
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertSequenceEqual, a, tuple(b))

        b.extend(a)
        self.assertListEqual(a, b)
        self.assertTupleEqual(tuple(a), tuple(b))
        self.assertSequenceEqual(a, tuple(b))
        self.assertSequenceEqual(tuple(a), b)

        # Same contents but mismatched container types must fail the
        # type-strict variants.
        self.assertRaises(self.failureException, self.assertListEqual,
                          a, tuple(b))
        self.assertRaises(self.failureException, self.assertTupleEqual,
                          tuple(a), b)
        self.assertRaises(self.failureException, self.assertListEqual, None, b)
        self.assertRaises(self.failureException, self.assertTupleEqual, None,
                          tuple(b))
        self.assertRaises(self.failureException, self.assertSequenceEqual,
                          None, tuple(b))
        self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
        self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
        self.assertRaises(self.failureException, self.assertSequenceEqual,
                          1, 1)

        self.assertDictEqual({}, {})

        c = { 'x': 1 }
        d = {}
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertDictEqual, c, d)

        d.update(c)
        self.assertDictEqual(c, d)

        d['x'] = 0
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertDictEqual, c, d, 'These are unequal')

        self.assertRaises(self.failureException, self.assertDictEqual, None, d)
        self.assertRaises(self.failureException, self.assertDictEqual, [], d)
        self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
    def testAssertSequenceEqualMaxDiff(self):
        # maxDiff bounds the rendered diff: below the bound the message is
        # truncated (with the DIFF_OMITTED marker), above it or with
        # maxDiff=None the full diff appears.
        self.assertEqual(self.maxDiff, 80*8)
        seq1 = 'a' + 'x' * 80**2
        seq2 = 'b' + 'x' * 80**2
        diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
                                       pprint.pformat(seq2).splitlines()))
        # the +1 is the leading \n added by assertSequenceEqual
        omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)

        self.maxDiff = len(diff)//2
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertLess(len(msg), len(diff))
        self.assertIn(omitted, msg)

        self.maxDiff = len(diff) * 2
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertGreater(len(msg), len(diff))
        self.assertNotIn(omitted, msg)

        # maxDiff=None disables truncation entirely.
        self.maxDiff = None
        try:
            self.assertSequenceEqual(seq1, seq2)
        except self.failureException as e:
            msg = e.args[0]
        else:
            self.fail('assertSequenceEqual did not fail.')
        self.assertGreater(len(msg), len(diff))
        self.assertNotIn(omitted, msg)
    def testTruncateMessage(self):
        # _truncateMessage drops the diff when it exceeds maxDiff, appending
        # the DIFF_OMITTED marker instead; otherwise the diff is kept whole.
        self.maxDiff = 1
        message = self._truncateMessage('foo', 'bar')
        omitted = unittest.case.DIFF_OMITTED % len('bar')
        self.assertEqual(message, 'foo' + omitted)

        self.maxDiff = None
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')

        self.maxDiff = 4
        message = self._truncateMessage('foo', 'bar')
        self.assertEqual(message, 'foobar')
    def testAssertDictEqualTruncates(self):
        # assertDictEqual must route its failure message through
        # _truncateMessage; patching it on the instance proves the hook-up.
        test = unittest.TestCase('assertEqual')
        def truncate(msg, diff):
            return 'foo'
        test._truncateMessage = truncate
        try:
            test.assertDictEqual({}, {1: 0})
        except self.failureException as e:
            self.assertEqual(str(e), 'foo')
        else:
            self.fail('assertDictEqual did not fail')
    def testAssertMultiLineEqualTruncates(self):
        # Same patching trick as testAssertDictEqualTruncates, but for
        # assertMultiLineEqual's message path.
        test = unittest.TestCase('assertEqual')
        def truncate(msg, diff):
            return 'foo'
        test._truncateMessage = truncate
        try:
            test.assertMultiLineEqual('foo', 'bar')
        except self.failureException as e:
            self.assertEqual(str(e), 'foo')
        else:
            self.fail('assertMultiLineEqual did not fail')
    def testAssertEqual_diffThreshold(self):
        # Strings shorter than _diffThreshold get a character-level diff
        # (with '^' markers); longer ones fall back to a plain %r != %r
        # message and must not touch the difflib path at all.
        # check threshold value
        self.assertEqual(self._diffThreshold, 2**16)
        # disable maxDiff to get diff markers
        self.maxDiff = None
        # set a lower threshold value and add a cleanup to restore it
        old_threshold = self._diffThreshold
        self._diffThreshold = 2**8
        self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))

        # under the threshold: diff marker (^) in error message
        s = 'x' * (2**7)
        with self.assertRaises(self.failureException) as cm:
            self.assertEqual(s + 'a', s + 'b')
        self.assertIn('^', str(cm.exception))
        self.assertEqual(s + 'a', s + 'a')

        # over the threshold: diff not used and marker (^) not in error message
        s = 'x' * (2**9)
        # if the path that uses difflib is taken, _truncateMessage will be
        # called -- replace it with explodingTruncation to verify that this
        # doesn't happen
        def explodingTruncation(message, diff):
            raise SystemError('this should not be raised')
        old_truncate = self._truncateMessage
        self._truncateMessage = explodingTruncation
        self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))

        s1, s2 = s + 'a', s + 'b'
        with self.assertRaises(self.failureException) as cm:
            self.assertEqual(s1, s2)
        self.assertNotIn('^', str(cm.exception))
        self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
        self.assertEqual(s + 'a', s + 'a')
    def testAssertCountEqual(self):
        # assertCountEqual compares element multiplicity regardless of order,
        # and must cope with unhashable and unorderable elements.
        a = object()
        self.assertCountEqual([1, 2, 3], [3, 2, 1])
        self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
        self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
        self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 2] + [3] * 100, [1] * 100 + [2, 3])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, "2", "a", "a"], ["a", "2", True, 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10], [10, 11])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10, 11], [10])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10, 11, 10], [10, 11])

        # Test that sequences of unhashable objects can be tested for sameness:
        self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
        # Test that iterator of unhashable objects can be tested for sameness:
        self.assertCountEqual(iter([1, 2, [], 3, 4]),
                              iter([1, 2, [], 3, 4]))

        # hashable types, but not orderable
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [], [divmod, 'x', 1, 5j, 2j, frozenset()])
        # comparing dicts
        self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        # comparing heterogeneous non-hashable sequences
        self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [], [divmod, [], 'x', 1, 5j, 2j, set()])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [[1]], [[2]])

        # Same elements, but not same sequence length
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 1, 2], [2, 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 1, "2", "a", "a"], ["2", "2", True, "a"])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, {'b': 2}, None, True], [{'b': 2}, True, None])

        # Same elements which don't reliably compare, in
        # different order, see issue 10242
        a = [{2,4}, {1,2}]
        b = a[::-1]
        self.assertCountEqual(a, b)

        # test utility functions supporting assertCountEqual()
        diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
        expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
        self.assertEqual(diffs, expected)

        diffs = unittest.util._count_diff_all_purpose([[]], [])
        self.assertEqual(diffs, [(1, 0, [])])

        diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
        expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
        self.assertEqual(diffs, expected)
    def testAssertSetEqual(self):
        # set/frozenset mixes compare by content; non-set operands (None,
        # list, str) must raise on either side.
        set1 = set()
        set2 = set()
        self.assertSetEqual(set1, set2)

        self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
        self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
        self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
        self.assertRaises(self.failureException, self.assertSetEqual, set1, [])

        set1 = set(['a'])
        set2 = set()
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

        set1 = set(['a'])
        set2 = set(['a'])
        self.assertSetEqual(set1, set2)

        set1 = set(['a'])
        set2 = set(['a', 'b'])
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

        set1 = set(['a'])
        set2 = frozenset(['a', 'b'])
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)

        set1 = set(['a', 'b'])
        set2 = frozenset(['a', 'b'])
        self.assertSetEqual(set1, set2)

        set1 = set()
        set2 = "foo"
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
        self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)

        # make sure any string formatting is tuple-safe
        set1 = set([(0, 1), (2, 3)])
        set2 = set([(4, 5)])
        self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
    def testInequality(self):
        # Exercise the four ordering asserts over every ordered builtin:
        # ints, floats, str and bytes -- pass, boundary-equal, and fail cases.
        # Try ints
        self.assertGreater(2, 1)
        self.assertGreaterEqual(2, 1)
        self.assertGreaterEqual(1, 1)
        self.assertLess(1, 2)
        self.assertLessEqual(1, 2)
        self.assertLessEqual(1, 1)
        self.assertRaises(self.failureException, self.assertGreater, 1, 2)
        self.assertRaises(self.failureException, self.assertGreater, 1, 1)
        self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
        self.assertRaises(self.failureException, self.assertLess, 2, 1)
        self.assertRaises(self.failureException, self.assertLess, 1, 1)
        self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)

        # Try Floats
        self.assertGreater(1.1, 1.0)
        self.assertGreaterEqual(1.1, 1.0)
        self.assertGreaterEqual(1.0, 1.0)
        self.assertLess(1.0, 1.1)
        self.assertLessEqual(1.0, 1.1)
        self.assertLessEqual(1.0, 1.0)
        self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
        self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
        self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
        self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
        self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
        self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)

        # Try Strings
        self.assertGreater('bug', 'ant')
        self.assertGreaterEqual('bug', 'ant')
        self.assertGreaterEqual('ant', 'ant')
        self.assertLess('ant', 'bug')
        self.assertLessEqual('ant', 'bug')
        self.assertLessEqual('ant', 'ant')
        self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
        self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
        self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
        self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
        self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
        self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')

        # Try bytes
        self.assertGreater(b'bug', b'ant')
        self.assertGreaterEqual(b'bug', b'ant')
        self.assertGreaterEqual(b'ant', b'ant')
        self.assertLess(b'ant', b'bug')
        self.assertLessEqual(b'ant', b'bug')
        self.assertLessEqual(b'ant', b'ant')
        self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
        self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
        self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
                          b'bug')
        self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
        self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
        self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
    def testAssertMultiLineEqual(self):
        # The expected-error fixture below is literal difflib.ndiff output;
        # NOTE(review): the internal indentation/marker alignment of these
        # triple-quoted strings was lost in transit and has been reconstructed
        # to match ndiff's column positions -- verify against a live run.
        sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...]
"""
        revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
    A test case is the smallest unit of testing. [...] You may provide your
    own implementation that does not subclass from TestCase, of course.
"""
        sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
?                             ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
?                             ^^^
  test case
-     A test case is the smallest unit of testing. [...]
+     A test case is the smallest unit of testing. [...] You may provide your
?                                                       +++++++++++++++++++++
+     own implementation that does not subclass from TestCase, of course.
"""
        self.maxDiff = None
        try:
            self.assertMultiLineEqual(sample_text, revised_sample_text)
        except self.failureException as e:
            # need to remove the first line of the error message
            error = str(e).split('\n', 1)[1]

            # no fair testing ourself with ourself, and assertEqual is used for strings
            # so can't use assertEqual either. Just use assertTrue.
            self.assertTrue(sample_text_error == error)
    # NB: "Asert" typo is in the original method name; renaming would change
    # the test id, so it is preserved.
    def testAsertEqualSingleLine(self):
        # NOTE(review): marker-line alignment in the fixture was reconstructed
        # from ndiff column arithmetic after whitespace was stripped -- verify.
        sample_text = "laden swallows fly slowly"
        revised_sample_text = "unladen swallows fly quickly"
        sample_text_error = """\
- laden swallows fly slowly
?                    ^^^^
+ unladen swallows fly quickly
? ++                   ^^^^^
"""
        try:
            self.assertEqual(sample_text, revised_sample_text)
        except self.failureException as e:
            error = str(e).split('\n', 1)[1]
            self.assertTrue(sample_text_error == error)
    def testAssertIsNone(self):
        # Both the positive and negated forms, with pass and fail cases.
        self.assertIsNone(None)
        self.assertRaises(self.failureException, self.assertIsNone, False)
        self.assertIsNotNone('DjZoPloGears on Rails')
        self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
    def testAssertRaisesRegex(self):
        # Both a compiled pattern and a plain string are accepted as the
        # expected-message regex.
        class ExceptionMock(Exception):
            pass

        def Stub():
            raise ExceptionMock('We expect')

        self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
        self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
    def testAssertNotRaisesRegex(self):
        # If the callable raises nothing, assertRaisesRegex itself must fail
        # with an "Exception not raised" message naming the callable.
        self.assertRaisesRegex(
                self.failureException, '^Exception not raised by <lambda>$',
                self.assertRaisesRegex, Exception, re.compile('x'),
                lambda: None)
        self.assertRaisesRegex(
                self.failureException, '^Exception not raised by <lambda>$',
                self.assertRaisesRegex, Exception, 'x',
                lambda: None)
    def testAssertRaisesRegexMismatch(self):
        # When the exception is raised but its message does not match, the
        # failure message quotes both the pattern and the actual message.
        def Stub():
            raise Exception('Unexpected')

        self.assertRaisesRegex(
                self.failureException,
                r'"\^Expected\$" does not match "Unexpected"',
                self.assertRaisesRegex, Exception, '^Expected$',
                Stub)
        self.assertRaisesRegex(
                self.failureException,
                r'"\^Expected\$" does not match "Unexpected"',
                self.assertRaisesRegex, Exception,
                re.compile('^Expected$'), Stub)
    def testAssertRaisesExcValue(self):
        # The context-manager form exposes the caught exception instance
        # (with its args) via ctx.exception.
        class ExceptionMock(Exception):
            pass

        def Stub(foo):
            raise ExceptionMock(foo)
        v = "particular value"

        ctx = self.assertRaises(ExceptionMock)
        with ctx:
            Stub(v)
        e = ctx.exception
        self.assertIsInstance(e, ExceptionMock)
        self.assertEqual(e.args[0], v)
    def testAssertWarnsCallable(self):
        # assertWarns in callable form: matching warning passes (repeatedly,
        # despite __warningregistry__), tuples of categories work, and
        # unrelated warning filters must be left untouched.
        def _runtime_warn():
            warnings.warn("foo", RuntimeWarning)
        # Success when the right warning is triggered, even several times
        self.assertWarns(RuntimeWarning, _runtime_warn)
        self.assertWarns(RuntimeWarning, _runtime_warn)
        # A tuple of warning classes is accepted
        self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
        # *args and **kwargs also work
        self.assertWarns(RuntimeWarning,
                         warnings.warn, "foo", category=RuntimeWarning)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            self.assertWarns(RuntimeWarning, lambda: 0)
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                self.assertWarns(DeprecationWarning, _runtime_warn)
        # Filters for other warnings are not modified
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises(RuntimeWarning):
                self.assertWarns(DeprecationWarning, _runtime_warn)
    def testAssertWarnsContext(self):
        # Believe it or not, it is preferable to duplicate all tests above,
        # to make sure the __warningregistry__ $@ is circumvented correctly.
        # CAUTION: the cm.lineno assertion below depends on warnings.warn()
        # being exactly one line after the def of _runtime_warn -- do not
        # insert lines between them.
        def _runtime_warn():
            warnings.warn("foo", RuntimeWarning)
        _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
        with self.assertWarns(RuntimeWarning) as cm:
            _runtime_warn()
        # A tuple of warning classes is accepted
        with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
            _runtime_warn()
        # The context manager exposes various useful attributes
        self.assertIsInstance(cm.warning, RuntimeWarning)
        self.assertEqual(cm.warning.args[0], "foo")
        self.assertIn("test_case.py", cm.filename)
        self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
        # Same with several warnings
        with self.assertWarns(RuntimeWarning):
            _runtime_warn()
            _runtime_warn()
        with self.assertWarns(RuntimeWarning):
            warnings.warn("foo", category=RuntimeWarning)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            with self.assertWarns(RuntimeWarning):
                pass
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                with self.assertWarns(DeprecationWarning):
                    _runtime_warn()
        # Filters for other warnings are not modified
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises(RuntimeWarning):
                with self.assertWarns(DeprecationWarning):
                    _runtime_warn()
    def testAssertWarnsRegexCallable(self):
        # assertWarnsRegex (callable form): category AND message pattern must
        # both match for success.
        def _runtime_warn(msg):
            warnings.warn(msg, RuntimeWarning)
        self.assertWarnsRegex(RuntimeWarning, "o+",
                              _runtime_warn, "foox")
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            self.assertWarnsRegex(RuntimeWarning, "o+",
                                  lambda: 0)
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                self.assertWarnsRegex(DeprecationWarning, "o+",
                                      _runtime_warn, "foox")
        # Failure when message doesn't match
        with self.assertRaises(self.failureException):
            self.assertWarnsRegex(RuntimeWarning, "o+",
                                  _runtime_warn, "barz")
        # A little trickier: we ask RuntimeWarnings to be raised, and then
        # check for some of them.  It is implementation-defined whether
        # non-matching RuntimeWarnings are simply re-raised, or produce a
        # failureException.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises((RuntimeWarning, self.failureException)):
                self.assertWarnsRegex(RuntimeWarning, "o+",
                                      _runtime_warn, "barz")
    def testAssertWarnsRegexContext(self):
        # Same as above, but with assertWarnsRegex as a context manager
        # CAUTION: cm.lineno depends on warnings.warn() sitting exactly one
        # line after the def of _runtime_warn -- keep that layout.
        def _runtime_warn(msg):
            warnings.warn(msg, RuntimeWarning)
        _runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
        with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
            _runtime_warn("foox")
        self.assertIsInstance(cm.warning, RuntimeWarning)
        self.assertEqual(cm.warning.args[0], "foox")
        self.assertIn("test_case.py", cm.filename)
        self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
        # Failure when no warning is triggered
        with self.assertRaises(self.failureException):
            with self.assertWarnsRegex(RuntimeWarning, "o+"):
                pass
        # Failure when another warning is triggered
        with warnings.catch_warnings():
            # Force default filter (in case tests are run with -We)
            warnings.simplefilter("default", RuntimeWarning)
            with self.assertRaises(self.failureException):
                with self.assertWarnsRegex(DeprecationWarning, "o+"):
                    _runtime_warn("foox")
        # Failure when message doesn't match
        with self.assertRaises(self.failureException):
            with self.assertWarnsRegex(RuntimeWarning, "o+"):
                _runtime_warn("barz")
        # A little trickier: we ask RuntimeWarnings to be raised, and then
        # check for some of them.  It is implementation-defined whether
        # non-matching RuntimeWarnings are simply re-raised, or produce a
        # failureException.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            with self.assertRaises((RuntimeWarning, self.failureException)):
                with self.assertWarnsRegex(RuntimeWarning, "o+"):
                    _runtime_warn("barz")
    def testDeprecatedMethodNames(self):
        """
        Test that the deprecated methods raise a DeprecationWarning. See #9424.
        """
        # Each pair is (deprecated alias, args that make the call succeed);
        # the assertion is only about the DeprecationWarning being emitted.
        old = (
            (self.failIfEqual, (3, 5)),
            (self.assertNotEquals, (3, 5)),
            (self.failUnlessEqual, (3, 3)),
            (self.assertEquals, (3, 3)),
            (self.failUnlessAlmostEqual, (2.0, 2.0)),
            (self.assertAlmostEquals, (2.0, 2.0)),
            (self.failIfAlmostEqual, (3.0, 5.0)),
            (self.assertNotAlmostEquals, (3.0, 5.0)),
            (self.failUnless, (True,)),
            (self.assert_, (True,)),
            (self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
            (self.failIf, (False,)),
            (self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
            (self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
            (self.assertRegexpMatches, ('bar', 'bar')),
        )
        for meth, args in old:
            with self.assertWarns(DeprecationWarning):
                meth(*args)
    # disable this test for now. When the version where the fail* methods will
    # be removed is decided, re-enable it and update the version
    # (leading underscore keeps it out of test discovery on purpose)
    def _testDeprecatedFailMethods(self):
        """Test that the deprecated fail* methods get removed in 3.x"""
        if sys.version_info[:2] < (3, 3):
            return
        deprecated_names = [
            'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
            'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
            'assertDictContainsSubset',
        ]
        for deprecated_name in deprecated_names:
            with self.assertRaises(AttributeError):
                getattr(self, deprecated_name)  # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
    def testPickle(self):
        # Issue 10326
        # Can't use TestCase classes defined in Test class as
        # pickle does not work with inner classes
        test = unittest.TestCase('run')
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            # blew up prior to fix
            pickled_test = pickle.dumps(test, protocol=protocol)
            unpickled_test = pickle.loads(pickled_test)
            self.assertEqual(test, unpickled_test)

            # exercise the TestCase instance in a way that will invoke
            # the type equality lookup mechanism
            unpickled_test.assertEqual(set(), set())
    def testKeyboardInterrupt(self):
        # KeyboardInterrupt must propagate out of run() no matter where it
        # fires: test body, setUp, tearDown, or a cleanup function.
        def _raise(self=None):
            raise KeyboardInterrupt
        def nothing(self):
            pass

        class Test1(unittest.TestCase):
            test_something = _raise

        class Test2(unittest.TestCase):
            setUp = _raise
            test_something = nothing

        class Test3(unittest.TestCase):
            test_something = nothing
            tearDown = _raise

        class Test4(unittest.TestCase):
            def test_something(self):
                self.addCleanup(_raise)

        for klass in (Test1, Test2, Test3, Test4):
            with self.assertRaises(KeyboardInterrupt):
                klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
|
|
from unittest.case import TestCase
from tracker.domain import ContestFactory, Participant, Fact, ContestValidator,\
ValidationException, ContestRanker
class TestDomainMuurkeKlop(TestCase):
    '''This test case covers how we want to create and keep track of a single game of Muurke Klop.
    - N-down: N Participants compete in a single game of (N-1) games, where each Participant will receive points depending on his rank
    - X-strikes out: N Participants compete in ((N - 1) * X) to ((N * X) - 1) games
    '''

    def testBasicNDown(self):
        '''A game simply stores the "Points" Facts added for its Participants.'''
        # Note: we do not create a GameTemplate, as that's not our core concern.
        # We'll add a "Competition flow" concept later on!
        # At this point we just want to store the Facts / Events for a game and be done with it!
        game = ContestFactory.createContest('Muurke Klop - 10 Down')
        participants = self.__createParticipants()
        for participant in participants:
            game.addParticipant(participant)
        # Once all Participants have been added, we can start adding Facts!
        # (participant index, points) pairs -- the valid 10-Down point values.
        for index, points in [(2, 1), (7, 2), (6, 3), (5, 4),
                              (4, 5), (1, 6), (0, 8), (3, 10)]:
            game.addFact(Fact('Points', points, participants[index]))
        # This is all the input we need in order to create a ranking!
        self.assertEqual(8, len(game.facts))
        self.__assertFact(game, participants[0], 8)
        self.__assertFact(game, participants[1], 6)
        self.__assertFact(game, participants[2], 1)

    def testValidation(self):
        '''Make sure that the Game state remains valid while adding or removing Facts.'''
        game = ContestFactory.createContest('Muurke Klop - 10 Down')
        validator = ContestValidator()
        validator.addRule(ContestValidator.minParticipants('Game', 2))
        validator.addRule(ContestValidator.maxParticipants('Game', 8))

        def validatePoints(game):
            '''Validation function for the "Points" Facts of a game of Muurke Klop.

            Returns True when the game state is valid, or an error message
            string describing the first violation found.
            '''
            validPoints = {1, 2, 3, 4, 5, 6, 8, 10}
            gamePoints = [fact for fact in game.facts if 'Points' == fact.type]
            # Points value should be within a valid range
            assignedPoints = {points.value for points in gamePoints}
            diff = [points for points in assignedPoints if points not in validPoints]
            if diff:
                return 'Invalid Points value(s) have been added: %s' % str(diff)
            # Points values should be consecutive, no gaps should be left!
            if assignedPoints:
                highest = max(assignedPoints)
                diff = [points for points in validPoints
                        if points not in assignedPoints and points < highest]
                if diff:
                    return 'Invalid Points, missing values: %s' % str(diff)
            # Each Participant can have only one single Points Fact.
            # (seen.add() returns None, so the 'or' clause records unseen
            # participants while the whole condition stays falsy for them.)
            seen = set()
            dupes = list(set(f.participant.name for f in gamePoints
                             if f.participant in seen or seen.add(f.participant)))
            if dupes:
                return 'Duplicate Points: Participants %s already have Points assigned' % str(dupes)
            # Each Points value can be assigned only once!
            seen = set()
            dupes = list(set(f.value for f in gamePoints
                             if f.value in seen or seen.add(f.value)))
            if dupes:
                return 'Duplicate Points: Points values %s have already been assigned' % str(dupes)
            return True

        validator.addRule(validatePoints)
        participants = self.__createParticipants()
        game.addParticipant(participants[0])
        # A game needs at least two Participants.
        try:
            validator.validate(game)
            self.fail('Game needs at least 2 Participants!')
        except ValidationException as e:
            self.assertEqual(str(e), 'Game needs at least 2 Participants!')
        game.addParticipant(participants[1])
        validator.validate(game)
        # Points Facts should start by 1
        try:
            game.addFact(Fact('Points', 3, participants[0]))
            validator.validate(game)
            self.fail('The first Points assigned should be "1"')
        except ValidationException as e:
            self.assertEqual('Invalid Points, missing values: [1, 2]', str(e))
        game.addFact(Fact('Points', 1, participants[2]))
        game.addFact(Fact('Points', 2, participants[4]))
        # Points Fact has already been added for this game, be it for a different Participant
        try:
            game.addFact(Fact('Points', 3, participants[1]))
            validator.validate(game)
            self.fail('3 Points have already been added for another Participant!')
        except ValidationException as e:
            self.assertEqual('Duplicate Points: Points values [3] have already been assigned', str(e))
        game.clearFacts()
        # Every Participant can have only 1 "Points" Fact per game...
        try:
            game.addFact(Fact('Points', 1, participants[0]))
            game.addFact(Fact('Points', 2, participants[0]))
            validator.validate(game)
            self.fail('Duplicate Points: Participant "Player One" already has Points assigned!')
        except ValidationException as e:
            self.assertEqual('Duplicate Points: Participants [\'Player One\'] already have Points assigned', str(e))
        game.clearFacts()
        # Each Points Fact value can only occur once!
        try:
            game.addFact(Fact('Points', 1, participants[0]))
            game.addFact(Fact('Points', 1, participants[1]))
            validator.validate(game)
            self.fail('Duplicate Points: Points value "1" has already been assigned!')
        except ValidationException as e:
            self.assertEqual('Duplicate Points: Points values [1] have already been assigned', str(e))
        game.clearFacts()
        # Adding an invalid Points Fact...
        try:
            game.addFact(Fact('Points', 7, participants[0]))
            validator.validate(game)
            self.fail('7 is not a valid Points value!')
        except ValidationException as e:
            self.assertEqual('Invalid Points value(s) have been added: [7]', str(e))
        game.clearFacts()
        # We need to let the game know what type of Facts are valid.
        # TODO :: We'll do this by adding some sort of Validator instance...
        # In the future, we could make this highly configurable through external configuration??
        # TODO :: game.addParticipant

    def testRankingComplete(self):
        '''One of the most important things we want from a Contest is to get
        a (final) ranking of all Participants.
        For this purpose, we've created the ContestRanker.
        '''
        game = ContestFactory.createContest('Muurke Klop - 10 Down')
        participants = self.__createParticipants()
        for participant in participants:
            game.addParticipant(participant)
        # Highest Points => rank 1; build the expectation alongside the Facts.
        expectedRank = 8
        expectedRanking = {}
        for participant, points in zip(participants, [1, 2, 3, 4, 5, 6, 8, 10]):
            game.addFact(Fact('Points', points, participant))
            expectedRanking[expectedRank] = [(participant, points)]
            expectedRank -= 1
        self.assertEqual(8, len(game.facts))
        ranker = ContestRanker()
        ranker.addRule(ContestRanker.byFactTypeValue('Points'))
        ranking = ranker.rankParticipants(game)
        self.assertEqual(expectedRanking, ranking)

    def testRankingOngoing(self):
        '''In case of an ongoing game, we still want to be able to rank!'''
        game = ContestFactory.createContest('Muurke Klop - 10 Down')
        participants = self.__createParticipants()
        for participant in participants:
            game.addParticipant(participant)
        expectedRank = 5
        expectedRanking = {}
        for participant, points in zip(participants, [1, 2, 3, 4, 5]):
            game.addFact(Fact('Points', points, participant))
            expectedRanking[expectedRank] = [(participant, points)]
            expectedRank -= 1
        # Participants without Points yet share the bottom rank.
        # Note: Participants will be ranked by Name (reversed!) in case of a tie...
        expectedRank = 6
        expectedRanking[expectedRank] = []
        for i in [5, 6, 7]:
            expectedRanking[expectedRank].append((participants[i], 0))
        self.assertEqual(5, len(game.facts))
        ranker = ContestRanker()
        ranker.addRule(ContestRanker.byFactTypeValue('Points'))
        ranking = ranker.rankParticipants(game)
        self.assertEqual(expectedRanking, ranking)

    def __printRanking(self, ranking):
        '''Debug helper: dump a {rank: [(participant, score), ...]} dict to stdout.'''
        for (rank, entries) in ranking.items():
            for (participant, score) in entries:
                print('%d: %s (%d)' % (rank, participant.name, score))
        print('-----')

    # So what is still missing?
    # - final score / ranking, but not sure whether or not that's a concern for the Contest class...
    # - Option to undo the a Fact of a given type, while still maintaining succession rules (so only highest/latest score can be undone)
    # - validation: cannot add Participants once Scores have been given!?
    # TODO: add test cases

    def __createParticipants(self):
        '''Create the list of eight default Participant value objects.'''
        names = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight']
        return [Participant('Player %s' % name) for name in names]

    def __assertFact(self, game, participant, points):
        '''Assert that *game* holds a 'Points' Fact awarding *points* to *participant*.'''
        for fact in game.facts:
            if 'Points' == fact.type and participant.name == fact.participant.name:
                # assertEqual (assertEquals is a deprecated alias).
                self.assertEqual(points, fact.value,
                                 'Expected Participant "%s" to have %d Points' % (participant.name, points))
                return
        raise AssertionError('Expected Participant "%s" to have %d Points, but no matching Fact was found'
                             % (participant.name, points))
|
|
# huffman.py
# Author: Dixon Crews
# CSC 505-001, Fall 2016
# Homework 3, #5
###############################################################################
# Import needed libraries
import math, sys, binascii
###############################################################################
# Node class
class Node:
    """A node of the Huffman tree.

    Leaves carry a symbol in ``value`` (a two-character hex string or
    'EOF'); internal nodes have ``value`` None.  ``freq`` is the weight
    used for min-heap ordering.
    """

    def __init__(self, v, f):
        # Instance attributes instead of class-level attributes, so every
        # node owns its own state.
        self.value = v
        self.freq = f
        self.left = None    # left child (edge labelled "0")
        self.right = None   # right child (edge labelled "1")
###############################################################################
# Define the huffman() function
def huffman(h):
    """Build a Huffman tree from the min-heap *h* of Nodes and return a
    dict mapping each leaf value to its binary code string.

    The heap is consumed down to a single root node.
    Algorithm from textbook, p. 431.
    """
    while len(h) > 1:
        # Take the two lowest-frequency subtrees...
        x = heapRemoveMin(h)
        y = heapRemoveMin(h)
        # ...merge them under a new internal node (value None marks it
        # as internal) whose weight is the sum of frequencies...
        z = Node(None, x.freq + y.freq)
        z.left = x
        z.right = y
        # ...and re-insert the merged tree into the heap.
        heapInsert(h, z)
    # Walk the tree assigning "0" for left edges and "1" for right edges.
    # Code adapted from
    # http://www.techrepublic.com/article/huffman-coding-in-python/
    codes = {}

    def getCodes(prefix, node):
        # 'is not None' rather than truthiness, so falsy leaf values
        # (e.g. 0 or '') would still be recognised as leaves.
        if node.value is not None:
            # A single-symbol tree has an empty path; give it code "0".
            codes[node.value] = prefix or "0"
        else:
            getCodes(prefix + "0", node.left)
            getCodes(prefix + "1", node.right)

    getCodes("", h[0])
    return codes
# Tri-nary heap functions
# Define the parent() function
def parent(i):
    """Return the index of the parent of node *i* in the ternary heap."""
    # Integer floor division is exact for arbitrarily large indices,
    # unlike math.floor((i - 1) / 3), which round-trips through a float
    # and can lose precision for huge i.
    return (i - 1) // 3
# Define the left() function
def left(i):
    """Index of the first (leftmost) child of node *i* in the ternary heap."""
    return 3 * i + 1
# Define the middle() function
def middle(i):
    """Index of the middle child of node *i* in the ternary heap."""
    return 3 * i + 2
# Define the right() function
def right(i):
    """Index of the last (rightmost) child of node *i* in the ternary heap."""
    return 3 * i + 3
# Define the minChild() function
def minChild(arr, i):
    """Return the index of the lowest-frequency child of node *i*,
    or 0 if node *i* has no children at all."""
    n = len(arr)
    first = left(i)
    # No left child means no children at all in a complete ternary heap.
    if first >= n:
        return 0
    best = first
    # Compare the middle and right children (when they exist) against the
    # current best; ties keep the earlier (leftmost) child.
    for candidate in (middle(i), right(i)):
        if candidate < n and arr[candidate].freq < arr[best].freq:
            best = candidate
    return best
# Define the percolateUp() function
def percolateUp(arr, i):
    """Sift the node at index *i* up until the heap order property holds."""
    while i > 0:
        up = parent(i)
        if arr[up].freq <= arr[i].freq:
            break  # parent is no larger: heap order restored
        arr[up], arr[i] = arr[i], arr[up]
        i = up
# Define the percolateDown() function
def percolateDown(arr, i):
    """Sift the node at index *i* down until the heap order property holds."""
    # minChild() returns 0 when node i has no children.
    child = minChild(arr, i)
    while child != 0 and arr[child].freq < arr[i].freq:
        # The smallest child is lighter than the node: swap and continue
        # from the position we swapped into.
        arr[i], arr[child] = arr[child], arr[i]
        i = child
        child = minChild(arr, i)
# Define the heapInsert() function
def heapInsert(arr, value):
    """Insert *value* into the ternary min-heap *arr*."""
    # Append at the end, then sift the new element up into place.
    arr.append(value)
    percolateUp(arr, len(arr) - 1)
# Define the heapRemoveMin() function
def heapRemoveMin(arr):
    """Remove and return the minimum node of the heap, or None if empty."""
    if not arr:
        return None
    root = arr[0]
    # Shrink the heap by one, moving the last element into the root slot
    # (unless it was the only element), then restore heap order.
    last = arr.pop()
    if arr:
        arr[0] = last
        percolateDown(arr, 0)
    return root
###############################################################################
# Generate all of the hex values from 0 to 255
# and store them in a dictionary with 1 as the value
# Build a frequency table over all 256 byte values, Laplace-style: every
# possible byte starts at 1 so it always receives a code, and observed
# bytes add their count on top.
hexDict = {}
for hexValue in [hex(x)[2:].zfill(2) for x in range(256)]:
    hexDict[hexValue] = 1

# Count the bytes on stdin.  Reading in 64 KiB chunks instead of one byte
# at a time pays the per-call overhead once per chunk, not once per byte.
inputDict = {}
while True:
    chunk = sys.stdin.buffer.read(65536)
    if not chunk:
        break  # EOF
    for byte in chunk:
        # Keep length-1 bytes objects as keys (iterating bytes yields ints).
        key = bytes((byte,))
        inputDict[key] = inputDict.get(key, 0) + 1

# Fold the observed counts into the smoothed table.
for item in inputDict:
    hexDict[binascii.hexlify(item).decode('ascii')] = inputDict[item] + 1

# Reserve a (rare) symbol for the end-of-file marker.
hexDict['EOF'] = 1

# Load every symbol into the ternary min-heap.
heap = []
for item in hexDict:
    heapInsert(heap, Node(item, hexDict[item]))

# Build the Huffman code table.
huff = huffman(heap)

# Print the codes: printable ASCII bytes as themselves, everything else as
# upper-case hex, and EOF last.  Fixed-width lower-case hex strings sort
# and compare in the same order as their numeric values.
for item in sorted(huff):
    if '21' <= item <= '7e':  # printable ASCII range ('EOF' falls outside it)
        print('{:>3} {}'.format(binascii.unhexlify(item).decode("utf-8"), huff[item]))
    elif item != 'EOF':
        print('{:>3} {}'.format(item.upper(), huff[item]))
print('EOF', huff['EOF'])
|
|
"""
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
import inspect
import logging
import re
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.inspect import getargspec
from django.utils.safestring import SafeData, mark_safe
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import gettext_lazy, pgettext_lazy
from .exceptions import TemplateSyntaxError
# Token types produced by the lexers (plain ints so Parser.parse() can
# compare them cheaply).
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
# Human-readable names for the token types, used by Token.__str__().
TOKEN_MAPPING = {
    TOKEN_TEXT: 'Text',
    TOKEN_VAR: 'Var',
    TOKEN_BLOCK: 'Block',
    TOKEN_COMMENT: 'Comment',
}

# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'

# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'

# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
          (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
           re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
           re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))

logger = logging.getLogger('django.template')
class VariableDoesNotExist(Exception):
    """Raised when a template variable cannot be resolved.

    *msg* may contain %-style placeholders that are filled in from
    *params* when the exception is rendered as a string.
    """

    def __init__(self, msg, params=()):
        self.msg, self.params = msg, params

    def __str__(self):
        return self.msg % self.params
class Origin:
    """Describes where a template came from (a name, an optional template
    name, and the loader that produced it, if any)."""

    def __init__(self, name, template_name=None, loader=None):
        self.name = name
        self.template_name = template_name
        self.loader = loader

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Two origins are equal when they share both name and loader;
        # anything that is not an Origin compares unequal.
        return (
            isinstance(other, Origin)
            and self.name == other.name
            and self.loader == other.loader
        )

    @property
    def loader_name(self):
        """Dotted path of the loader's class, or None without a loader."""
        if not self.loader:
            return None
        return '%s.%s' % (
            self.loader.__module__, self.loader.__class__.__name__,
        )
class Template:
    """A compiled template: lexes and parses *template_string* on
    construction and exposes render(context) for the display stage."""

    def __init__(self, template_string, origin=None, name=None, engine=None):
        # If Template is instantiated directly rather than from an Engine and
        # exactly one Django template engine is configured, use that engine.
        # This is required to preserve backwards-compatibility for direct use
        # e.g. Template('...').render(Context({...}))
        if engine is None:
            from .engine import Engine
            engine = Engine.get_default()
        if origin is None:
            origin = Origin(UNKNOWN_SOURCE)
        self.name = name
        self.origin = origin
        self.engine = engine
        self.source = template_string
        self.nodelist = self.compile_nodelist()

    def __iter__(self):
        # Yield every node of the compiled tree, depth first.
        for node in self.nodelist:
            for subnode in node:
                yield subnode

    def _render(self, context):
        return self.nodelist.render(context)

    def render(self, context):
        "Display stage -- can be called many times"
        with context.render_context.push_state(self):
            if context.template is None:
                # Outermost render: bind this template to the context so
                # nested renders reuse it.
                with context.bind_template(self):
                    context.template_name = self.name
                    return self._render(context)
            else:
                return self._render(context)

    def compile_nodelist(self):
        """
        Parse and compile the template source into a nodelist. If debug
        is True and an exception occurs during parsing, the exception is
        annotated with contextual line information where it occurred in the
        template source.
        """
        if self.engine.debug:
            lexer = DebugLexer(self.source)
        else:
            lexer = Lexer(self.source)

        tokens = lexer.tokenize()
        parser = Parser(
            tokens, self.engine.template_libraries, self.engine.template_builtins,
            self.origin,
        )

        try:
            return parser.parse()
        except Exception as e:
            if self.engine.debug:
                e.template_debug = self.get_exception_info(e, e.token)
            raise

    def get_exception_info(self, exception, token):
        """
        Return a dictionary containing contextual line information of where
        the exception occurred in the template. The following information is
        provided:

        message
            The message of the exception raised.

        source_lines
            The lines before, after, and including the line the exception
            occurred on.

        line
            The line number the exception occurred on.

        before, during, after
            The line the exception occurred on split into three parts:
            1. The content before the token that raised the error.
            2. The token that raised the error.
            3. The content after the token that raised the error.

        total
            The number of lines in source_lines.

        top
            The line number where source_lines starts.

        bottom
            The line number where source_lines ends.

        start
            The start position of the token in the template source.

        end
            The end position of the token in the template source.
        """
        start, end = token.position
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # next_pos (renamed from 'next' to avoid shadowing the builtin) is
        # the offset just past the current line.
        for num, next_pos in enumerate(linebreak_iter(self.source)):
            if start >= upto and end <= next_pos:
                # The token lies entirely on this line: split it into the
                # text before, the token itself, and the text after.
                line = num
                before = escape(self.source[upto:start])
                during = escape(self.source[start:end])
                after = escape(self.source[end:next_pos])
            source_lines.append((num, escape(self.source[upto:next_pos])))
            upto = next_pos
        total = len(source_lines)

        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)

        # In some rare cases exc_value.args can be empty or an invalid
        # string.
        try:
            message = str(exception.args[0])
        except (IndexError, UnicodeDecodeError):
            message = '(Could not get exception message)'

        return {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': self.origin.name,
            'start': start,
            'end': end,
        }
def linebreak_iter(template_source):
    """Yield the start offset of every line in *template_source*, followed
    by a final sentinel of len(template_source) + 1."""
    yield 0
    pos = 0
    while True:
        newline = template_source.find('\n', pos)
        if newline < 0:
            break
        # The next line starts just past the newline character.
        yield newline + 1
        pos = newline + 1
    yield len(template_source) + 1
class Token:
    def __init__(self, token_type, contents, position=None, lineno=None):
        """
        A token representing a string from the template.

        token_type
            One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.

        contents
            The token source string.

        position
            An optional tuple containing the start and end index of the token
            in the template source. This is used for traceback information
            when debug is on.

        lineno
            The line number the token appears on in the template source.
            This is used for traceback information and gettext files.
        """
        self.token_type = token_type
        self.contents = contents
        self.lineno = lineno
        self.position = position

    def __str__(self):
        token_name = TOKEN_MAPPING[self.token_type]
        # Only the first 20 characters (sans newlines) are shown.
        preview = self.contents[:20].replace('\n', '')
        return '<%s token: "%s...">' % (token_name, preview)

    def split_contents(self):
        """Split contents on whitespace, keeping quoted strings intact and
        re-joining pieces of translation-marked literals (_("...") that
        smart_split broke apart)."""
        pieces = []
        bits = iter(smart_split(self.contents))
        for bit in bits:
            if bit.startswith(('_("', "_('")):
                # Consume following bits until the closing quote+paren of
                # the translation marker is reached, then glue them back.
                sentinel = bit[2] + ')'
                trans_bit = [bit]
                while not bit.endswith(sentinel):
                    bit = next(bits)
                    trans_bit.append(bit)
                bit = ' '.join(trans_bit)
            pieces.append(bit)
        return pieces
class Lexer:
    """Convert a template string into a flat list of Token objects."""

    def __init__(self, template_string):
        self.template_string = template_string
        # While inside {% verbatim %}...{% endverbatim %}, holds the name
        # of the expected closing tag; False otherwise.
        self.verbatim = False

    def tokenize(self):
        """
        Return a list of tokens from a given template_string.
        """
        tokens = []
        in_tag = False
        lineno = 1
        # tag_re captures its delimiters, so split() alternates strictly
        # between literal text and tag markup (some pieces may be empty).
        for piece in tag_re.split(self.template_string):
            if piece:
                tokens.append(self.create_token(piece, None, lineno, in_tag))
            in_tag = not in_tag
            lineno += piece.count('\n')
        return tokens

    def create_token(self, token_string, position, lineno, in_tag):
        """
        Convert the given token string into a new Token object and return it.
        If in_tag is True, we are processing something that matched a tag,
        otherwise it should be treated as a literal string.
        """
        if in_tag and token_string.startswith(BLOCK_TAG_START):
            # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
            # The 2s are hard-coded for performance rather than computed
            # from len(BLOCK_TAG_START); the delimiters never change.
            block_content = token_string[2:-2].strip()
            if self.verbatim and block_content == self.verbatim:
                # Matching end tag found: leave verbatim mode.
                self.verbatim = False
        if in_tag and not self.verbatim:
            if token_string.startswith(VARIABLE_TAG_START):
                return Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
            elif token_string.startswith(BLOCK_TAG_START):
                if block_content[:9] in ('verbatim', 'verbatim '):
                    # Entering verbatim mode; remember the expected end tag
                    # (e.g. 'endverbatim' or 'endverbatim myblock').
                    self.verbatim = 'end%s' % block_content
                return Token(TOKEN_BLOCK, block_content, position, lineno)
            elif token_string.startswith(COMMENT_TAG_START):
                content = ''
                # NOTE(review): find() is used for truthiness here, so a
                # mark at index 0 (impossible after '{#') or absence (-1)
                # both keep the content -- effectively always true.
                if token_string.find(TRANSLATOR_COMMENT_MARK):
                    content = token_string[2:-2].strip()
                return Token(TOKEN_COMMENT, content, position, lineno)
        return Token(TOKEN_TEXT, token_string, position, lineno)
class DebugLexer(Lexer):
    def tokenize(self):
        """
        Split a template string into tokens and annotates each token with its
        start and end position in the source. This is slower than the default
        lexer so only use it when debug is True.
        """
        tokens = []
        lineno = 1
        upto = 0
        for match in tag_re.finditer(self.template_string):
            start, end = match.span()
            if start > upto:
                # Literal text between the previous tag and this one.
                text = self.template_string[upto:start]
                tokens.append(self.create_token(text, (upto, start), lineno, in_tag=False))
                lineno += text.count('\n')
            # The tag itself.
            tag = self.template_string[start:end]
            tokens.append(self.create_token(tag, (start, end), lineno, in_tag=True))
            lineno += tag.count('\n')
            upto = end
        # Trailing literal text after the final tag, if any.
        trailing = self.template_string[upto:]
        if trailing:
            tokens.append(self.create_token(trailing, (upto, upto + len(trailing)), lineno, in_tag=False))
        return tokens
class Parser:
    """Compile a list of lexer Tokens into a NodeList of renderable Nodes."""

    def __init__(self, tokens, libraries=None, builtins=None, origin=None):
        # tokens: Token objects from a Lexer, consumed front-to-back.
        self.tokens = tokens
        # Tag name -> compile function and filter name -> filter function,
        # populated from the registered libraries below.
        self.tags = {}
        self.filters = {}
        # Stack of (command, token) pairs for currently-open block tags;
        # used to report unclosed tags.
        self.command_stack = []

        if libraries is None:
            libraries = {}
        if builtins is None:
            builtins = []

        self.libraries = libraries
        for builtin in builtins:
            self.add_library(builtin)
        self.origin = origin

    def parse(self, parse_until=None):
        """
        Iterate through the parser tokens and compiles each one into a node.

        If parse_until is provided, parsing will stop once one of the
        specified tokens has been reached. This is formatted as a list of
        tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
        reached, raise an exception with the unclosed block tag details.
        """
        if parse_until is None:
            parse_until = []
        nodelist = NodeList()
        while self.tokens:
            token = self.next_token()
            # Use the raw values here for TOKEN_* for a tiny performance boost.
            if token.token_type == 0:  # TOKEN_TEXT
                self.extend_nodelist(nodelist, TextNode(token.contents), token)
            elif token.token_type == 1:  # TOKEN_VAR
                if not token.contents:
                    raise self.error(token, 'Empty variable tag on line %d' % token.lineno)
                try:
                    filter_expression = self.compile_filter(token.contents)
                except TemplateSyntaxError as e:
                    raise self.error(token, e)
                var_node = VariableNode(filter_expression)
                self.extend_nodelist(nodelist, var_node, token)
            elif token.token_type == 2:  # TOKEN_BLOCK
                try:
                    command = token.contents.split()[0]
                except IndexError:
                    raise self.error(token, 'Empty block tag on line %d' % token.lineno)
                if command in parse_until:
                    # A matching token has been reached. Return control to
                    # the caller. Put the token back on the token list so the
                    # caller knows where it terminated.
                    self.prepend_token(token)
                    return nodelist
                # Add the token to the command stack. This is used for error
                # messages if further parsing fails due to an unclosed block
                # tag.
                self.command_stack.append((command, token))
                # Get the tag callback function from the ones registered with
                # the parser.
                try:
                    compile_func = self.tags[command]
                except KeyError:
                    self.invalid_block_tag(token, command, parse_until)
                # Compile the callback into a node object and add it to
                # the node list.
                try:
                    compiled_result = compile_func(self, token)
                except Exception as e:
                    raise self.error(token, e)
                self.extend_nodelist(nodelist, compiled_result, token)
                # Compile success. Remove the token from the command stack.
                self.command_stack.pop()
        if parse_until:
            self.unclosed_block_tag(parse_until)
        return nodelist

    def skip_past(self, endtag):
        """Discard tokens until the block tag *endtag* is consumed."""
        while self.tokens:
            token = self.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
                return
        self.unclosed_block_tag([endtag])

    def extend_nodelist(self, nodelist, node, token):
        """Append *node* to *nodelist*, annotating it with its token/origin."""
        # Check that non-text nodes don't appear before an extends tag.
        if node.must_be_first and nodelist.contains_nontext:
            raise self.error(
                token, '%r must be the first tag in the template.' % node,
            )
        if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
            nodelist.contains_nontext = True
        # Set origin and token here since we can't modify the node __init__()
        # method.
        node.token = token
        node.origin = self.origin
        nodelist.append(node)

    def error(self, token, e):
        """
        Return an exception annotated with the originating token. Since the
        parser can be called recursively, check if a token is already set. This
        ensures the innermost token is highlighted if an exception occurs,
        e.g. a compile error within the body of an if statement.
        """
        if not isinstance(e, Exception):
            e = TemplateSyntaxError(e)
        if not hasattr(e, 'token'):
            e.token = token
        return e

    def invalid_block_tag(self, token, command, parse_until=None):
        """Raise a TemplateSyntaxError for an unregistered block tag."""
        if parse_until:
            raise self.error(
                token,
                "Invalid block tag on line %d: '%s', expected %s. Did you "
                "forget to register or load this tag?" % (
                    token.lineno,
                    command,
                    get_text_list(["'%s'" % p for p in parse_until], 'or'),
                ),
            )
        raise self.error(
            token,
            "Invalid block tag on line %d: '%s'. Did you forget to register "
            "or load this tag?" % (token.lineno, command)
        )

    def unclosed_block_tag(self, parse_until):
        """Raise a TemplateSyntaxError for the innermost unclosed block tag."""
        command, token = self.command_stack.pop()
        msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." % (
            token.lineno,
            command,
            ', '.join(parse_until),
        )
        raise self.error(token, msg)

    def next_token(self):
        """Pop and return the next unconsumed token."""
        return self.tokens.pop(0)

    def prepend_token(self, token):
        """Push *token* back so it is the next one consumed."""
        self.tokens.insert(0, token)

    def delete_first_token(self):
        """Drop the next unconsumed token without returning it."""
        del self.tokens[0]

    def add_library(self, lib):
        """Register the tags and filters of template library *lib*."""
        self.tags.update(lib.tags)
        self.filters.update(lib.filters)

    def compile_filter(self, token):
        """
        Convenient wrapper for FilterExpression
        """
        return FilterExpression(token, self)

    def find_filter(self, filter_name):
        """Return the registered filter *filter_name*, or raise."""
        if filter_name in self.filters:
            return self.filters[filter_name]
        else:
            raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string
'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string
'i18n_open': re.escape("_("),
'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
(?P<filter_name>\w+)
(?:%(arg_sep)s
(?:
(?P<constant_arg>%(constant)s)|
(?P<var_arg>[%(var_chars)s]+|%(num)s)
)
)?
)""" % {
'constant': constant_string,
'num': r'[-+\.]?\d[\d\.e]*',
'var_chars': r'\w\.',
'filter_sep': re.escape(FILTER_SEPARATOR),
'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.VERBOSE)
class FilterExpression:
    """
    Parse a variable token and its optional filters (all as a single string),
    and return a list of tuples of the filter name and arguments.
    Sample::

        >>> token = 'variable|default:"Default value"|date:"Y-m-d"'
        >>> p = Parser('')
        >>> fe = FilterExpression(token, p)
        >>> len(fe.filters)
        2
        >>> fe.var
        <Variable: 'variable'>
    """

    def __init__(self, token, parser):
        self.token = token
        # Walk the token with filter_re: the first match is the expression's
        # subject (a variable or a constant), every later match is one
        # "|filter[:arg]" segment.
        matches = filter_re.finditer(token)
        var_obj = None
        filters = []
        # `upto` tracks how far into the token we've consumed; any gap
        # between consecutive matches means unparseable characters.
        upto = 0
        for match in matches:
            start = match.start()
            if upto != start:
                raise TemplateSyntaxError("Could not parse some characters: "
                                          "%s|%s|%s" %
                                          (token[:upto], token[upto:start],
                                           token[start:]))
            if var_obj is None:
                # First match: the subject of the expression.
                var, constant = match.group("var", "constant")
                if constant:
                    try:
                        # Constants are resolved right away (empty context).
                        var_obj = Variable(constant).resolve({})
                    except VariableDoesNotExist:
                        var_obj = None
                elif var is None:
                    raise TemplateSyntaxError("Could not find variable at "
                                              "start of %s." % token)
                else:
                    var_obj = Variable(var)
            else:
                # Subsequent matches: filter segments.
                filter_name = match.group("filter_name")
                args = []
                constant_arg, var_arg = match.group("constant_arg", "var_arg")
                if constant_arg:
                    # (False, value): argument already resolved to a constant.
                    args.append((False, Variable(constant_arg).resolve({})))
                elif var_arg:
                    # (True, Variable): resolved against the context later.
                    args.append((True, Variable(var_arg)))
                filter_func = parser.find_filter(filter_name)
                # Validate the arity at compile time.
                self.args_check(filter_name, filter_func, args)
                filters.append((filter_func, args))
            upto = match.end()
        if upto != len(token):
            raise TemplateSyntaxError("Could not parse the remainder: '%s' "
                                      "from '%s'" % (token[upto:], token))

        self.filters = filters
        self.var = var_obj

    def resolve(self, context, ignore_failures=False):
        # Resolve the subject first...
        if isinstance(self.var, Variable):
            try:
                obj = self.var.resolve(context)
            except VariableDoesNotExist:
                if ignore_failures:
                    obj = None
                else:
                    # Engine-configured placeholder for unresolvable
                    # variables; '%s' in it is interpolated with the
                    # variable name.
                    string_if_invalid = context.template.engine.string_if_invalid
                    if string_if_invalid:
                        if '%s' in string_if_invalid:
                            return string_if_invalid % self.var
                        else:
                            return string_if_invalid
                    else:
                        obj = string_if_invalid
        else:
            # self.var holds an already-resolved constant.
            obj = self.var
        # ...then thread the value through each filter in turn.
        for func, args in self.filters:
            arg_vals = []
            for lookup, arg in args:
                if not lookup:
                    # Constant argument: mark safe so it isn't re-escaped.
                    arg_vals.append(mark_safe(arg))
                else:
                    arg_vals.append(arg.resolve(context))
            if getattr(func, 'expects_localtime', False):
                obj = template_localtime(obj, context.use_tz)
            if getattr(func, 'needs_autoescape', False):
                new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
            else:
                new_obj = func(obj, *arg_vals)
            # A filter declared is_safe keeps safe input safe.
            if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
                obj = mark_safe(new_obj)
            else:
                obj = new_obj
        return obj

    def args_check(name, func, provided):
        """Raise TemplateSyntaxError if *provided* doesn't fit *func*'s arity."""
        provided = list(provided)
        # First argument, filter input, is implied.
        plen = len(provided) + 1
        # Check to see if a decorator is providing the real function.
        func = getattr(func, '_decorated_function', func)

        # NOTE(review): getargspec is deprecated (removed in Python 3.11);
        # getfullargspec would be the modern replacement — confirm the
        # import site before changing.
        args, _, _, defaults = getargspec(func)
        alen = len(args)
        dlen = len(defaults or [])
        # Not enough OR Too many
        if plen < (alen - dlen) or plen > alen:
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
                                      (name, alen - dlen, plen))

        return True
    args_check = staticmethod(args_check)

    def __str__(self):
        return self.token
class Variable:
    """
    A template variable, resolvable against a given context. The variable may
    be a hard-coded string (if it begins and ends with single or double quote
    marks)::

        >>> c = {'article': {'section':'News'}}
        >>> Variable('article.section').resolve(c)
        'News'
        >>> Variable('article').resolve(c)
        {'section': 'News'}
        >>> class AClass: pass
        >>> c = AClass()
        >>> c.article = AClass()
        >>> c.article.section = 'News'

    (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
    """

    def __init__(self, var):
        self.var = var
        self.literal = None  # set when var parses as a number or quoted string
        self.lookups = None  # tuple of path parts when var is a real lookup
        self.translate = False  # True when var was wrapped in _( ... )
        self.message_context = None
        if not isinstance(var, str):
            raise TypeError(
                "Variable must be a string or number, got %s" % type(var))
        try:
            # First try to treat this variable as a number.
            #
            # Note that this could cause an OverflowError here that we're not
            # catching. Since this should only happen at compile time, that's
            # probably OK.
            self.literal = float(var)

            # So it's a float... is it an int? If the original value contained a
            # dot or an "e" then it was a float, not an int.
            if '.' not in var and 'e' not in var.lower():
                self.literal = int(self.literal)

            # "2." is invalid
            if var.endswith('.'):
                raise ValueError

        except ValueError:
            # A ValueError means that the variable isn't a number.
            if var.startswith('_(') and var.endswith(')'):
                # The result of the lookup should be translated at rendering
                # time.
                self.translate = True
                var = var[2:-1]
            # If it's wrapped with quotes (single or double), then
            # we're also dealing with a literal.
            try:
                self.literal = mark_safe(unescape_string_literal(var))
            except ValueError:
                # Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bonafide variable
                if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
                    raise TemplateSyntaxError("Variables and attributes may "
                                              "not begin with underscores: '%s'" %
                                              var)
                self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))

    def resolve(self, context):
        """Resolve this variable against a given context."""
        if self.lookups is not None:
            # We're dealing with a variable that needs to be resolved
            value = self._resolve_lookup(context)
        else:
            # We're dealing with a literal, so it's already been "resolved"
            value = self.literal
        if self.translate:
            is_safe = isinstance(value, SafeData)
            # Escape % so the msgid survives later %-interpolation.
            msgid = value.replace('%', '%%')
            msgid = mark_safe(msgid) if is_safe else msgid
            if self.message_context:
                return pgettext_lazy(self.message_context, msgid)
            else:
                return gettext_lazy(msgid)
        return value

    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.var)

    def __str__(self):
        return self.var

    def _resolve_lookup(self, context):
        """
        Perform resolution of a real variable (i.e. not a literal) against the
        given context.

        As indicated by the method's name, this method is an implementation
        detail and shouldn't be called by external code. Use Variable.resolve()
        instead.

        Each dotted part is tried as a dictionary key, then an attribute,
        then a list index; resolved callables are invoked along the way.
        """
        current = context
        try:  # catch-all for silent variable failures
            for bit in self.lookups:
                try:  # dictionary lookup
                    current = current[bit]
                # ValueError/IndexError are for numpy.array lookup on
                # numpy < 1.9 and 1.9+ respectively
                except (TypeError, AttributeError, KeyError, ValueError, IndexError):
                    try:  # attribute lookup
                        # Don't return class attributes if the class is the context:
                        if isinstance(current, BaseContext) and getattr(type(current), bit):
                            raise AttributeError
                        current = getattr(current, bit)
                    except (TypeError, AttributeError):
                        # Reraise if the exception was raised by a @property
                        if not isinstance(current, BaseContext) and bit in dir(current):
                            raise
                        try:  # list-index lookup
                            current = current[int(bit)]
                        except (IndexError,  # list index out of range
                                ValueError,  # invalid literal for int()
                                KeyError,  # current is a dict without `int(bit)` key
                                TypeError):  # unsubscriptable object
                            raise VariableDoesNotExist("Failed lookup for key "
                                                       "[%s] in %r",
                                                       (bit, current))  # missing attribute
                if callable(current):
                    if getattr(current, 'do_not_call_in_templates', False):
                        pass
                    elif getattr(current, 'alters_data', False):
                        # Refuse to call data-altering callables from templates.
                        current = context.template.engine.string_if_invalid
                    else:
                        try:  # method call (assuming no args required)
                            current = current()
                        except TypeError:
                            try:
                                inspect.getcallargs(current)
                            except TypeError:  # arguments *were* required
                                current = context.template.engine.string_if_invalid  # invalid method call
                            else:
                                raise
        except Exception as e:
            template_name = getattr(context, 'template_name', None) or 'unknown'
            logger.debug(
                "Exception while resolving variable '%s' in template '%s'.",
                bit,
                template_name,
                exc_info=True,
            )
            if getattr(e, 'silent_variable_failure', False):
                current = context.template.engine.string_if_invalid
            else:
                raise

        return current
class Node:
    """Base class for compiled template nodes."""

    # True for nodes that must come first in the template (text nodes may
    # still precede them).
    must_be_first = False
    # Attribute names that may hold child NodeLists; walked by
    # get_nodes_by_type().
    child_nodelists = ('nodelist',)
    # The originating lexer token; assigned externally after construction.
    token = None

    def render(self, context):
        """
        Return the node rendered as a string.
        """
        pass

    def render_annotated(self, context):
        """
        Render the node. If debug is True and an exception occurs during
        rendering, the exception is annotated with contextual line information
        where it occurred in the template. For internal usage this method is
        preferred over using the render method directly.
        """
        try:
            return self.render(context)
        except Exception as e:
            debugging = context.template.engine.debug
            if debugging and not hasattr(e, 'template_debug'):
                template = context.render_context.template
                e.template_debug = template.get_exception_info(e, self.token)
            raise

    def __iter__(self):
        yield self

    def get_nodes_by_type(self, nodetype):
        """
        Return a list of all nodes (within this node and its nodelist)
        of the given type
        """
        matches = [self] if isinstance(self, nodetype) else []
        for attr in self.child_nodelists:
            children = getattr(self, attr, None)
            if children:
                matches.extend(children.get_nodes_by_type(nodetype))
        return matches
class NodeList(list):
    """A list of nodes, rendered in order and concatenated."""

    # Flipped to True the first time a non-TextNode is inserted by
    # extend_nodelist().
    contains_nontext = False

    def render(self, context):
        rendered = [
            str(entry.render_annotated(context) if isinstance(entry, Node)
                else entry)
            for entry in self
        ]
        return mark_safe(''.join(rendered))

    def get_nodes_by_type(self, nodetype):
        "Return a list of all nodes of the given type"
        found = []
        for child in self:
            found.extend(child.get_nodes_by_type(nodetype))
        return found
class TextNode(Node):
    """A literal chunk of template text, rendered verbatim."""

    def __init__(self, s):
        self.s = s

    def __repr__(self):
        # Truncate long text so reprs stay readable.
        return "<%s: %r>" % (type(self).__name__, self.s[:25])

    def render(self, context):
        return self.s
def render_value_in_context(value, context):
    """
    Convert any value to a string to become part of a rendered template. This
    means escaping, if required, and conversion to a string. If value is a
    string, it's expected to already be translated.
    """
    value = template_localtime(value, use_tz=context.use_tz)
    value = localize(value, use_l10n=context.use_l10n)
    if not context.autoescape:
        return str(value)
    # Leave str (and its subclasses) untouched; coerce everything else
    # before conditionally escaping.
    if not issubclass(type(value), str):
        value = str(value)
    return conditional_escape(value)
class VariableNode(Node):
    """Node for a variable expression with optional filters."""

    def __init__(self, filter_expression):
        self.filter_expression = filter_expression

    def __repr__(self):
        return "<Variable Node: %s>" % self.filter_expression

    def render(self, context):
        try:
            resolved = self.filter_expression.resolve(context)
        except UnicodeDecodeError:
            # Unicode conversion can fail sometimes for reasons out of our
            # control (e.g. exception rendering). In that case, we fail
            # quietly.
            return ''
        return render_value_in_context(resolved, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")


def token_kwargs(bits, parser, support_legacy=False):
    """
    Parse token keyword arguments and return a dictionary of the arguments
    retrieved from the ``bits`` token list.

    `bits` is a list containing the remainder of the token (split by spaces)
    that is to be checked for arguments. Valid arguments are removed from this
    list.

    `support_legacy` - if True, the legacy format ``1 as foo`` is accepted.
    Otherwise, only the standard ``foo=1`` format is allowed.

    There is no requirement for all remaining token ``bits`` to be keyword
    arguments, so return the dictionary as soon as an invalid argument format
    is reached.
    """
    if not bits:
        return {}
    # The first bit determines which syntax this run of bits uses.
    head = kwarg_re.match(bits[0])
    kwarg_format = head and head.group(1)
    if not kwarg_format and (
            not support_legacy or len(bits) < 3 or bits[1] != 'as'):
        return {}

    kwargs = {}
    while bits:
        if kwarg_format:
            match = kwarg_re.match(bits[0])
            if not match or not match.group(1):
                break
            key, value = match.groups()
            del bits[:1]
        else:
            # Legacy "value as name" form.
            if len(bits) < 3 or bits[1] != 'as':
                break
            key, value = bits[2], bits[0]
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        # Legacy arguments are chained with "and".
        if bits and not kwarg_format:
            if bits[0] != 'and':
                break
            del bits[:1]
    return kwargs
|
|
"""
models.py
Defines the database models.
"""
from datetime import datetime
from sqlalchemy import event
from sqlalchemy.event import listens_for
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import UserMixin
import bcrypt
db = SQLAlchemy()
resourcecategory = db.Table(
'resourcecategory',
db.Column('resource_id', db.Integer, db.ForeignKey('resource.id'), primary_key=True),
db.Column('category_id', db.Integer, db.ForeignKey('category.id'), primary_key=True)
)
class Resource(db.Model):
    """
    A resource used to recommend health care
    providers to L.G.B.T.Q.I.A people.
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Unicode(250), nullable=False)
    organization = db.Column(db.Unicode(500))
    description = db.Column(db.UnicodeText)
    # Hidden from end users when False.
    visible = db.Column(db.Boolean, nullable=False, default=True)
    address = db.Column(db.Unicode(500))
    latitude = db.Column(db.Float)
    longitude = db.Column(db.Float)
    location = db.Column(db.Unicode(500))
    email = db.Column(db.Unicode(250))
    phone = db.Column(db.Unicode(50))
    fax = db.Column(db.Unicode(50))
    url = db.Column(db.Unicode(500))
    hours = db.Column(db.UnicodeText)
    source = db.Column(db.UnicodeText)
    # 10 characters — presumably a National Provider Identifier; confirm.
    npi = db.Column(db.Unicode(10))
    notes = db.Column(db.UnicodeText)
    date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    last_updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    date_verified = db.Column(db.Date)
    # Many-to-many link to Category via resourcecategory; the backref
    # exposes Category.resources as a dynamic query.
    categories = db.relationship('Category', secondary=resourcecategory,
                                 backref=db.backref('resources', lazy='dynamic'))
    # Denormalized category names/keywords, rebuilt by normalize_resource()
    # before every insert/update, for text-based searching.
    category_text = db.Column(db.UnicodeText)

    def __unicode__(self):
        return self.name
class Category(db.Model):
    """
    A category to which one or more resources can belong.
    """
    id = db.Column(db.Integer, primary_key=True)
    # Category names must be unique.
    name = db.Column(db.Unicode(100), nullable=False, unique=True)
    description = db.Column(db.UnicodeText)
    # Search keywords; denormalized into Resource.category_text by
    # normalize_resource().
    keywords = db.Column(db.UnicodeText)
    visible = db.Column(db.Boolean, nullable=False, default=True)
    date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)

    def __unicode__(self):
        return self.name
class User(UserMixin, db.Model):
    """
    A RAD user.
    """
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.Unicode(50), nullable=False, unique=True)
    # bcrypt hash of the user's password (never the plain text).
    password = db.Column(db.Unicode(128), nullable=False, server_default="")
    email = db.Column(db.Unicode(250), nullable=False)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    active = db.Column(db.Boolean, nullable=False, default=True)
    default_location = db.Column(db.Unicode(500))
    default_latitude = db.Column(db.Float)
    default_longitude = db.Column(db.Float)
    date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    """
    The name to display with a user's reviews.
    """
    display_name = db.Column(db.Unicode(100), nullable=False, server_default='')
    """
    Indicates if a user has confirmed their account by clicking
    the link in the provided email. The confirmation code
    will be stored in email_code.
    """
    # NOTE(review): Python-side default is False but server_default is '1'
    # (true) — possibly intentional so pre-existing rows count as activated;
    # confirm.
    email_activated = db.Column(db.Boolean, nullable=False, default=False, server_default='1')
    """
    The date/time the user requested a password reset. The code
    to reset the password will be stored in email_code.
    """
    reset_pass_date = db.Column(db.DateTime, nullable=True)
    """
    The code used for email registration and password resets.
    This will be a string representation of a UUID, in lowercase
    and without brackets.
    """
    email_code = db.Column(db.Unicode(36), nullable=True)

    def __init__(self, username=None, email=None, password=None):
        self.username = username
        self.email = email
        # Hash at construction time; gensalt() produces a fresh salt per user.
        if password is not None:
            self.password = bcrypt.hashpw(password, bcrypt.gensalt())

    def verify_password(self, password):
        # Hashing the candidate with the stored hash as the salt reproduces
        # the stored hash iff the password matches (classic bcrypt idiom).
        # NOTE(review): `==` is not a constant-time comparison —
        # bcrypt.checkpw would be preferable if the installed bcrypt
        # supports it; also, under Python 3 both arguments must be bytes —
        # confirm callers encode appropriately.
        return bcrypt.hashpw(password, self.password) == self.password

    def __unicode__(self):
        return self.username
class Review(db.Model):
    """
    A review of a resource by a user.
    """
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.UnicodeText, nullable=False)
    # Optional sub-ratings; values that are None or <= 0 are ignored by
    # normalize_review() when averaging.
    rating = db.Column(db.Integer)
    intake_rating = db.Column(db.Integer)
    staff_rating = db.Column(db.Integer)
    # Average of the provided sub-ratings; recomputed by normalize_review()
    # before every insert/update.
    composite_rating = db.Column(db.Float)
    visible = db.Column(db.Boolean, nullable=False, default=True)
    date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # Submitter's IP address (45 chars accommodates full IPv6 forms).
    ip = db.Column(db.Unicode(45))
    resource_id = db.Column(db.Integer, db.ForeignKey('resource.id'), nullable=False)
    resource = db.relationship('Resource',
                               backref=db.backref('reviews',
                                                  lazy='dynamic'))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    user = db.relationship('User',
                           backref=db.backref('reviews',
                                              lazy='dynamic'))
    # Self-referential versioning: a superseded review is flagged
    # is_old_review and points at its replacement via new_review_id.
    is_old_review = db.Column(db.Boolean, nullable=False, default=False, server_default='0')
    new_review_id = db.Column(db.Integer, db.ForeignKey('review.id'), nullable=True)
    # We want to passively delete here because we'll be manually updating
    # foreign key references in the review service.
    old_reviews = db.relationship('Review',
                                  backref=db.backref("new_review", remote_side=id),
                                  lazy="dynamic",
                                  passive_deletes=True)

    def __init__(self, rating=None, text=None, resource=None, user=None):
        self.text = text
        self.rating = rating
        self.resource = resource
        self.user = user

    def __unicode__(self):
        return self.text
class LoginHistory(db.Model):
    """
    A recorded login attempt (successful or otherwise) for a user.
    """
    id = db.Column(db.Integer, primary_key=True)
    login_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    ip = db.Column(db.Unicode(45), nullable=False)
    # Username as entered; a plain column rather than a foreign key —
    # presumably so attempts against non-existent accounts can be recorded.
    username = db.Column(db.Unicode(50), nullable=False)
    successful = db.Column(db.Boolean, nullable=False)
    # Short code describing why the attempt failed; semantics set by callers.
    failure_reason = db.Column(db.Unicode(20))
@listens_for(Resource, 'before_insert')
@listens_for(Resource, 'before_update')
def normalize_resource(mapper, connection, target):
    """
    Normalize a resource before it is saved to the database.

    This ensures that the resource's categories are properly
    denormalized in category_text and that the resource's
    URL starts with some sort of http:// or https:// prefix
    if it has been provided.

    Args:
        mapper: The mapper that is the target of the event.
        connection: The database connection being used.
        target: The resource being persisted to the database.
    """
    # Fix: the second parameter was named `connect`, contradicting this
    # docstring; SQLAlchemy invokes listeners positionally, so renaming
    # it to `connection` is safe.

    # Denormalize the category names and keywords into a single text
    # column so that they can be used in text-based searching.
    if target.categories:
        target.category_text = ', '.join(
            c.name + ' ' + (c.keywords or '') for c in target.categories)
    else:
        target.category_text = ''

    # If a non-blank URL was provided without a scheme, default it
    # to http://.
    if target.url and \
            not target.url.isspace() and \
            not target.url.lower().strip().startswith(('http://', 'https://')):
        target.url = 'http://' + target.url.strip()
@listens_for(Review, 'before_insert')
@listens_for(Review, 'before_update')
def normalize_review(mapper, connect, target):
    """
    Normalizes a review before it is saved to the database.

    This ensures that the composite rating is properly
    calculated.

    Args:
        mapper: The mapper that is the target of the event.
        connection: The database connection being used.
        target: The resource being persisted to the database.
    """
    # Collect the sub-ratings that were actually provided (present and
    # greater than zero), in the same order they were summed before.
    provided = [
        r for r in (target.rating, target.staff_rating, target.intake_rating)
        if r is not None and r > 0
    ]

    # Store the average of the submitted ratings in composite_rating,
    # or None when no rating was submitted at all.
    if provided:
        target.composite_rating = float(sum(provided)) / len(provided)
    else:
        target.composite_rating = None
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
from gbpclient.v2_0 import client as gbp_client
from keystoneauth1.identity import v2
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient.v2_0 import client as identity_client
from keystoneclient.v3 import client as keyclientv3
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from novaclient import exceptions as nova_exc
from gbpservice._i18n import _
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
class OpenstackApi(object):
    """Initializes common attributes for openstack client drivers."""

    def __init__(self, config, username=None,
                 password=None, tenant_name=None):
        keystone_conf = config.nfp_keystone_authtoken
        self.nova_version = '2'
        self.config = config
        # Base URL of the Keystone identity endpoint.
        self.identity_service = "%s://%s:%d/%s/" % (
            keystone_conf.auth_protocol,
            keystone_conf.auth_host,
            keystone_conf.auth_port,
            keystone_conf.auth_version,
        )
        # Base URL of the Neutron network endpoint.
        self.network_service = "%s://%s:%d/" % (
            keystone_conf.auth_protocol,
            keystone_conf.auth_host,
            config.bind_port,
        )
        # Fall back to the configured admin credentials when explicit
        # credentials are not supplied.
        self.username = username or keystone_conf.admin_user
        self.password = password or keystone_conf.admin_password
        self.tenant_name = tenant_name or keystone_conf.admin_tenant_name
        self.token = None
        self.admin_tenant_id = None
class KeystoneClient(OpenstackApi):
    """ Keystone Client Apis for orchestrator. """

    def get_admin_token(self):
        """Return a Keystone token scoped to the configured admin tenant.

        :return: A scoped admin token
        :raises Exception: if the token could not be obtained
        """
        try:
            admin_token = self.get_scoped_keystone_token(
                self.config.nfp_keystone_authtoken.admin_user,
                self.config.nfp_keystone_authtoken.admin_password,
                self.config.nfp_keystone_authtoken.admin_tenant_name)
        except Exception as ex:
            err = ("Failed to obtain user token. Error: %s" % ex)
            LOG.error(err)
            raise Exception(err)
        return admin_token

    def get_scoped_keystone_token(self, user, password, tenant_name,
                                  tenant_id=None):
        """ Get a scoped token from Openstack Keystone service.

        A scoped token is bound to the specific tenant.

        :param user: User name
        :param password: Password
        :param tenant_name: Tenant name
        :param tenant_id: Tenant UUID; may be supplied instead of tenant_name
        :return: A scoped token
        :raises Exception: if no tenant was specified or auth failed
        """
        if not (tenant_name or tenant_id):
            err = "Tenant Not specified for getting a scoped token"
            LOG.error(err)
            raise Exception(err)
        try:
            # Fix: tenant_id was previously accepted but silently ignored;
            # pass it through so callers can scope by id as well.
            auth = v2.Password(username=user,
                               password=password,
                               tenant_name=tenant_name,
                               tenant_id=tenant_id,
                               auth_url=self.identity_service)
            sess = session.Session(auth=auth)
            scoped_token = sess.get_token(auth=auth)
        except Exception as err:
            # Fix: removed a stray no-op expression statement
            # (`self.config.nfp_keystone_authtoken.auth_port,`) that used
            # to sit in this handler.
            err = ("Failed to get token from"
                   " Openstack Keystone service"
                   " KeyError :: %s" % (err))
            LOG.error(err)
            raise Exception(err)
        else:
            return scoped_token

    def get_admin_tenant_id(self, token):
        """Return (and cache) the UUID of the configured admin tenant.

        :param token: A scoped token
        :return: Admin tenant UUID
        """
        if not self.admin_tenant_id:
            self.admin_tenant_id = self.get_tenant_id(
                token,
                self.config.nfp_keystone_authtoken.admin_tenant_name)
        return self.admin_tenant_id

    def get_tenant_id(self, token, tenant_name):
        """ Get the tenant UUID associated to tenant name

        :param token: A scoped token
        :param tenant_name: Tenant name
        :return: Tenant UUID
        :raises Exception: if the tenant cannot be found
        """
        # Fix: removed unreachable statements that followed this try/except;
        # both branches either return or raise.
        try:
            keystone = self._get_v2_keystone_admin_client()
            tenant = keystone.tenants.find(name=tenant_name)
            return tenant.id
        except Exception as ex:
            err = ("Failed to read tenant UUID from"
                   " tenant_name %s."
                   " Error :: %s" % (tenant_name, ex))
            LOG.error(err)
            raise Exception(err)

    def _get_v2_keystone_admin_client(self):
        """ Returns keystone v2 client with admin credentials

        Using this client one can perform CRUD operations over
        keystone resources.
        """
        keystone_conf = self.config.nfp_keystone_authtoken
        auth = v2.Password(username=keystone_conf.admin_user,
                           password=keystone_conf.admin_password,
                           tenant_name=keystone_conf.admin_tenant_name,
                           auth_url=self.identity_service)
        sess = session.Session(auth=auth)
        v2client = identity_client.Client(session=sess)
        return v2client

    def _get_v3_keystone_admin_client(self):
        """ Returns keystone v3 client with admin credentials

        Using this client one can perform CRUD operations over
        keystone resources.
        """
        keystone_conf = self.config.nfp_keystone_authtoken
        v3_auth_url = ('%s://%s:%s/%s/' % (
            keystone_conf.auth_protocol, keystone_conf.auth_host,
            keystone_conf.auth_port, self.config.heat_driver.keystone_version))
        auth = v3.Password(auth_url=v3_auth_url,
                           user_domain_name='Default',
                           username=keystone_conf.admin_user,
                           password=keystone_conf.admin_password,
                           project_domain_name="Default",
                           project_name=keystone_conf.admin_tenant_name)
        sess = session.Session(auth=auth)
        v3client = keyclientv3.Client(session=sess)
        return v3client
class NovaClient(OpenstackApi):
""" Nova Client Api driver. """
def get_image_id(self, token, tenant_id, image_name):
""" Get the image UUID associated to image name
:param token: A scoped token
:param tenant_id: Tenant UUID
:param image_name: Image name
:return: Image UUID
"""
try:
nova = nova_client.Client(self.nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)
image = nova.images.find(name=image_name)
return image.id
except Exception as ex:
err = ("Failed to get image id from image name %s: %s" % (
image_name, ex))
LOG.error(err)
raise Exception(err)
    def get_image_metadata(self, token, tenant_id, image_name):
        """ Get the metadata of the image associated to image name

        :param token: A scoped token
        :param tenant_id: Tenant UUID
        :param image_name: Image name

        :return: Image metadata (not the UUID — see get_image_id for that)
        :raises Exception: if the image lookup fails
        """
        try:
            nova = nova_client.Client(self.nova_version, auth_token=token,
                                      tenant_id=tenant_id,
                                      auth_url=self.identity_service)
            image = nova.images.find(name=image_name)
            return image.metadata
        except Exception as ex:
            err = ("Failed to get image metadata from image name %s: %s" % (
                image_name, ex))
            LOG.error(err)
            raise Exception(err)
    def get_flavor_id(self, token, tenant_id, flavor_name):
        """ Get the flavor UUID associated to flavor name

        :param token: A scoped token
        :param tenant_id: Tenant UUID
        :param flavor_name: Flavor name

        :return: Flavor UUID
        :raises Exception: if the flavor is missing or the lookup fails
            (this method never returns None; errors are logged and re-raised)
        """
        try:
            nova = nova_client.Client(self.nova_version, auth_token=token,
                                      tenant_id=tenant_id,
                                      auth_url=self.identity_service)
            flavor = nova.flavors.find(name=flavor_name)
            return flavor.id
        except Exception as ex:
            err = ("Failed to get flavor id from flavor name %s: %s" % (
                flavor_name, ex))
            LOG.error(err)
            raise Exception(err)
def get_instance(self, token, tenant_id, instance_id):
""" Get instance details
:param token: A scoped_token
:param tenant_id: Tenant UUID
:param instance_id: Instance UUID
:return: Instance details
"""
try:
nova = nova_client.Client(self.nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)
instance = nova.servers.get(instance_id)
if instance:
return instance.to_dict()
raise Exception(_("No instance with id %(id)s "
"found in db for tenant %(tenant)s")
% {'id': instance_id, 'tenant': tenant_id})
except Exception as ex:
err = ("Failed to read instance information from"
" Openstack Nova service's response"
" KeyError :: %s" % (ex))
LOG.error(err)
raise Exception(err)
    def get_keypair(self, token, tenant_id, keypair_name):
        """ Get Nova keypair details

        :param token: A scoped_token
        :param tenant_id: Tenant UUID
        :param keypair_name: Nova keypair name

        :return: Nova keypair details as a dict
        :raises Exception: if the keypair cannot be read
        """
        # Coerce to str — presumably guards against non-string tenant ids
        # (e.g. UUID objects) from some callers; confirm.
        tenant_id = str(tenant_id)
        try:
            nova = nova_client.Client(self.nova_version, auth_token=token,
                                      tenant_id=tenant_id,
                                      auth_url=self.identity_service)
            keypair = nova.keypairs.find(name=keypair_name)
            return keypair.to_dict()
        except Exception as ex:
            err = ("Failed to read keypair information from"
                   " Openstack Nova service's response."
                   " %s" % ex)
            LOG.error(err)
            raise Exception(err)
def attach_interface(self, token, tenant_id, instance_id, port_id):
""" Attaches a port to already created instance
:param token: A scoped token
:param tenant_id: Tenant UUID
:param instance_id: UUID of the instance
:param port_id: Port UUID
"""
try:
nova = nova_client.Client(self.nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)
instance = nova.servers.interface_attach(instance_id, port_id,
None, None)
return instance
except Exception as ex:
err = ("Failed to attach interface %s to instance"
" %s %s" % (port_id, instance_id, ex))
LOG.error(err)
raise Exception(err)
    def detach_interface(self, token, tenant_id, instance_id, port_id):
        """ Detaches a port from an already created instance

        :param token: A scoped token
        :param tenant_id: Tenant UUID
        :param instance_id: UUID of the instance
        :param port_id: Port UUID

        :return: response from Nova interface_detach
        :raises Exception: if the detach call fails
        """
        try:
            nova = nova_client.Client(self.nova_version, auth_token=token,
                                      tenant_id=tenant_id,
                                      auth_url=self.identity_service)
            instance = nova.servers.interface_detach(instance_id, port_id)
            return instance
        except Exception as ex:
            err = ("Failed to detach interface %s from instance"
                   " %s %s" % (port_id, instance_id, ex))
            LOG.error(err)
            raise Exception(err)
def delete_instance(self, token, tenant_id, instance_id):
""" Delete the instance
:param token: A scoped token
:param tenant_id: Tenant UUID
:param instance_id: Instance UUID
"""
try:
nova = nova_client.Client(self.nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)
nova.servers.delete(instance_id)
except Exception as ex:
err = ("Failed to delete instance"
" %s %s" % (instance_id, ex))
LOG.error(err)
raise Exception(err)
def get_instances(self, token, filters=None):
""" List instances
:param token: A scoped_token
:param filters: Parameters for list filter
example for filter: {}, tenant_id is mandatory
:return: instance List
"""
if (
not filters or
type(filters) != dict or
'tenant_id' not in filters
):
err = ("Failed to process get_instances,"
" filters(type: dict) with tenant_id is mandatory")
LOG.error(err)
raise Exception(err)
tenant_id = filters.get('tenant_id')
try:
nova = nova_client.Client(self.nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)
instances = nova.servers.list(search_opts=filters)
data = [instance.to_dict() for instance in instances]
return data
except Exception as ex:
err = ("Failed to list instances under tenant"
" %s %s" % (tenant_id, ex))
LOG.error(err)
raise Exception(err)
    def delete_affinity_group(self, token, tenant_id, nf_id):
        """ Deletes a server group

        :param token: A scoped token
        :param tenant_id: Tenant UUID
        :param nf_id: Network Function UUID

        Returns: None (failures are logged, not raised)
        """
        # Server groups with the soft-anti-affinity policy need a newer Nova
        # API microversion than the class default, hence the local override.
        nova_version = 2.15
        nova = nova_client.Client(nova_version, auth_token=token,
                                  tenant_id=tenant_id,
                                  auth_url=self.identity_service)
        try:
            # The group is looked up by name; create_affinity_group() names
            # groups after the network function UUID.
            affinity_group = nova.server_groups.find(name=nf_id)
            affinity_group_id = affinity_group.to_dict()['id']
            nova.server_groups.delete(affinity_group_id)
            msg = ("Successfully deleted Nova Server Anti-Affinity "
                   "Group: %s" % nf_id)
            LOG.info(msg)
        except nova_exc.NotFound:
            # Already gone — deletion is treated as idempotent.
            pass
        except Exception as err:
            # Best effort: log the failure rather than propagating it.
            msg = ("Failed to delete Nova Server Anti-Affinity Group "
                   "with name %s. Error: %s" % (nf_id, err))
            LOG.error(msg)
def create_affinity_group(self, token, tenant_id, nf_id):
""" Creates a server group
:param token: A scoped token
:param tenant_id: Tenant UUID
:param nf_id: Network Function UUID
Returns: Nova server-group json object
"""
nova_version = 2.15
kwargs = dict(name=nf_id, policies=['soft-anti-affinity'])
nova = nova_client.Client(nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)
try:
affinity_group = nova.server_groups.create(**kwargs)
affinity_group_id = affinity_group.to_dict()['id']
msg = ("Successfully created Nova Server Anti-Affinity "
"Group: %s" % nf_id)
LOG.info(msg)
return affinity_group_id
except Exception as err:
msg = ("Failed to create Nova Server Anti-Affinity Group. "
"Error: %s" % err)
LOG.error(msg)
return None
def create_instance(self, token, tenant_id, image_id, flavor,
nw_port_id_list, name, volume_support,
volume_size, secgroup_name=None,
metadata=None, files=None, config_drive=False,
userdata=None, key_name='', server_grp_id=None,
):
""" Launch a VM with given details
:param token: A scoped token
:param tenant_id: Tenant UUID
:param image_id: Image UUID
:param flavor: Flavor name
:param nw_port_id_list: Network UUID and port UUID list
:param name: Service istance name
:param secgroup_name: Nova security group name
:param metadata: metadata key-value pairs
:param files: List of files to be copied.
:example files: [{"dst": <detination_path_string>,
"src": <file_contents>}]
:param userdata: user data to pass to be exposed by the metadata
server this can be a file type object as well or a
string
:param key_name: Nova keypair name
:param server_grp_id: Nova server group UUID
:param volume_support: volume support to launch instance
:param volume_size: cinder volume size in GB
:return: VM instance UUID
"""
try:
if files:
file_dict = {}
for _file in files:
with open(_file["src"]) as config_file:
data = config_file.read()
config_drive = True
file_dict.update({_file["dst"]: data})
files = file_dict
except Exception as e:
msg = (
"Failed while reading file: %r " % e)
LOG.error(msg)
raise e
kwargs = dict()
if volume_support:
block_device_mapping_v2 = [
{
"boot_index": "1",
"uuid": image_id,
"source_type": "image",
"volume_size": volume_size,
"destination_type": "volume",
"delete_on_termination": True
}
]
kwargs.update(block_device_mapping_v2=block_device_mapping_v2)
if server_grp_id:
kwargs.update(scheduler_hints={"group": server_grp_id})
if key_name != '':
kwargs.update(key_name=key_name)
if config_drive is True:
kwargs.update(config_drive=True)
if userdata is not None and \
(type(userdata) is str or type(userdata) is io.IOBase):
kwargs.update(userdata=userdata)
if metadata is not None and type(metadata) is dict and metadata != {}:
kwargs.update(meta=metadata)
if files is not None:
kwargs.update(files=files)
if nw_port_id_list:
nics = [{"port-id": entry.get("port"), "net-id": entry.get("uuid"),
"v4-fixed-ip": entry.get("fixed_ip")}
for entry in nw_port_id_list]
kwargs.update(nics=nics)
if secgroup_name:
kwargs.update(security_groups=[secgroup_name])
try:
nova = nova_client.Client(self.nova_version, auth_token=token,
tenant_id=tenant_id,
auth_url=self.identity_service)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name, nova.images.get(image_id),
flavor, **kwargs)
data = instance.to_dict()
return data['id']
except Exception as ex:
err = ("Failed to create instance under tenant"
" %s %s" % (tenant_id, ex))
LOG.error(err)
raise Exception(err)
class NeutronClient(OpenstackApi):
    """ Neutron Client Api Driver. """

    def get_floating_ip(self, token, floatingip_id):
        """ Get floatingip details

        :param token: A scoped_token
        :param floatingip_id: Floatingip UUID
        :return: floatingip details dict
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.show_floatingip(floatingip_id)['floatingip']
        except Exception as ex:
            err = ("Failed to read floatingip from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_floating_ips(self, token, **filters):
        """ Get list of floatingips, associated with port if passed

        :param token: A scoped_token
        :param filters: kwargs forwarded to the Neutron list call
        :return: list of floatingip dicts
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.list_floatingips(**filters)['floatingips']
        except Exception as ex:
            err = ("Failed to read floatingips from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_security_groups(self, token, tenant_id=None, filters=None):
        """ Get list of security groups

        :param token: A scoped_token
        :param tenant_id: unused; kept for interface compatibility
        :param filters: optional dict of list filters
        :return: list of security group dicts
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return neutron.list_security_groups(**filters)['security_groups']
        except Exception as ex:
            err = ("Failed to get security groups from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def create_security_group(self, token, attrs=None):
        """ Create security group

        :param token: A scoped_token
        :param attrs: dict of security group attributes
        :return: created security group dict
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            sg_info = {"security_group": attrs}
            return neutron.create_security_group(body=sg_info)[
                'security_group']
        except Exception as ex:
            # BUG FIX: error message previously said "get security groups".
            err = ("Failed to create security group from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def create_security_group_rule(self, token, attrs=None):
        """ Create security group rule

        Best-effort: failures are logged but deliberately not raised
        (note the commented-out raise, preserved from the original).

        :param token: A scoped_token
        :param attrs: dict of rule attributes
        :return: created rule dict, or None on failure
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            # attrs={'direction': 'egress', 'protocol': 'TCP',
            # 'security_group_id': 'c90c7b29-f653-4c41-ae1a-0290dc64e020'}
            sg_rule_info = {"security_group_rule": attrs}
            return neutron.create_security_group_rule(
                body=sg_rule_info)['security_group_rule']
        except Exception as ex:
            # BUG FIX: error message previously said "get security groups".
            err = ("Failed to create security group rule from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            # raise Exception(err)

    def get_ports(self, token, filters=None):
        """ List Ports

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: Port List
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            # BUG FIX: guard the default None before **-expansion.
            filters = filters if filters is not None else {}
            ports = neutron.list_ports(**filters).get('ports', [])
            return ports
        except Exception as ex:
            err = ("Failed to read port list from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_port(self, token, port_id):
        """ Get port details

        :param token: A scoped_token
        :param port_id: Port UUID
        :return: Port details
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.show_port(port_id)
        except Exception as ex:
            err = ("Failed to read port information"
                   " Exception :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_subnets(self, token, filters=None):
        """ List subnets

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: Subnet List
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            # BUG FIX: guard the default None before **-expansion.
            filters = filters if filters is not None else {}
            subnets = neutron.list_subnets(**filters).get('subnets', [])
            return subnets
        except Exception as ex:
            err = ("Failed to read subnet list from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_subnet(self, token, subnet_id):
        """ Get subnet details

        :param token: A scoped_token
        :param subnet_id: Subnet UUID
        :return: Subnet details
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.show_subnet(subnet_id)
        except Exception as ex:
            err = ("Failed to read subnet from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def delete_floatingip(self, token, floatingip_id):
        """ Delete the floatingip

        :param token: A scoped token
        :param floatingip_id: Floatingip UUID
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.delete_floatingip(floatingip_id)
        except Exception as ex:
            err = ("Failed to delete floatingip from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def update_port(self, token, port_id, **kwargs):
        """ Update attributes of an existing port.

        :param token: A scoped token
        :param port_id: Port UUID
        :param kwargs: name=<>, allowed_address_pairs={'ip_address': <>,
                       'mac_address': <>}
        :return: Neutron update_port response
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            port_info = dict(port={})
            port_info['port'].update(kwargs)
            return neutron.update_port(port_id, body=port_info)
        except Exception as ex:
            err = ("Failed to update port info"
                   " Error :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_floating_ips_for_ports(self, token, **kwargs):
        """ List floatingips associated with the given ports.

        :param token: A scoped token
        :param kwargs: mapping whose values are port UUIDs
        :return: Neutron response dict containing 'floatingips'
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.list_floatingips(port_id=list(kwargs.values()))
        except Exception as ex:
            raise Exception(ex)

    def _update_floatingip(self, token, floatingip_id, data):
        """ Update the floatingip

        :param token: A scoped token
        :param floatingip_id: Floatingip UUID
        :param data: data to update
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.update_floatingip(floatingip_id, body=data)
        except Exception as ex:
            err = ("Failed to update floatingip from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def disassociate_floating_ip(self, token, floatingip_id):
        """ Detach a floatingip from whatever port it is bound to.

        :param token: A scoped token
        :param floatingip_id: Floatingip UUID
        """
        info = {
            "floatingip": {
                "port_id": None}
        }
        self._update_floatingip(token, floatingip_id, info)
        LOG.debug("Successfully disassociated floatingip %s",
                  floatingip_id)

    def associate_floating_ip(self, token, floatingip_id, port_id):
        """ Attach a floatingip to the given port.

        :param token: A scoped token
        :param floatingip_id: Floatingip UUID
        :param port_id: Port UUID to associate
        """
        info = {
            "floatingip": {
                "port_id": port_id}
        }
        self._update_floatingip(token, floatingip_id, info)
        LOG.debug("Successfully associated floatingip %s",
                  floatingip_id)

    def list_ports(self, token, port_ids=None, **kwargs):
        """ List ports by id, or by arbitrary filters.

        NOTE(review): the two branches return different shapes (a list of
        port dicts vs the raw Neutron response dict). Preserved as-is since
        callers may depend on either shape.

        :param token: A scoped token
        :param port_ids: optional list of port UUIDs
        :param kwargs: filters used only when port_ids is empty
        :return: see note above
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            port_ids = port_ids if port_ids is not None else []
            if port_ids:
                ports = neutron.list_ports(id=port_ids).get('ports', [])
            else:
                ports = neutron.list_ports(**kwargs)
            return ports
        except Exception as ex:
            err = ("Failed to list ports %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def list_subnets(self, token, subnet_ids=None, **kwargs):
        """ List subnets by id.

        NOTE(review): kwargs are accepted but never forwarded to Neutron;
        preserved as-is for interface compatibility.

        :param token: A scoped token
        :param subnet_ids: optional list of subnet UUIDs
        :return: list of subnet dicts
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            subnet_ids = subnet_ids if subnet_ids is not None else []
            subnets = neutron.list_subnets(id=subnet_ids).get('subnets', [])
            return subnets
        except Exception as ex:
            err = ("Failed to list subnets %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def create_port(self, token, tenant_id, net_id, attrs=None):
        """ Create a port on the given network.

        :param token: A scoped token
        :param tenant_id: Tenant UUID (only used in the error message)
        :param net_id: Network UUID
        :param attrs: optional extra port attributes
        :return: created port dict
        """
        attr = {
            'port': {
                # 'tenant_id': tenant_id,
                'network_id': net_id
            }
        }
        if attrs:
            attr['port'].update(attrs)
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.create_port(body=attr)['port']
        except Exception as ex:
            raise Exception(_("Port creation failed in network: %(net)r "
                              "of tenant: %(tenant)r Error: %(error)s") %
                            {'net': net_id,
                             'tenant': tenant_id,
                             'error': ex})

    def delete_port(self, token, port_id):
        """ Delete the given port.

        :param token: A scoped token
        :param port_id: Port UUID
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.delete_port(port_id)
        except Exception as ex:
            err = ("Failed to delete port %s"
                   " Exception :: %s" % (port_id, ex))
            LOG.error(err)
            raise Exception(err)

    def get_networks(self, token, filters=None):
        """ List nets

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: network List
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            # BUG FIX: guard the default None before **-expansion.
            filters = filters if filters is not None else {}
            nets = neutron.list_networks(**filters).get('networks', [])
            return nets
        except Exception as ex:
            err = ("Failed to read network list from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def delete_nw(self, token, net_id):
        """ Delete the given network.

        Best-effort: failures are logged but not raised.

        :param token: A scoped token
        :param net_id: Network UUID
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.delete_network(net_id)
        except Exception as ex:
            err = ('Failed to delete network %s . %s' % (net_id, str(ex)))
            LOG.error(err)

    def get_pools(self, token, filters=None):
        """ List Pools

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: Pool List
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            # BUG FIX: guard the default None before **-expansion.
            filters = filters if filters is not None else {}
            pools = neutron.list_pools(**filters).get('pools', [])
            return pools
        except Exception as ex:
            err = ("Failed to read pool list from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_loadbalancers(self, token, filters=None):
        """ List Loadbalancers

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: Loadbalancers List
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            # BUG FIX: guard the default None before **-expansion.
            filters = filters if filters is not None else {}
            loadbalancers = neutron.list_loadbalancers(**filters).get(
                'loadbalancers', [])
            return loadbalancers
        except Exception as ex:
            # BUG FIX: error message previously said "pool list".
            err = ("Failed to read loadbalancer list from"
                   " Openstack Neutron service's response"
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_vip(self, token, vip_id):
        """ Get vip details

        :param token: A scoped_token
        :param vip_id: VIP UUID
        :return: VIP details
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            return neutron.show_vip(vip_id)
        except Exception as ex:
            err = ("Failed to read vip information"
                   " Exception :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_agents(self, token, filters=None):
        """ Get neutron agents

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: neutron agents List
        """
        try:
            neutron = neutron_client.Client(token=token,
                                            endpoint_url=self.network_service)
            # BUG FIX: guard the default None before **-expansion.
            filters = filters if filters is not None else {}
            return neutron.list_agents(**filters).get('agents', [])
        except Exception as ex:
            err = ("Failed to read agents information"
                   " Exception :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)
class GBPClient(OpenstackApi):
    """ GBP Client Api Driver. """

    def get_policy_target_groups(self, token, filters=None):
        """ List Policy Target Groups

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: PTG List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            # BUG FIX: guard the default None before **-expansion.
            filters = filters if filters is not None else {}
            return gbp.list_policy_target_groups(
                **filters)['policy_target_groups']
        except Exception as ex:
            err = ("Failed to read PTG list from"
                   " Openstack Neutron service's response."
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_policy_target_group(self, token, ptg_id, filters=None):
        """ Show one Policy Target Group.

        :param token: A scoped token
        :param ptg_id: PTG UUID
        :param filters: optional dict of show filters
        :return: PTG dict
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.show_policy_target_group(
                ptg_id, **filters)['policy_target_group']
        except Exception as ex:
            err = ("Failed to read PTG list from"
                   " Openstack Neutron service's response."
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def update_policy_target_group(self, token, ptg_id,
                                   policy_target_group_info):
        """ Updates a GBP Policy Target Group

        :param token: A scoped token
        :param ptg_id: PTG UUID
        :param policy_target_group_info: PTG info dict
        :return: PTG dict
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.update_policy_target_group(
                ptg_id,
                body=policy_target_group_info)['policy_target_group']
        except Exception as ex:
            err = ("Failed to update policy target group. Error :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def create_policy_target(self, token, tenant_id,
                             policy_target_group_id, name, port_id=None,
                             description=''):
        """ Creates a GBP Policy Target

        :param token: A scoped token
        :param tenant_id: Tenant UUID
        :param policy_target_group_id: PTG UUID
        :param name: PT name
        :param port_id: optional Port UUID to bind the PT to
        :param description: optional description
        :return: PT dict
        """
        policy_target_info = {
            "policy_target": {
                "policy_target_group_id": policy_target_group_id,
                "tenant_id": tenant_id,
            }
        }
        if name:
            policy_target_info['policy_target'].update({'name': name})
        if port_id:
            policy_target_info["policy_target"]["port_id"] = port_id
        if description:
            policy_target_info["policy_target"]["description"] = description
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.create_policy_target(
                body=policy_target_info)['policy_target']
        except Exception as ex:
            err = ("Failed to read policy target information from"
                   " Openstack Neutron service's response."
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def delete_policy_target(self, token, policy_target_id):
        """ Delete the GBP policy_target

        :param token: A scoped token
        :param policy_target_id: PT UUID
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.delete_policy_target(policy_target_id)
        except Exception as ex:
            err = ("Failed to delete policy target information from"
                   " Openstack Neutron service's response."
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def delete_policy_target_group(self, token, policy_target_group_id):
        """ Delete the GBP policy_target group

        :param token: A scoped token
        :param policy_target_group_id: PTG UUID
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.delete_policy_target_group(policy_target_group_id)
        except Exception as ex:
            err = ("Failed to delete policy target group from"
                   " Openstack."
                   " Error :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def update_policy_target(self, token, policy_target_id, updated_pt):
        """ Update the Policy Target

        :param token: A scoped token
        :param policy_target_id: PT UUID
        :param updated_pt: New PT dict
               {\"policy_target\": {\"description\": \"test123\"}}
        :return: updated PT dict
        """
        policy_target_info = {
            "policy_target": updated_pt
        }
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.update_policy_target(
                policy_target_id, body=policy_target_info)['policy_target']
        except Exception as ex:
            err = ("Failed to read updated PT information"
                   ". PT %s."
                   " KeyError :: %s" % (policy_target_id, ex))
            LOG.error(err)
            raise Exception(err)

    def create_policy_target_group(self, token, tenant_id, name,
                                   l2_policy_id=None, ext_data=None):
        """ Creates a GBP Policy Target Group

        :param token: A scoped token
        :param tenant_id: Tenant UUID
        :param name: PTG name
        :param l2_policy_id: optional L2 policy UUID to attach
        :param ext_data: optional extra PTG attributes
        :return: PTG dict
        """
        policy_target_group_info = {
            "policy_target_group": {
                "tenant_id": tenant_id,
                "name": name,
            }
        }
        ext_data = ext_data or {}
        if l2_policy_id:
            policy_target_group_info["policy_target_group"].update(
                {"l2_policy_id": l2_policy_id})
        policy_target_group_info['policy_target_group'].update(ext_data)
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.create_policy_target_group(
                body=policy_target_group_info)['policy_target_group']
        except Exception as ex:
            err = ("Failed to create policy target group. %s"
                   " Error :: %s" % (policy_target_group_info, ex))
            LOG.error(err)
            raise Exception(err)

    def create_l2_policy(self, token, tenant_id, name, l3_policy_id=None,
                         description=''):
        """ Creates a GBP L2 policy.

        :param token: A scoped token
        :param tenant_id: Tenant UUID
        :param name: L2 policy name
        :param l3_policy_id: optional L3 policy UUID to attach
        :param description: optional description
        :return: l2_policy dict
        """
        l2_policy_info = {
            "l2_policy": {
                "tenant_id": tenant_id,
                "name": name
            }
        }
        if l3_policy_id:
            l2_policy_info["l2_policy"].update({'l3_policy_id': l3_policy_id})
        if description:
            # BUG FIX: the original updated l2_policy_info["description"],
            # a key that does not exist, so any non-empty description
            # raised KeyError. The description belongs inside "l2_policy".
            l2_policy_info["l2_policy"].update({'description': description})
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.create_l2_policy(body=l2_policy_info)['l2_policy']
        except Exception as ex:
            err = ("Failed to create l2 policy under tenant"
                   " %s. Error :: %s" % (tenant_id, ex))
            LOG.error(err)
            raise Exception(err)

    def delete_l2_policy(self, token, l2policy_id):
        """ Delete the given L2 policy.

        :param token: A scoped token
        :param l2policy_id: L2 policy UUID
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.delete_l2_policy(l2policy_id)
        except Exception as ex:
            err = ("Failed to delete l2 policy %s. Reason %s" %
                   (l2policy_id, ex))
            LOG.error(err)
            raise Exception(err)

    # NOTE: The plural form in the function name is needed in that way
    # to construct the function generically
    def get_l2_policys(self, token, filters=None):
        """ List L2 policies

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: L2 policies List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_l2_policies(**filters)['l2_policies']
        except Exception as ex:
            err = ("Failed to list l2 policies. Reason %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def get_l2_policy(self, token, policy_id, filters=None):
        """ Show one L2 policy.

        :param token: A scoped_token
        :param policy_id: l2 policy id
        :param filters: optional dict of show filters
        :return: l2_policy dict
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.show_l2_policy(
                policy_id, **filters)['l2_policy']
        except Exception as ex:
            err = ("Failed to read l2 policy list from"
                   " Openstack Neutron service's response."
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def create_network_service_policy(self, token,
                                      network_service_policy_info):
        """ Create a network service policy.

        :param token: A scoped token
        :param network_service_policy_info: request body dict
        :return: network_service_policy dict
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.create_network_service_policy(
                body=network_service_policy_info)['network_service_policy']
        except Exception as ex:
            err = ("Failed to create network service policy "
                   "Error :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_network_service_policies(self, token, filters=None):
        """ List network service policies

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: network service policy List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_network_service_policies(**filters)[
                'network_service_policies']
        except Exception as ex:
            err = ("Failed to list network service policies. Reason %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def get_external_policies(self, token, filters=None):
        """ List external policies

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: external policy List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_external_policies(**filters)['external_policies']
        except Exception as ex:
            err = ("Failed to list external policies. Reason %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def get_policy_rule_sets(self, token, filters=None):
        """ List policy rule sets

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: policy rule set List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_policy_rule_sets(**filters)['policy_rule_sets']
        except Exception as ex:
            err = ("Failed to list policy rule sets. Reason %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def get_policy_actions(self, token, filters=None):
        """ List policy actions

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: policy actions List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_policy_actions(**filters)['policy_actions']
        except Exception as ex:
            err = ("Failed to list policy actions. Reason %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def get_policy_rules(self, token, filters=None):
        """ List policy rules

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: policy rules List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_policy_rules(**filters)['policy_rules']
        except Exception as ex:
            err = ("Failed to list policy rules. Reason %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def create_l3_policy(self, token, l3_policy_info):
        """ Create a GBP L3 policy.

        :param token: A scoped token
        :param l3_policy_info: request body dict; must contain
               l3_policy_info['l3_policy']['tenant_id']
        :return: l3_policy dict
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            return gbp.create_l3_policy(body=l3_policy_info)['l3_policy']
        except Exception as ex:
            err = ("Failed to create l3 policy under tenant"
                   " %s. Error :: %s"
                   % (l3_policy_info['l3_policy']['tenant_id'], ex))
            LOG.error(err)
            raise Exception(err)

    def get_l3_policy(self, token, policy_id, filters=None):
        """ Show one L3 policy.

        :param token: A scoped_token
        :param policy_id: l3 policy id
        :param filters: optional dict of show filters
        :return: l3_policy dict
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.show_l3_policy(
                policy_id, **filters)['l3_policy']
        except Exception as ex:
            err = ("Failed to read l3 policy list from"
                   " Openstack Neutron service's response."
                   " KeyError :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def get_l3_policies(self, token, filters=None):
        """ List L3 policies

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: L3 policies List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_l3_policies(**filters)['l3_policies']
        except Exception as ex:
            err = ("Failed to list l3 policies. Reason %s" % ex)
            LOG.error(err)
            raise Exception(err)

    def get_policy_targets(self, token, filters=None):
        """ List Policy Targets

        :param token: A scoped_token
        :param filters: optional dict of list filters
        :return: PT List
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.list_policy_targets(**filters)['policy_targets']
        except Exception as ex:
            err = ("Failed to read PT list."
                   " Error :: %s" % (ex))
            LOG.error(err)
            raise Exception(err)

    def list_pt(self, token, filters=None):
        """ Alias for get_policy_targets, kept for compatibility. """
        filters = filters if filters is not None else {}
        return self.get_policy_targets(token, filters=filters)

    def get_policy_target(self, token, pt_id, filters=None):
        """ Show one Policy Target.

        :param token: A scoped token
        :param pt_id: PT UUID
        :param filters: optional dict of show filters
        :return: PT dict
        """
        try:
            gbp = gbp_client.Client(token=token,
                                    endpoint_url=self.network_service)
            filters = filters if filters is not None else {}
            return gbp.show_policy_target(pt_id,
                                          **filters)['policy_target']
        except Exception as ex:
            err = ("Failed to read PT information"
                   ". PT %s."
                   " Error :: %s" % (pt_id, ex))
            LOG.error(err)
            raise Exception(err)

    def get_service_profile(self, token, service_profile_id):
        """ Show one service profile. Errors propagate to the caller. """
        gbp = gbp_client.Client(token=token,
                                endpoint_url=self.network_service)
        return gbp.show_service_profile(service_profile_id)['service_profile']

    def get_servicechain_node(self, token, node_id):
        """ Show one servicechain node. Errors propagate to the caller. """
        gbp = gbp_client.Client(token=token,
                                endpoint_url=self.network_service)
        return gbp.show_servicechain_node(node_id)['servicechain_node']

    def get_servicechain_instance(self, token, instance_id):
        """ Show one servicechain instance. Errors propagate to the caller. """
        gbp = gbp_client.Client(token=token,
                                endpoint_url=self.network_service)
        return gbp.show_servicechain_instance(instance_id)[
            'servicechain_instance']
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.html
~~~~~~~~~~~~~~~~~~~~
Lexers for HTML, XML and related markup.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.util import looks_like_xml, html_doctype_matches
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.jvm import ScalaLexer
from pygments.lexers.css import CssLexer, _indentation, _starts_block
from pygments.lexers.ruby import RubyLexer
__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
'ScamlLexer', 'JadeLexer']
class HtmlLexer(RegexLexer):
    """
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
    by the appropriate lexer.
    """
    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']
    # Case-insensitive matching; DOTALL so '.*?' spans newlines in
    # comments, CDATA and embedded script/style bodies.
    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        # 'root': text between tags, entities, comments and tag openers.
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            # <script ...> pushes two states: the tag attributes are lexed
            # first ('tag'), then the body in 'script-content'.
            (r'(<)(\s*)(script)(\s*)',
             bygroups(Punctuation, Text, Name.Tag, Text),
             ('script-content', 'tag')),
            (r'(<)(\s*)(style)(\s*)',
             bygroups(Punctuation, Text, Name.Tag, Text),
             ('style-content', 'tag')),
            # note: this allows tag names not used in HTML like <x:with-dash>,
            # this is to support yet-unknown template engines and the like
            (r'(<)(\s*)([\w:.-]+)',
             bygroups(Punctuation, Text, Name.Tag), 'tag'),
            (r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation)),
        ],
        # 'comment': inside <!-- ... -->; pops on the closing marker.
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        # 'tag': attribute list inside an open tag, until '>' or '/>'.
        'tag': [
            (r'\s+', Text),
            (r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
             'attr'),
            (r'[\w:-]+', Name.Attribute),
            (r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
        ],
        # 'script-content': delegate the body to the JavaScript lexer up to
        # the closing </script>.
        'script-content': [
            (r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation), '#pop'),
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
        ],
        # 'style-content': same pattern with the CSS lexer for <style>.
        'style-content': [
            (r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation),'#pop'),
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
        ],
        # 'attr': a single attribute value (quoted or bare); pops after one.
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
    # Heuristic used by guess_lexer(); implicitly returns None (no claim)
    # when the text has no HTML doctype.
    def analyse_text(text):
        if html_doctype_matches(text):
            return 0.5
class DtdLexer(RegexLexer):
    """
    A lexer for DTDs (Document Type Definitions).
    .. versionadded:: 1.5
    """
    # DOTALL so quoted literals and comments may span lines.
    flags = re.MULTILINE | re.DOTALL
    name = 'DTD'
    aliases = ['dtd']
    filenames = ['*.dtd']
    mimetypes = ['application/xml-dtd']
    tokens = {
        # 'root': top-level declarations; each <!XXX opener pushes a
        # dedicated state for its body.
        'root': [
            include('common'),
            (r'(<!ELEMENT)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'element'),
            (r'(<!ATTLIST)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'attlist'),
            (r'(<!ENTITY)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Entity), 'entity'),
            (r'(<!NOTATION)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'notation'),
            (r'(<!\[)([^\[\s]+)(\s*)(\[)',  # conditional sections
             bygroups(Keyword, Name.Entity, Text, Keyword)),
            (r'(<!DOCTYPE)(\s+)([^>\s]+)',
             bygroups(Keyword, Text, Name.Tag)),
            (r'PUBLIC|SYSTEM', Keyword.Constant),
            (r'[\[\]>]', Keyword),
        ],
        # 'common': tokens valid in every state: whitespace, entity
        # references, comments, operators and quoted literals.
        'common': [
            (r'\s+', Text),
            (r'(%|&)[^;]*;', Name.Entity),
            ('<!--', Comment, 'comment'),
            (r'[(|)*,?+]', Operator),
            (r'"[^"]*"', String.Double),
            (r'\'[^\']*\'', String.Single),
        ],
        # 'comment': inside <!-- ... -->; pops on the closing marker.
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        # 'element': body of an <!ELEMENT ...> declaration.
        'element': [
            include('common'),
            (r'EMPTY|ANY|#PCDATA', Keyword.Constant),
            (r'[^>\s|()?+*,]+', Name.Tag),
            (r'>', Keyword, '#pop'),
        ],
        # 'attlist': body of an <!ATTLIST ...> declaration.
        'attlist': [
            include('common'),
            (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
             Keyword.Constant),
            (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
            (r'xml:space|xml:lang', Keyword.Reserved),
            (r'[^>\s|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
        # 'entity': body of an <!ENTITY ...> declaration.
        'entity': [
            include('common'),
            (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
            (r'[^>\s|()?+*,]+', Name.Entity),
            (r'>', Keyword, '#pop'),
        ],
        # 'notation': body of a <!NOTATION ...> declaration.
        'notation': [
            include('common'),
            (r'SYSTEM|PUBLIC', Keyword.Constant),
            (r'[^>\s|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
    }
    # Heuristic used by guess_lexer(); DTD markers without looking like a
    # full XML document score high. Implicitly returns None otherwise.
    def analyse_text(text):
        if not looks_like_xml(text) and \
           ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
            return 0.8
class XmlLexer(RegexLexer):
    """
    Generic lexer for XML (eXtensible Markup Language).
    """
    # DOTALL lets rules such as CDATA sections span multiple lines.
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    name = 'XML'
    aliases = ['xml']
    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
                 '*.wsdl', '*.wsf']
    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
                 'application/rss+xml', 'application/atom+xml']
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            # FIX: use a raw string here; '\s' inside a plain string literal
            # is an invalid escape sequence (DeprecationWarning today, a
            # SyntaxError in future Python versions).  The matched pattern
            # is unchanged.
            (r'\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
    def analyse_text(text):
        # Heuristic score for lexer guessing; deliberately below HTML's 0.5.
        if looks_like_xml(text):
            return 0.45  # less than HTML
class XsltLexer(XmlLexer):
    """
    A lexer for XSLT.
    .. versionadded:: 0.10
    """
    name = 'XSLT'
    aliases = ['xslt']
    filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
    mimetypes = ['application/xsl+xml', 'application/xslt+xml']
    # XSLT instruction names: tags in the xsl: namespace whose local name is
    # in this set are re-tagged as keywords by get_tokens_unprocessed below.
    EXTRA_KEYWORDS = set((
        'apply-imports', 'apply-templates', 'attribute',
        'attribute-set', 'call-template', 'choose', 'comment',
        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
        'for-each', 'if', 'import', 'include', 'key', 'message',
        'namespace-alias', 'number', 'otherwise', 'output', 'param',
        'preserve-space', 'processing-instruction', 'sort',
        'strip-space', 'stylesheet', 'template', 'text', 'transform',
        'value-of', 'variable', 'when', 'with-param'
    ))
    def get_tokens_unprocessed(self, text):
        # Post-process the plain-XML token stream: promote known xsl:
        # instruction tags from Name.Tag to Keyword.
        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
            m = re.match('</?xsl:([^>]*)/?>?', value)
            if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value
    def analyse_text(text):
        # XML-looking input containing '<xsl' is almost certainly XSLT.
        if looks_like_xml(text) and '<xsl' in text:
            return 0.8
class HamlLexer(ExtendedRegexLexer):
    """
    For Haml markup.
    .. versionadded:: 1.3
    """
    name = 'Haml'
    aliases = ['haml']
    filenames = ['*.haml']
    mimetypes = ['text/x-haml']
    flags = re.IGNORECASE
    # Haml can include " |\n" anywhere,
    # which is ignored and used to wrap long lines.
    # To accommodate this, use this custom faux dot instead.
    _dot = r'(?: \|\n(?=.* \|)|.)'
    # In certain places, a comma at the end of the line
    # allows line wrapping as well.
    _comma_dot = r'(?:,\s*\n|' + _dot + ')'
    # NOTE: indentation-driven lexer (ExtendedRegexLexer + _indentation
    # callback); rule order within each state is significant.
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],
        # Implicit div shortcuts: .class and #id both open a tag.
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],
        # Either embedded Ruby (=, ~, &=, !=) or plain text content.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            default('plain'),
        ],
        'content': [
            include('css'),
            (r'%[\w:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'haml-comment-block'), '#pop'),
            (r'(-)(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
            (r'\[' + _dot + '*?\]', using(RubyLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],
        # Plain text with #{...} Ruby interpolation.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],
        'haml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class ScamlLexer(ExtendedRegexLexer):
    """
    For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
    .. versionadded:: 1.4
    """
    name = 'Scaml'
    aliases = ['scaml']
    filenames = ['*.scaml']
    mimetypes = ['text/x-scaml']
    flags = re.IGNORECASE
    # Scaml does not yet support the " |\n" notation to
    # wrap long lines. Once it does, use the custom faux
    # dot instead.
    # _dot = r'(?: \|\n(?=.* \|)|.)'
    _dot = r'.'
    # NOTE: mirrors HamlLexer, with Ruby swapped for Scala and an extra
    # "-@ import" rule; rule order within each state is significant.
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             'root'),
            default('plain'),
        ],
        'content': [
            include('css'),
            (r'%[\w:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            # Scaml-specific: "-@ import" attribute/import declarations.
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class JadeLexer(ExtendedRegexLexer):
    """
    For Jade markup.
    Jade is a variant of Scaml, see:
    http://scalate.fusesource.org/documentation/scaml-reference.html
    .. versionadded:: 1.4
    """
    name = 'Jade'
    aliases = ['jade']
    filenames = ['*.jade']
    mimetypes = ['text/x-jade']
    flags = re.IGNORECASE
    _dot = r'.'
    # NOTE: mirrors ScamlLexer, but tags are written without the leading '%'
    # and '|' introduces plain text; rule order is significant.
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)), 'root'),
            default('plain'),
        ],
        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            # Jade tags have no '%' prefix; bare words open a tag.
            (r'[\w:-]+', Name.Tag, 'tag'),
            (r'\|', Text, 'eval-or-plain'),
        ],
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
|
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.common import costilius
from rally.task.processing import charts
from tests.unit import test
CHARTS = "rally.task.processing.charts."
class ChartTestCase(test.TestCase):
    """Tests for the abstract charts.Chart base class."""
    class Chart(charts.Chart):
        # Minimal concrete subclass: maps iteration keys "a"/"b" to
        # "foo_"-prefixed series names.
        def _map_iteration_values(self, iteration):
            return [("foo_" + k, iteration[k]) for k in ["a", "b"]]
    @property
    def bench_info(self):
        # Benchmark metadata reused by the tests below.
        return {"iterations_count": 42, "atomic": {"a": {}, "b": {}, "c": {}}}
    def test___init__(self):
        # The abstract base class itself must not be instantiable.
        self.assertRaises(TypeError, charts.Chart, self.bench_info)
        chart = self.Chart(self.bench_info)
        self.assertEqual({}, chart._data)
        self.assertEqual(42, chart.base_size)
        self.assertEqual(1000, chart.zipped_size)
        chart = self.Chart(self.bench_info, zipped_size=24)
        self.assertEqual({}, chart._data)
        self.assertEqual(42, chart.base_size)
        self.assertEqual(24, chart.zipped_size)
    @mock.patch(CHARTS + "utils.GraphZipper")
    def test_add_iteration_and_render(self, mock_graph_zipper):
        gzipper_a = mock.Mock(get_zipped_graph=lambda: "a_points")
        gzipper_b = mock.Mock(get_zipped_graph=lambda: "b_points")
        mock_graph_zipper.side_effect = [gzipper_a, gzipper_b]
        chart = self.Chart(self.bench_info, 24)
        self.assertEqual([], chart.render())
        # Plain loop instead of a list comprehension used only for its
        # side effects (the comprehension built a throwaway list).
        for itr in ({"a": 1, "b": 2}, {"a": 3, "b": 4}):
            chart.add_iteration(itr)
        self.assertEqual([mock.call(42, 24), mock.call(42, 24)],
                         mock_graph_zipper.mock_calls)
        self.assertEqual(2, len(chart._data))
        self.assertEqual([mock.call(1), mock.call(3)],
                         chart._data["foo_a"].add_point.mock_calls)
        self.assertEqual([mock.call(2), mock.call(4)],
                         chart._data["foo_b"].add_point.mock_calls)
        self.assertEqual([("foo_a", "a_points"), ("foo_b", "b_points")],
                         chart.render())
    def test__fix_atomic_actions(self):
        chart = self.Chart(self.bench_info)
        # Missing atomic actions must be filled in with zero durations.
        self.assertEqual(
            {"atomic_actions": {"a": 5, "b": 6, "c": 0}},
            chart._fix_atomic_actions({"atomic_actions": {"a": 5, "b": 6}}))
class MainStackedAreaChartTestCase(test.TestCase):
    def test_add_iteration_and_render(self):
        """Durations of failed iterations move to the failed_duration series."""
        chart = charts.MainStackedAreaChart({"iterations_count": 3}, 10)
        self.assertIsInstance(chart, charts.Chart)
        # Plain loop instead of a side-effect-only list comprehension.
        for itr in ({"duration": 1.1, "idle_duration": 2.2, "error": None},
                    {"error": True, "duration": 1.1, "idle_duration": 0.5},
                    {"duration": 1.3, "idle_duration": 3.4, "error": None}):
            chart.add_iteration(itr)
        expected = [("duration", [[1, 1.1], [2, 0], [3, 1.3]]),
                    ("idle_duration", [[1, 2.2], [2, 0], [3, 3.4]]),
                    ("failed_duration", [[1, 0], [2, 1.6], [3, 0]])]
        self.assertEqual(expected, chart.render())
class AtomicStackedAreaChartTestCase(test.TestCase):
    def test_add_iteration_and_render(self):
        """Per-atomic-action series plus a failed_duration remainder series."""
        iterations = (
            {"atomic_actions": {"foo": 1.1}, "error": False},
            {"atomic_actions": {"foo": 1.1, "bar": 1.2},
             "error": True, "duration": 40, "idle_duration": 2},
            {"atomic_actions": {"bar": 1.2},
             "error": True, "duration": 5.5, "idle_duration": 2.5})
        expected = [("bar", [[1, 0], [2, 1.2], [3, 1.2]]),
                    ("failed_duration", [[1, 0], [2, 39.7], [3, 6.8]]),
                    ("foo", [[1, 1.1], [2, 1.1], [3, 0]])]
        chart = charts.AtomicStackedAreaChart(
            {"iterations_count": 3, "atomic": {"foo": {}, "bar": {}}}, 10)
        self.assertIsInstance(chart, charts.Chart)
        # Plain loop instead of a side-effect-only list comprehension.
        for iteration in iterations:
            chart.add_iteration(iteration)
        self.assertEqual(expected, sorted(chart.render()))
class OutputStackedAreaChartTestCase(test.TestCase):
    def test_add_iteration_and_render(self):
        """Scenario output values become series; missing values render as 0."""
        chart = charts.OutputStackedAreaChart(
            {"iterations_count": 3, "output_names": ["foo", "bar"]}, 10)
        self.assertIsInstance(chart, charts.Chart)
        # Plain loop instead of a side-effect-only list comprehension.
        for data in ({"foo": 1.1, "bar": 1.2}, {"foo": 1.3}, {"bar": 1.4}):
            chart.add_iteration({"scenario_output": {"data": data}})
        expected = [("bar", [[1, 1.2], [2, 0], [3, 1.4]]),
                    ("foo", [[1, 1.1], [2, 1.3], [3, 0]])]
        self.assertEqual(expected, sorted(chart.render()))
class AvgChartTestCase(test.TestCase):
    class AvgChart(charts.AvgChart):
        # Concrete subclass: series come from the "foo" mapping of
        # each iteration.
        def _map_iteration_values(self, iteration):
            return iteration["foo"].items()
    def test_add_iteration_and_render(self):
        # The abstract AvgChart must not be instantiable directly.
        self.assertRaises(TypeError, charts.AvgChart, {"iterations_count": 3})
        chart = self.AvgChart({"iterations_count": 3})
        self.assertIsInstance(chart, charts.AvgChart)
        # Plain loop instead of a side-effect-only list comprehension.
        for foo in ({"a": 1.3, "b": 4.3},
                    {"a": 2.4, "b": 5.4},
                    {"a": 3.5, "b": 7.7}):
            chart.add_iteration({"foo": foo})
        # a: (1.3 + 2.4 + 3.5) / 3 == 2.4, b: (4.3 + 5.4 + 7.7) / 3 == 5.8
        self.assertEqual([("a", 2.4), ("b", 5.8)], sorted(chart.render()))
class AtomicAvgChartTestCase(test.TestCase):
    def test_add_iteration_and_render(self):
        """Averages per atomic action; missing actions count as zero."""
        chart = charts.AtomicAvgChart({"iterations_count": 3,
                                       "atomic": {"foo": {}, "bar": {}}})
        self.assertIsInstance(chart, charts.AvgChart)
        # Plain loop instead of a side-effect-only list comprehension.
        for actions in ([("foo", 2), ("bar", 5)], [("foo", 4)], [("bar", 7)]):
            chart.add_iteration(
                {"atomic_actions": costilius.OrderedDict(actions)})
        self.assertEqual([("bar", 4.0), ("foo", 2.0)], sorted(chart.render()))
@ddt.ddt
class HistogramChartTestCase(test.TestCase):
    class HistogramChart(charts.HistogramChart):
        # Concrete subclass with a single pre-configured "bar" series
        # spanning values 1.2 .. 4.2.
        def __init__(self, benchmark_info):
            super(HistogramChartTestCase.HistogramChart,
                  self).__init__(benchmark_info)
            self._data["bar"] = {"views": self._init_views(1.2, 4.2),
                                 "disabled": None}
        def _map_iteration_values(self, iteration):
            return iteration["foo"].items()
    def test_add_iteration_and_render(self):
        self.assertRaises(TypeError, charts.HistogramChart,
                          {"iterations_count": 3})
        chart = self.HistogramChart({"iterations_count": 3})
        self.assertIsInstance(chart, charts.HistogramChart)
        # Plain loop instead of a side-effect-only list comprehension.
        for foo in ({"bar": 1.2}, {"bar": 2.4}, {"bar": 4.2}):
            chart.add_iteration({"foo": foo})
        expected = [[{"disabled": None, "key": "bar",
                      "values": [{"x": 2.7, "y": 2}, {"x": 4.2, "y": 1}],
                      "view": "Square Root Choice"},
                     {"disabled": None, "key": "bar",
                      "values": [{"x": 2.2, "y": 1}, {"x": 3.2, "y": 1},
                                 {"x": 4.2, "y": 1}],
                      "view": "Sturges Formula"},
                     {"disabled": None,
                      "key": "bar",
                      "values": [{"x": 2.2, "y": 1}, {"x": 3.2, "y": 1},
                                 {"x": 4.2, "y": 1}],
                      "view": "Rice Rule"},
                     {"disabled": None, "key": "bar",
                      "values": [{"x": 2.7, "y": 2}, {"x": 4.2, "y": 1}],
                      "view": "One Half"}]]
        self.assertEqual(expected, chart.render())
    @ddt.data(
        {"base_size": 2, "min_value": 1, "max_value": 4,
         "expected": [{"bins": 2, "view": "Square Root Choice",
                       "x": [2.5, 4.0], "y": [0, 0]},
                      {"bins": 2, "view": "Sturges Formula",
                       "x": [2.5, 4.0], "y": [0, 0]},
                      {"bins": 3, "view": "Rice Rule",
                       "x": [2.0, 3.0, 4.0], "y": [0, 0, 0]},
                      {"bins": 1, "view": "One Half", "x": [4.0], "y": [0]}]},
        {"base_size": 100, "min_value": 27, "max_value": 42,
         "expected": [
             {"bins": 10, "view": "Square Root Choice",
              "x": [28.5, 30.0, 31.5, 33.0, 34.5, 36.0, 37.5, 39.0, 40.5,
                    42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
             {"bins": 8, "view": "Sturges Formula",
              "x": [28.875, 30.75, 32.625, 34.5, 36.375, 38.25, 40.125,
                    42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0]},
             {"bins": 10, "view": "Rice Rule",
              "x": [28.5, 30.0, 31.5, 33.0, 34.5, 36.0, 37.5, 39.0, 40.5,
                    42.0], "y": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]},
             {"bins": 50, "view": "One Half",
              "x": [27.3, 27.6, 27.9, 28.2, 28.5, 28.8, 29.1, 29.4, 29.7,
                    30.0, 30.3, 30.6, 30.9, 31.2, 31.5, 31.8, 32.1, 32.4,
                    32.7, 33.0, 33.3, 33.6, 33.9, 34.2, 34.5, 34.8, 35.1,
                    35.4, 35.7, 36.0, 36.3, 36.6, 36.9, 37.2, 37.5, 37.8,
                    38.1, 38.4, 38.7, 39.0, 39.3, 39.6, 39.9, 40.2, 40.5,
                    40.8, 41.1, 41.4, 41.7, 42.0], "y": [0] * 50}]})
    @ddt.unpack
    def test_views(self, base_size=None, min_value=None, max_value=None,
                   expected=None):
        chart = self.HistogramChart({"iterations_count": base_size})
        self.assertEqual(expected, chart._init_views(min_value, max_value))
class MainHistogramChartTestCase(test.TestCase):
    def test_add_iteration_and_render(self):
        chart = charts.MainHistogramChart(
            {"iterations_count": 3, "min_duration": 2, "max_duration": 7})
        self.assertIsInstance(chart, charts.HistogramChart)
        # Plain loop instead of a side-effect-only list comprehension.
        for itr in ({"duration": 1.1, "idle_duration": 2.2, "error": None},
                    {"error": True},
                    {"duration": 1.3, "idle_duration": 3.4, "error": None}):
            chart.add_iteration(itr)
        expected = [
            {"disabled": None, "key": "task", "view": "Square Root Choice",
             "values": [{"x": 4.5, "y": 3}, {"x": 7.0, "y": 0}]},
            {"disabled": None, "key": "task", "view": "Sturges Formula",
             "values": [{"x": 3.666666666666667, "y": 3},
                        {"x": 5.333333333333334, "y": 0},
                        {"x": 7.0, "y": 0}]},
            {"disabled": None, "key": "task", "view": "Rice Rule",
             "values": [{"x": 3.666666666666667, "y": 3},
                        {"x": 5.333333333333334, "y": 0},
                        {"x": 7.0, "y": 0}]},
            {"disabled": None, "key": "task", "view": "One Half",
             "values": [{"x": 4.5, "y": 3}, {"x": 7.0, "y": 0}]}]
        self.assertEqual([expected], chart.render())
class AtomicHistogramChartTestCase(test.TestCase):
    def test_add_iteration_and_render(self):
        chart = charts.AtomicHistogramChart(
            {"iterations_count": 3,
             "atomic": costilius.OrderedDict(
                 [("foo", {"min_duration": 1.6, "max_duration": 2.8}),
                  ("bar", {"min_duration": 3.1, "max_duration": 5.5})])})
        self.assertIsInstance(chart, charts.HistogramChart)
        # Plain loop instead of a side-effect-only list comprehension.
        for actions in ({"foo": 1.6, "bar": 3.1}, {"foo": 2.8}, {"bar": 5.5}):
            chart.add_iteration({"atomic_actions": actions})
        expected = [
            [{"disabled": 0, "key": "foo", "view": "Square Root Choice",
              "values": [{"x": 2.2, "y": 2}, {"x": 2.8, "y": 1}]},
             {"disabled": 0, "key": "foo", "view": "Sturges Formula",
              "values": [{"x": 2.0, "y": 2}, {"x": 2.4, "y": 0},
                         {"x": 2.8, "y": 1}]},
             {"disabled": 0, "key": "foo", "view": "Rice Rule",
              "values": [{"x": 2.0, "y": 2}, {"x": 2.4, "y": 0},
                         {"x": 2.8, "y": 1}]},
             {"disabled": 0, "key": "foo", "view": "One Half",
              "values": [{"x": 2.2, "y": 2}, {"x": 2.8, "y": 1}]}],
            [{"disabled": 1, "key": "bar", "view": "Square Root Choice",
              "values": [{"x": 4.3, "y": 2}, {"x": 5.5, "y": 1}]},
             {"disabled": 1, "key": "bar", "view": "Sturges Formula",
              "values": [{"x": 3.9, "y": 2}, {"x": 4.7, "y": 0},
                         {"x": 5.5, "y": 1}]},
             {"disabled": 1, "key": "bar", "view": "Rice Rule",
              "values": [{"x": 3.9, "y": 2}, {"x": 4.7, "y": 0},
                         {"x": 5.5, "y": 1}]},
             {"disabled": 1, "key": "bar", "view": "One Half",
              "values": [{"x": 4.3, "y": 2}, {"x": 5.5, "y": 1}]}]]
        self.assertEqual(expected, chart.render())
class TableTestCase(test.TestCase):
    class Table(charts.Table):
        # Concrete subclass with two mocked column aggregators.
        columns = ["name", "foo", "bar"]
        foo = mock.Mock()
        bar = mock.Mock()
        def _init_columns(self):
            return costilius.OrderedDict(
                [("foo", self.foo), ("bar", self.bar)])
        def _map_iteration_values(self, iteration):
            return [("value_" + k, iteration[k]) for k in ["a", "b"]]
        def render(self):
            return self._data
    def setUp(self, *args, **kwargs):
        super(TableTestCase, self).setUp(*args, **kwargs)
        self.bench_info = {"iterations_count": 42,
                           "atomic": {"a": {}, "b": {}, "c": {}}}
    def test_add_iteration_and_render(self):
        # The abstract Table must not be instantiable directly.
        self.assertRaises(TypeError, charts.Table, self.bench_info)
        table = self.Table(self.bench_info)
        self.assertEqual(costilius.OrderedDict(), table.render())
        # Plain loop instead of a side-effect-only list comprehension.
        for i in range(1, 43):
            table.add_iteration({"a": i, "b": 43 - i})
        self.assertEqual(
            costilius.OrderedDict(
                [("value_a", costilius.OrderedDict([("foo", table.foo),
                                                    ("bar", table.bar)])),
                 ("value_b", costilius.OrderedDict([("foo", table.foo),
                                                    ("bar", table.bar)]))]),
            table.render())
class MainStatsTableTestCase(test.TestCase):
    def setUp(self, *args, **kwargs):
        super(MainStatsTableTestCase, self).setUp(*args, **kwargs)
        self.bench_info = {"iterations_count": 42,
                           "atomic": {"a": {}, "b": {}, "c": {}}}
        self.columns = [
            "Action", "Min (sec)", "Median (sec)", "90%ile (sec)",
            "95%ile (sec)", "Max (sec)", "Avg (sec)", "Success", "Count"]
    def test_add_iteration_and_render(self):
        table = charts.MainStatsTable({"iterations_count": 42,
                                       "atomic": {"foo": {}, "bar": {}}})
        # Plain loop instead of a side-effect-only list comprehension.
        # NOTE(review): "43 - 1" below looks like it may have been meant as
        # "43 - i", but expected_rows assumes the constant 42.0 for "bar",
        # so it is deliberately kept as-is.
        for i in range(1, 43):
            table.add_iteration(
                {"atomic_actions": costilius.OrderedDict([("foo", i),
                                                          ("bar", 43 - 1)]),
                 "duration": i, "error": i % 40})
        expected_rows = [
            ["foo", 1.0, 21.5, 38.5, 40.5, 42.0, 21.5, "100.0%", 42.0],
            ["bar", 42.0, 42.0, 42.0, 42.0, 42.0, 42.0, "100.0%", 42.0],
            ["total", 0.0, 0.0, 0.0, 0.0, 40.0, 0.952, "100.0%", 42.0]]
        self.assertEqual({"cols": self.columns, "rows": expected_rows},
                         table.render())
|
|
"""
Core OpenBCI object for handling connections and samples from the board.
EXAMPLE USE:
def handle_sample(sample):
print(sample.channel_data)
board = OpenBCIBoard()
board.print_register_settings()
board.start_streaming(handle_sample)
NOTE: If the daisy module is enabled, the callback will occur every two samples, hence "packet_id" will only contain even numbers. As a side effect, the sampling rate will be divided by 2.
FIXME: at the moment we can just force daisy mode, do not check that the module is detected.
TODO: enable impedance
"""
import serial
import struct
import numpy as np
import time
import timeit
import atexit
import logging
import threading
import sys
import pdb
import glob
SAMPLE_RATE = 250.0  # Hz -- Cyton base rate; effectively halved with daisy
START_BYTE = 0xA0  # start of data packet
END_BYTE = 0xC0  # end of data packet
ADS1299_Vref = 4.5  # reference voltage (V) for the ADS1299 ADC; set by hardware
ADS1299_gain = 24.0  # assumed ADS1299 gain setting; set by the board firmware
# Conversion factor: raw 24-bit two's-complement counts -> microvolts.
scale_fac_uVolts_per_count = ADS1299_Vref/float((pow(2,23)-1))/ADS1299_gain*1000000.
# Conversion factor: raw accelerometer counts -> G.
scale_fac_accel_G_per_count = 0.002 /(pow(2,4)) #assume set to +/-4G, so 2 mG
'''
#Commands for in SDK http://docs.openbci.com/software/01-Open BCI_SDK:
command_stop = "s";
command_startText = "x";
command_startBinary = "b";
command_startBinary_wAux = "n";
command_startBinary_4chan = "v";
command_activateFilters = "F";
command_deactivateFilters = "g";
command_deactivate_channel = {"1", "2", "3", "4", "5", "6", "7", "8"};
command_activate_channel = {"q", "w", "e", "r", "t", "y", "u", "i"};
command_activate_leadoffP_channel = {"!", "@", "#", "$", "%", "^", "&", "*"}; //shift + 1-8
command_deactivate_leadoffP_channel = {"Q", "W", "E", "R", "T", "Y", "U", "I"}; //letters (plus shift) right below 1-8
command_activate_leadoffN_channel = {"A", "S", "D", "F", "G", "H", "J", "K"}; //letters (plus shift) below the letters below 1-8
command_deactivate_leadoffN_channel = {"Z", "X", "C", "V", "B", "N", "M", "<"}; //letters (plus shift) below the letters below the letters below 1-8
command_biasAuto = "`";
command_biasFixed = "~";
'''
class OpenBCIBoard(object):
"""
Handle a connection to an OpenBCI board.
Args:
port: The port to connect to.
baud: The baud of the serial connection.
daisy: Enable or disable daisy module and 16 chans readings
aux, impedance: unused, for compatibility with ganglion API
"""
    def __init__(self, port=None, baud=115200, filter_data=True,
                 scaled_output=True, daisy=False, aux=False, impedance=False, log=True, timeout=None):
        """Open the serial link, reset the board and record configuration."""
        self.log = log # print_incoming_text needs log
        self.streaming = False
        self.baudrate = baud
        self.timeout = timeout
        if not port:
            # find_port() is defined elsewhere in this class (not shown here).
            port = self.find_port()
        self.port = port
        # might be handy to know API
        self.board_type = "cyton"
        print("Connecting to V3 at port %s" %(port))
        self.ser = serial.Serial(port= port, baudrate = baud, timeout=timeout)
        print("Serial established...")
        time.sleep(2)
        #Initialize 32-bit board, doesn't affect 8bit board
        self.ser.write(b'v');
        #wait for device to be ready
        time.sleep(1)
        self.print_incoming_text()
        self.streaming = False
        self.filtering_data = filter_data
        self.scaling_output = scaled_output
        self.eeg_channels_per_sample = 8 # number of EEG channels per sample *from the board*
        self.aux_channels_per_sample = 3 # number of AUX channels per sample *from the board*
        self.imp_channels_per_sample = 0 # impedance check not supported at the moment
        self.read_state = 0  # parser state for _read_serial_binary
        self.daisy = daisy
        self.last_odd_sample = OpenBCISample(-1, [], []) # used for daisy
        self.log_packet_count = 0  # packets received since the last warning
        self.attempt_reconnect = False
        self.last_reconnect = 0
        self.reconnect_freq = 5  # seconds between reconnect attempts
        self.packets_dropped = 0
        #Disconnects from board when terminated
        atexit.register(self.disconnect)
    def getBoardType(self):
        """ Returns the version of the board """
        # board_type is set once to "cyton" in __init__.
        return self.board_type
def setImpedance(self, flag):
""" Enable/disable impedance measure. Not implemented at the moment on Cyton. """
return
    def ser_write(self, b):
        """Access serial port object for write"""
        # Thin delegation so callers need not touch self.ser directly.
        self.ser.write(b)
    def ser_read(self):
        """Access serial port object for read"""
        # Thin delegation; blocks according to the serial timeout.
        return self.ser.read()
def ser_inWaiting(self):
"""Access serial port object for inWaiting"""
return self.ser.inWaiting();
def getSampleRate(self):
if self.daisy:
return SAMPLE_RATE/2
else:
return SAMPLE_RATE
def getNbEEGChannels(self):
if self.daisy:
return self.eeg_channels_per_sample*2
else:
return self.eeg_channels_per_sample
    def getNbAUXChannels(self):
        """Return the number of AUX (accelerometer) channels per sample."""
        return self.aux_channels_per_sample
    def getNbImpChannels(self):
        """Return the number of impedance channels per sample (0 = unsupported)."""
        return self.imp_channels_per_sample
    def start_streaming(self, callback, lapse=-1):
        """
        Start handling streaming data from the board. Call a provided callback
        for every single sample that is processed (every two samples with daisy module).
        Args:
          callback: A callback function -- or a list of functions -- that will receive a single argument of the
          OpenBCISample object captured.
          lapse: seconds after which streaming stops automatically (<= 0: never).
        """
        if not self.streaming:
            self.ser.write(b'b')  # 'b' = start binary streaming
            self.streaming = True
        start_time = timeit.default_timer()
        # Enclose callback function in a list if it comes alone
        if not isinstance(callback, list):
            callback = [callback]
        #Initialize check connection (defined elsewhere in this class)
        self.check_connection()
        while self.streaming:
            # read current sample
            sample = self._read_serial_binary()
            # if a daisy module is attached, wait to concatenate two samples (main board + daisy) before passing it to callback
            if self.daisy:
                # odd sample: daisy sample, save for later
                # NOTE(review): "~sample.id % 2" parses as "(~sample.id) % 2",
                # which is truthy for EVEN ids -- verify the intended parity.
                if ~sample.id % 2:
                    self.last_odd_sample = sample
                # even sample: concatenate and send if last sample was the first part, otherwise drop the packet
                elif sample.id - 1 == self.last_odd_sample.id:
                    # the aux data will be the average between the two samples, as the channel samples themselves have been averaged by the board
                    avg_aux_data = list((np.array(sample.aux_data) + np.array(self.last_odd_sample.aux_data))/2)
                    whole_sample = OpenBCISample(sample.id, sample.channel_data + self.last_odd_sample.channel_data, avg_aux_data)
                    for call in callback:
                        call(whole_sample)
            else:
                for call in callback:
                    call(sample)
            if(lapse > 0 and timeit.default_timer() - start_time > lapse):
                self.stop();
            if self.log:
                self.log_packet_count = self.log_packet_count + 1;
"""
PARSER:
Parses incoming data packet into OpenBCISample.
Incoming Packet Structure:
Start Byte(1)|Sample ID(1)|Channel Data(24)|Aux Data(6)|End Byte(1)
0xA0|0-255|8, 3-byte signed ints|3 2-byte signed ints|0xC0
"""
def _read_serial_binary(self, max_bytes_to_skip=3000):
def read(n):
bb = self.ser.read(n)
if not bb:
self.warn('Device appears to be stalled. Quitting...')
sys.exit()
raise Exception('Device Stalled')
sys.exit()
return '\xFF'
else:
return bb
for rep in range(max_bytes_to_skip):
#---------Start Byte & ID---------
if self.read_state == 0:
b = read(1)
if struct.unpack('B', b)[0] == START_BYTE:
if(rep != 0):
self.warn('Skipped %d bytes before start found' %(rep))
rep = 0;
packet_id = struct.unpack('B', read(1))[0] #packet id goes from 0-255
log_bytes_in = str(packet_id);
self.read_state = 1
#---------Channel Data---------
elif self.read_state == 1:
channel_data = []
for c in range(self.eeg_channels_per_sample):
#3 byte ints
literal_read = read(3)
unpacked = struct.unpack('3B', literal_read)
log_bytes_in = log_bytes_in + '|' + str(literal_read);
#3byte int in 2s compliment
if (unpacked[0] >= 127):
pre_fix = bytes(bytearray.fromhex('FF'))
else:
pre_fix = bytes(bytearray.fromhex('00'))
literal_read = pre_fix + literal_read;
#unpack little endian(>) signed integer(i) (makes unpacking platform independent)
myInt = struct.unpack('>i', literal_read)[0]
if self.scaling_output:
channel_data.append(myInt*scale_fac_uVolts_per_count)
else:
channel_data.append(myInt)
self.read_state = 2;
#---------Accelerometer Data---------
elif self.read_state == 2:
aux_data = []
for a in range(self.aux_channels_per_sample):
#short = h
acc = struct.unpack('>h', read(2))[0]
log_bytes_in = log_bytes_in + '|' + str(acc);
if self.scaling_output:
aux_data.append(acc*scale_fac_accel_G_per_count)
else:
aux_data.append(acc)
self.read_state = 3;
#---------End Byte---------
elif self.read_state == 3:
val = struct.unpack('B', read(1))[0]
log_bytes_in = log_bytes_in + '|' + str(val);
self.read_state = 0 #read next packet
if (val == END_BYTE):
sample = OpenBCISample(packet_id, channel_data, aux_data)
self.packets_dropped = 0
return sample
else:
self.warn("ID:<%d> <Unexpected END_BYTE found <%s> instead of <%s>"
%(packet_id, val, END_BYTE))
logging.debug(log_bytes_in);
self.packets_dropped = self.packets_dropped + 1
"""
Clean Up (atexit)
"""
    def stop(self):
        """Stop streaming: clear the flag and send the 's' (stop) command."""
        print("Stopping streaming...\nWait for buffer to flush...")
        self.streaming = False
        self.ser.write(b's')
        if self.log:
            logging.warning('sent <s>: stopped streaming')
    def disconnect(self):
        """Stop streaming if active and close the serial port (atexit hook)."""
        if(self.streaming == True):
            self.stop()
        if (self.ser.isOpen()):
            print("Closing Serial...")
            self.ser.close()
            logging.warning('serial closed')
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
#log how many packets where sent succesfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:'+str(self.log_packet_count))
self.log_packet_count = 0;
logging.warning(text)
print("Warning: %s" % text)
    def print_incoming_text(self):
        """
        When starting the connection, print all the debug data until
        we get to a line with the end sequence '$$$'.
        """
        line = ''
        #Wait for device to send data
        time.sleep(1)
        if self.ser.inWaiting():
            line = ''
            c = ''
            #Look for end sequence $$$
            # NOTE(review): this loop blocks until '$$$' arrives; it relies
            # on the board actually sending the terminator -- verify the
            # serial timeout covers the stalled case.
            while '$$$' not in line:
                c = self.ser.read().decode('utf-8', errors='replace') # we're supposed to get UTF8 text, but the board might behave otherwise
                line += c
            print(line);
        else:
            self.warn("No Message")
    def openbci_id(self, serial):
        """
        When automatically detecting port, parse the serial return for the "OpenBCI" ID.
        """
        # NOTE(review): the parameter name "serial" shadows the imported
        # "serial" module inside this method; harmless here (the module is
        # not used in this body) but confusing.
        line = ''
        #Wait for device to send data
        time.sleep(2)
        if serial.inWaiting():
            line = ''
            c = ''
            #Look for end sequence $$$
            while '$$$' not in line:
                c = serial.read().decode('utf-8', errors='replace') # we're supposed to get UTF8 text, but the board might behave otherwise
                line += c
            if "OpenBCI" in line:
                return True
        return False
def print_register_settings(self):
self.ser.write(b'?')
time.sleep(0.5)
self.print_incoming_text();
    # DEBUGGING: Prints individual incoming bytes
def print_bytes_in(self):
if not self.streaming:
self.ser.write(b'b')
self.streaming = True
while self.streaming:
print(struct.unpack('B',self.ser.read())[0]);
'''Incoming Packet Structure:
Start Byte(1)|Sample ID(1)|Channel Data(24)|Aux Data(6)|End Byte(1)
0xA0|0-255|8, 3-byte signed ints|3 2-byte signed ints|0xC0'''
def print_packets_in(self):
while self.streaming:
b = struct.unpack('B', self.ser.read())[0];
if b == START_BYTE:
self.attempt_reconnect = False
if skipped_str:
logging.debug('SKIPPED\n' + skipped_str + '\nSKIPPED')
skipped_str = ''
packet_str = "%03d"%(b) + '|';
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + "%03d"%(b) + '|';
#data channels
for i in range(24-1):
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b);
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b) + '|';
#aux channels
for i in range(6-1):
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b);
b = struct.unpack('B', self.ser.read())[0];
packet_str = packet_str + '.' + "%03d"%(b) + '|';
#end byte
b = struct.unpack('B', self.ser.read())[0];
#Valid Packet
if b == END_BYTE:
packet_str = packet_str + '.' + "%03d"%(b) + '|VAL';
print(packet_str)
#logging.debug(packet_str)
#Invalid Packet
else:
packet_str = packet_str + '.' + "%03d"%(b) + '|INV';
#Reset
self.attempt_reconnect = True
else:
print(b)
if b == END_BYTE:
skipped_str = skipped_str + '|END|'
else:
skipped_str = skipped_str + "%03d"%(b) + '.'
if self.attempt_reconnect and (timeit.default_timer()-self.last_reconnect) > self.reconnect_freq:
self.last_reconnect = timeit.default_timer()
self.warn('Reconnecting')
self.reconnect()
def check_connection(self, interval = 2, max_packets_to_skip=10):
# stop checking when we're no longer streaming
if not self.streaming:
return
#check number of dropped packages and establish connection problem if too large
if self.packets_dropped > max_packets_to_skip:
#if error, attempt to reconect
self.reconnect()
# check again again in 2 seconds
threading.Timer(interval, self.check_connection).start()
    def reconnect(self):
        """Try to recover the data stream after packets were dropped.

        Stops the current stream, soft-resets the board ('v') and restarts
        streaming ('b'), pausing between commands so the board can react.
        """
        self.packets_dropped = 0
        self.warn('Reconnecting')
        self.stop()
        time.sleep(0.5)
        self.ser.write(b'v')  # 'v' = soft reset
        time.sleep(0.5)
        self.ser.write(b'b')  # 'b' = begin streaming
        time.sleep(0.5)
        self.streaming = True
        #self.attempt_reconnect = False
    # Adds a notch filter at 60 Hz to cancel out ambient electrical noise
def enable_filters(self):
self.ser.write(b'f')
self.filtering_data = True;
def disable_filters(self):
self.ser.write(b'g')
self.filtering_data = False;
def test_signal(self, signal):
""" Enable / disable test signal """
if signal == 0:
self.ser.write(b'0')
self.warn("Connecting all pins to ground")
elif signal == 1:
self.ser.write(b'p')
self.warn("Connecting all pins to Vcc")
elif signal == 2:
self.ser.write(b'-')
self.warn("Connecting pins to low frequency 1x amp signal")
elif signal == 3:
self.ser.write(b'=')
self.warn("Connecting pins to high frequency 1x amp signal")
elif signal == 4:
self.ser.write(b'[')
self.warn("Connecting pins to low frequency 2x amp signal")
elif signal == 5:
self.ser.write(b']')
self.warn("Connecting pins to high frequency 2x amp signal")
else:
self.warn("%s is not a known test signal. Valid signals go from 0-5" %(signal))
def set_channel(self, channel, toggle_position):
""" Enable / disable channels """
#Commands to set toggle to on position
if toggle_position == 1:
if channel is 1:
self.ser.write(b'!')
if channel is 2:
self.ser.write(b'@')
if channel is 3:
self.ser.write(b'#')
if channel is 4:
self.ser.write(b'$')
if channel is 5:
self.ser.write(b'%')
if channel is 6:
self.ser.write(b'^')
if channel is 7:
self.ser.write(b'&')
if channel is 8:
self.ser.write(b'*')
if channel is 9 and self.daisy:
self.ser.write(b'Q')
if channel is 10 and self.daisy:
self.ser.write(b'W')
if channel is 11 and self.daisy:
self.ser.write(b'E')
if channel is 12 and self.daisy:
self.ser.write(b'R')
if channel is 13 and self.daisy:
self.ser.write(b'T')
if channel is 14 and self.daisy:
self.ser.write(b'Y')
if channel is 15 and self.daisy:
self.ser.write(b'U')
if channel is 16 and self.daisy:
self.ser.write(b'I')
#Commands to set toggle to off position
elif toggle_position == 0:
if channel is 1:
self.ser.write(b'1')
if channel is 2:
self.ser.write(b'2')
if channel is 3:
self.ser.write(b'3')
if channel is 4:
self.ser.write(b'4')
if channel is 5:
self.ser.write(b'5')
if channel is 6:
self.ser.write(b'6')
if channel is 7:
self.ser.write(b'7')
if channel is 8:
self.ser.write(b'8')
if channel is 9 and self.daisy:
self.ser.write(b'q')
if channel is 10 and self.daisy:
self.ser.write(b'w')
if channel is 11 and self.daisy:
self.ser.write(b'e')
if channel is 12 and self.daisy:
self.ser.write(b'r')
if channel is 13 and self.daisy:
self.ser.write(b't')
if channel is 14 and self.daisy:
self.ser.write(b'y')
if channel is 15 and self.daisy:
self.ser.write(b'u')
if channel is 16 and self.daisy:
self.ser.write(b'i')
def find_port(self):
# Finds the serial port names
if sys.platform.startswith('win'):
ports = ['COM%s' % (i+1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
ports = glob.glob('/dev/ttyUSB*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.usbserial*')
else:
raise EnvironmentError('Error finding ports on your operating system')
openbci_port = ''
for port in ports:
try:
s = serial.Serial(port= port, baudrate = self.baudrate, timeout=self.timeout)
s.write(b'v')
openbci_serial = self.openbci_id(s)
s.close()
if openbci_serial:
openbci_port = port;
except (OSError, serial.SerialException):
pass
if openbci_port == '':
raise OSError('Cannot find OpenBCI port')
else:
return openbci_port
class OpenBCISample(object):
    """A single sample read from the OpenBCI board.

    NB: dummy implementation kept for plugin compatibility.
    """
    def __init__(self, packet_id, channel_data, aux_data):
        self.id = packet_id
        self.channel_data = channel_data
        self.aux_data = aux_data
        self.imp_data = []  # impedance data placeholder; unused by this board
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
# Older google-api-core releases lack the _MethodDefault sentinel; fall back
# to a plain ``object`` member in the retry union on those versions.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.compute_v1.services.licenses import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import LicensesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import LicensesRestTransport
class LicensesClientMeta(type):
    """Metaclass for the Licenses client.

    Exposes class-level helpers for building and retrieving support objects
    (e.g. the transport) without polluting client instances.
    """

    # Registry of available transports keyed by label; insertion order
    # determines which one is the default.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[LicensesTransport]]
    _transport_registry["rest"] = LicensesRestTransport

    def get_transport_class(cls, label: str = None,) -> Type[LicensesTransport]:
        """Return the transport class registered under *label*.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        if label:
            return cls._transport_registry[label]
        # No label requested: default to the first registered transport.
        return next(iter(cls._transport_registry.values()))
class LicensesClient(metaclass=LicensesClientMeta):
"""The Licenses API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LicensesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LicensesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> LicensesTransport:
"""Returns the transport used by the client instance.
Returns:
LicensesTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, LicensesTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the licenses client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, LicensesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, LicensesTransport):
# transport is a LicensesTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def delete_unary(
self,
request: Union[compute.DeleteLicenseRequest, dict] = None,
*,
project: str = None,
license_: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Deletes the specified license. *Caution* This resource is
intended for use only by third-party partners who are creating
Cloud Marketplace images.
Args:
request (Union[google.cloud.compute_v1.types.DeleteLicenseRequest, dict]):
The request object. A request message for
Licenses.Delete. See the method description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
license_ (str):
Name of the license resource to
delete.
This corresponds to the ``license_`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, license_])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.DeleteLicenseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.DeleteLicenseRequest):
request = compute.DeleteLicenseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if license_ is not None:
request.license_ = license_
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get(
self,
request: Union[compute.GetLicenseRequest, dict] = None,
*,
project: str = None,
license_: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.License:
r"""Returns the specified License resource. *Caution* This resource
is intended for use only by third-party partners who are
creating Cloud Marketplace images.
Args:
request (Union[google.cloud.compute_v1.types.GetLicenseRequest, dict]):
The request object. A request message for Licenses.Get.
See the method description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
license_ (str):
Name of the License resource to
return.
This corresponds to the ``license_`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.License:
Represents a License resource. A License represents
billing and aggregate usage data for public and
marketplace images. *Caution* This resource is intended
for use only by third-party partners who are creating
Cloud Marketplace images.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, license_])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetLicenseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetLicenseRequest):
request = compute.GetLicenseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if license_ is not None:
request.license_ = license_
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_iam_policy(
self,
request: Union[compute.GetIamPolicyLicenseRequest, dict] = None,
*,
project: str = None,
resource: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Policy:
r"""Gets the access control policy for a resource. May be empty if
no such policy or resource exists. *Caution* This resource is
intended for use only by third-party partners who are creating
Cloud Marketplace images.
Args:
request (Union[google.cloud.compute_v1.types.GetIamPolicyLicenseRequest, dict]):
The request object. A request message for
Licenses.GetIamPolicy. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (str):
Name or id of the resource for this
request.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Policy:
An Identity and Access Management (IAM) policy, which
specifies access controls for Google Cloud resources. A
Policy is a collection of bindings. A binding binds one
or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role is
a named list of permissions; each role can be an IAM
predefined role or a user-created custom role. For some
types of Google Cloud resources, a binding can also
specify a condition, which is a logical expression that
allows access to a resource only if the expression
evaluates to true. A condition can add constraints based
on attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM
policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:eve@example.com" ], "condition": { "title":
"expirable access", "description": "Does not grant
access after Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin - members:
- user:\ eve@example.com role:
roles/resourcemanager.organizationViewer condition:
title: expirable access description: Does not grant
access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features,
see the [IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetIamPolicyLicenseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetIamPolicyLicenseRequest):
request = compute.GetIamPolicyLicenseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if resource is not None:
request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def insert_unary(
self,
request: Union[compute.InsertLicenseRequest, dict] = None,
*,
project: str = None,
license_resource: compute.License = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Create a License resource in the specified project. *Caution*
This resource is intended for use only by third-party partners
who are creating Cloud Marketplace images.
Args:
request (Union[google.cloud.compute_v1.types.InsertLicenseRequest, dict]):
The request object. A request message for
Licenses.Insert. See the method description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
license_resource (google.cloud.compute_v1.types.License):
The body resource for this request
This corresponds to the ``license_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
[Global](/compute/docs/reference/rest/v1/globalOperations)
\*
[Regional](/compute/docs/reference/rest/v1/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/v1/zoneOperations)
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the globalOperations
resource. - For regional operations, use the
regionOperations resource. - For zonal operations, use
the zonalOperations resource. For more information, read
Global, Regional, and Zonal Resources.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, license_resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.InsertLicenseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.InsertLicenseRequest):
request = compute.InsertLicenseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if license_resource is not None:
request.license_resource = license_resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.insert]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list(
self,
request: Union[compute.ListLicensesRequest, dict] = None,
*,
project: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPager:
r"""Retrieves the list of licenses available in the specified
project. This method does not get any licenses that belong to
other projects, including licenses attached to
publicly-available images, like Debian 9. If you want to get a
list of publicly-available licenses, use this method to make a
request to the respective image project, such as debian-cloud or
windows-cloud. *Caution* This resource is intended for use only
by third-party partners who are creating Cloud Marketplace
images.
Args:
request (Union[google.cloud.compute_v1.types.ListLicensesRequest, dict]):
The request object. A request message for Licenses.List.
See the method description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.licenses.pagers.ListPager:
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.ListLicensesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.ListLicensesRequest):
request = compute.ListLicensesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def set_iam_policy(
self,
request: Union[compute.SetIamPolicyLicenseRequest, dict] = None,
*,
project: str = None,
resource: str = None,
global_set_policy_request_resource: compute.GlobalSetPolicyRequest = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Policy:
r"""Sets the access control policy on the specified resource.
Replaces any existing policy. *Caution* This resource is
intended for use only by third-party partners who are creating
Cloud Marketplace images.
Args:
request (Union[google.cloud.compute_v1.types.SetIamPolicyLicenseRequest, dict]):
The request object. A request message for
Licenses.SetIamPolicy. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (str):
Name or id of the resource for this
request.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest):
The body resource for this request
This corresponds to the ``global_set_policy_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Policy:
An Identity and Access Management (IAM) policy, which
specifies access controls for Google Cloud resources. A
Policy is a collection of bindings. A binding binds one
or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role is
a named list of permissions; each role can be an IAM
predefined role or a user-created custom role. For some
types of Google Cloud resources, a binding can also
specify a condition, which is a logical expression that
allows access to a resource only if the expression
evaluates to true. A condition can add constraints based
on attributes of the request, the resource, or both. To
learn which resources support conditions in their IAM
policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:** { "bindings": [ { "role":
"roles/resourcemanager.organizationAdmin", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
] }, { "role":
"roles/resourcemanager.organizationViewer", "members": [
"user:eve@example.com" ], "condition": { "title":
"expirable access", "description": "Does not grant
access after Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
"BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin - members:
- user:\ eve@example.com role:
roles/resourcemanager.organizationViewer condition:
title: expirable access description: Does not grant
access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
version: 3 For a description of IAM and its features,
see the [IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project, resource, global_set_policy_request_resource]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.SetIamPolicyLicenseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.SetIamPolicyLicenseRequest):
request = compute.SetIamPolicyLicenseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if resource is not None:
request.resource = resource
if global_set_policy_request_resource is not None:
request.global_set_policy_request_resource = (
global_set_policy_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def test_iam_permissions(
self,
request: Union[compute.TestIamPermissionsLicenseRequest, dict] = None,
*,
project: str = None,
resource: str = None,
test_permissions_request_resource: compute.TestPermissionsRequest = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TestPermissionsResponse:
r"""Returns permissions that a caller has on the specified resource.
*Caution* This resource is intended for use only by third-party
partners who are creating Cloud Marketplace images.
Args:
request (Union[google.cloud.compute_v1.types.TestIamPermissionsLicenseRequest, dict]):
The request object. A request message for
Licenses.TestIamPermissions. See the method description
for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
resource (str):
Name or id of the resource for this
request.
This corresponds to the ``resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest):
The body resource for this request
This corresponds to the ``test_permissions_request_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.TestPermissionsResponse:
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project, resource, test_permissions_request_resource]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.TestIamPermissionsLicenseRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.TestIamPermissionsLicenseRequest):
request = compute.TestIamPermissionsLicenseRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if resource is not None:
request.resource = resource
if test_permissions_request_resource is not None:
request.test_permissions_request_resource = (
test_permissions_request_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
    def __enter__(self):
        """Enter the runtime context; returns the client itself."""
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # Returns None (falsy), so any exception raised inside the
        # ``with`` block propagates to the caller after close().
        self.transport.close()
# Look up the installed package version so outgoing requests carry an
# accurate client-info metadata header.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout);
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API of this module.
__all__ = ("LicensesClient",)
|
|
#!/usr/bin/env python
import os
import sys
import subprocess
import json
from pprint import pprint
import StringIO
import csv
import ast
from getpass import getpass
from hashlib import md5
def generate_nodesfile(vcname, subnet=None):
    """Query the `cm comet` CLI for cluster ``vcname`` and write two files.

    vcnet_<vcname>.txt  -- frontend public network settings (IP/MASK/
                           GATEWAY/DNS/NTP), one "KEY: value" line each.
    vcnodes_<vcname>.txt -- one "name,mac,private-ip" line per compute node.

    ``subnet`` is the private subnet prefix used to number the compute
    nodes sequentially; defaults to "10.0.0.".
    """
    nodescmd = "cm comet cluster {} --format=rest".format(vcname)
    proc = subprocess.Popen(nodescmd.split(" "), stdout=subprocess.PIPE)
    (out, err) = proc.communicate()
    # NOTE(review): assumes the CLI prints a Python-literal structure;
    # if the output is actually JSON, json.loads would be stricter -- confirm.
    nodesobj = ast.literal_eval(out)
    frontend = nodesobj[0]['frontend']
    with open('vcnet_{}.txt'.format(vcname), 'w') as f:
        print >> f, "IP:", frontend["pub_ip"]
        print >> f, "MASK:", frontend["pub_netmask"]
        print >> f, "GATEWAY:", frontend["gateway"]
        print >> f, "DNS:", "{} {}".format(frontend["dns1"], frontend["dns2"])
        print >> f, "NTP:", frontend["ntp"]
    nodes = nodesobj[0]['computes']
    if not subnet:
        subnet = "10.0.0."
    # Number node IPs starting at <subnet>1.
    ip_idx = 0
    with open('vcnodes_{}.txt'.format(vcname), 'w') as f:
        for node in nodes:
            ip_idx += 1
            print >> f, "{},{},{}".format(node["name"],
                                          node["interface"][0]["mac"],
                                          "{}{}".format(subnet, ip_idx)
                                          )
def generate_pxefile(nodesfile=None, netfile=None, vc=None):
    """Render one PXE config per compute node from the tftpboot template.

    Reads network settings from ``netfile`` ("KEY: value" lines) and node
    entries from ``nodesfile`` ("name,mac,ip" lines), then writes
    /var/lib/tftpboot/pxelinux.cfg/01-<mac-with-dashes> for each node with
    the template placeholders substituted. When ``vc`` is given, the two
    file names default to vcnodes_<vc>.txt / vcnet_<vc>.txt.
    """
    if vc:
        if not nodesfile:
            nodesfile = "vcnodes_{}.txt".format(vc)
        if not netfile:
            netfile = "vcnet_{}.txt".format(vc)
    else:
        if not (nodesfile and netfile):
            print ("parameters not specified")
            return
    # Template with $NETIP/$NODENAME/... placeholders.
    pxetemp = "/var/lib/tftpboot/pxelinux.cfg/default.temp"
    netconfs = {}
    with open(netfile) as file:
        lines = file.readlines()
        for line in lines:
            row = line.split(": ")
            netconfs[row[0]] = row[1].strip("\n")
    with open(nodesfile) as nodes:
        lines = nodes.readlines()
        for aline in lines:
            row = aline.split(",")
            #print (row)
            name = row[0]
            mac = row[1]
            ip = row[2].strip("\n")
            # PXE convention: config file is "01-" + MAC with dashes.
            filename="01-{}".format(mac.replace(":","-"))
            #print (filename)
            # Mask and gateway are deliberately hard-coded here; the
            # values read from netfile are kept commented for reference.
            replacements = {'$NETIP':ip,
                            '$NODENAME':name,
                            #'$NETMASK':netconfs["MASK"],
                            '$NETMASK':"255.255.255.0",
                            #'$NETGATEWAY':netconfs["GATEWAY"],
                            '$NETGATEWAY':"10.0.0.254",
                            "$DNS":netconfs["DNS"]}
            #print (replacements)
            with open('/var/lib/tftpboot/pxelinux.cfg/{}'.format(filename), 'w') as outfile:
                with open(pxetemp) as infile:
                    # Substitute every placeholder on every template line.
                    # NOTE: dict.iteritems() is Python-2-only, consistent
                    # with the rest of this script.
                    for line in infile:
                        for src, target in replacements.iteritems():
                            line = line.replace(src, target)
                        #print (line)
                        outfile.write(line)
            #print ("finished writing one file...")
def setboot(node, nodesfile=None, vc=None, net=True):
    """Switch one node's PXE config between network install and local boot.

    Looks up ``node`` in ``nodesfile`` (name,mac,ip lines; defaults to
    vcnodes_<vc>.txt when ``vc`` is given), then rewrites the node's
    pxelinux.cfg file, flipping the "default" entry to "netinstall" when
    ``net`` is true or back to "local" otherwise.
    """
    if vc:
        if not nodesfile:
            nodesfile = "vcnodes_{}.txt".format(vc)
    else:
        if not nodesfile:
            print ("parameters not specified")
            return
    filename = None
    with open(nodesfile) as nodes:
        for aline in nodes.readlines():
            row = aline.split(",")
            if row[0] == node:
                # PXE config name: "01-" + MAC with dashes.
                filename = "01-{}".format(row[1].replace(":", "-"))
                break
    if not filename:
        # Previously this silently did nothing; report the miss so typos
        # in the node name are visible.
        print ("node {} not found in {}".format(node, nodesfile))
        return
    path = '/var/lib/tftpboot/pxelinux.cfg/{}'.format(filename)
    with open(path) as infile:
        lines = infile.readlines()
    netbootline = "default netinstall"
    localbootline = "default local"
    with open(path, 'w') as outfile:
        for line in lines:
            if net:
                line = line.replace(localbootline, netbootline)
            else:
                line = line.replace(netbootline, localbootline)
            outfile.write(line)
def addhosts(nodesfile=None, vc=None):
    """Append an "<ip>\\t<name>.local <name>" entry per compute node to /etc/hosts.

    ``nodesfile`` holds "name,mac,ip" lines; when ``vc`` is given it
    defaults to vcnodes_<vc>.txt. Requires write access to /etc/hosts.
    """
    if vc:
        if not nodesfile:
            nodesfile = "vcnodes_{}.txt".format(vc)
    else:
        if not nodesfile:
            print ("parameters not specified")
            return
    with open(nodesfile) as nodes, open("/etc/hosts", "a") as hostfile:
        # Blank line separating our entries from the existing content.
        print >> hostfile
        for aline in nodes.readlines():
            row = aline.split(",")
            name = row[0]
            ip = row[2].strip("\n")
            print >> hostfile, "{}\t{}.local {}".format(ip, name, name)
def setpassword():
    """Prompt for the compute-node root password and substitute its MD5
    hex digest for the $ROOT_PASSWORD placeholder in the kickstart file.
    """
    print ("Type the root password for the computenodes:")
    # NOTE(review): MD5 is a weak password hash; kickstart's
    # "rootpw --iscrypted" usually expects a crypt(3)-style hash --
    # confirm ks.cfg really consumes an MD5 hex digest.
    password = md5(getpass()).hexdigest()
    ksfile = "/var/www/html/ks.cfg"
    with open(ksfile) as infile:
        lines = infile.readlines()
    # Rewrite the file in place with the placeholder replaced.
    with open(ksfile, 'w') as outfile:
        for line in lines:
            line = line.replace("$ROOT_PASSWORD", password)
            outfile.write(line)
# Run BEFORE installing the compute nodes.
def setkey():
    """Generate an ssh keypair and embed the public key in the kickstart
    post-install script ($PUBLICKEY placeholder in postscript.sh).
    """
    # Interactive: ssh-keygen prompts for path/passphrase.
    os.system("ssh-keygen")
    keyfile = ("{}/.ssh/id_rsa.pub").format(get_sudouser_home())
    key = ''
    with open(keyfile) as f:
        # Public keys are single-line; take the first line only.
        key = f.readline().strip("\n")
    scriptfile = "/var/www/html/postscript.sh"
    with open(scriptfile) as infile:
        lines = infile.readlines()
    # Rewrite the script in place with the placeholder replaced.
    with open(scriptfile, 'w') as outfile:
        for line in lines:
            line = line.replace("$PUBLICKEY", key)
            outfile.write(line)
# Call AFTER the compute nodes are up and running.
def setknownhosts(nodesfile=None, vc=None):
    """Collect host keys for every compute node, then push known_hosts,
    the private key, and /etc/hosts entries out to each node.

    ``nodesfile`` holds "name,mac,ip" lines; when ``vc`` is given it
    defaults to vcnodes_<vc>.txt.
    """
    if vc:
        if not nodesfile:
            nodesfile = "vcnodes_{}.txt".format(vc)
    else:
        if not nodesfile:
            print ("parameters not specified")
            return
    ips = []
    # Accumulated "<ip>\t<name>" lines appended to each node's /etc/hosts.
    iphosts = '\n'
    with open(nodesfile) as nodes:
        lines = nodes.readlines()
        for aline in lines:
            row = aline.split(",")
            name = row[0]
            mac = row[1]
            ip = row[2].strip("\n")
            ips.append(ip)
            iphosts += "{}\t{}\n".format(ip, name)
            # NOTE(review): os.system with interpolated values goes through
            # the shell; safe only while node names/IPs come from our own
            # generated files. subprocess with an argument list would be safer.
            os.system("ssh-keyscan -H {} >> {} 2>/dev/null".format(ip, "{}/.ssh/known_hosts".format(get_sudouser_home())))
            os.system("ssh-keyscan -H {} >> {} 2>/dev/null".format(name, "{}/.ssh/known_hosts".format(get_sudouser_home())))
    for ip in ips:
        os.system("scp {}/.ssh/known_hosts root@{}:/root/.ssh/".format(get_sudouser_home(), ip))
        os.system("scp {}/.ssh/id_rsa root@{}:/root/.ssh/".format(get_sudouser_home(), ip))
        os.system("echo '{}' | ssh root@{} 'cat >> /etc/hosts'".format(iphosts, ip))
# sudo cmutil.py still gets the unprivileged user directory, which breaks
# ssh key location
def get_sudouser_home():
    """Home directory to use for ssh material.

    Under sudo the unprivileged user's home can leak through "~"
    expansion, so when $USER is root we return /root explicitly.
    """
    return "/root" if 'root' == os.getenv("USER") else os.path.expanduser("~")
def usage():
    """Print the supported command-line invocations of this script."""
    examples = [
        "Usage:",
        "./cmutil.py nodesfile",
        "./cmutil.py pxefile vc2",
        "./cmutil.py setkey",
        "./cmutil.py setpassword",
        "./cmutil.py setboot vc2 node1 net=false",
        "./cmutil.py setboot vc2 node1 net=true",
        "./cmutil.py addhosts vc2",
        "./cmutil.py setknownhosts vc2",
    ]
    print ("\n".join(examples) + "\n")
if __name__ == "__main__":
    # Command-line dispatch: first arg is the subcommand, second (when
    # present) is the virtual-cluster name.
    argv = sys.argv[1:]
    commands = ['nodesfile', 'pxefile', 'setkey', 'setpassword', 'setboot', 'addhosts', 'setknownhosts']
    if len(argv) >= 1:
        cmd = argv[0]
        cluster = ''
        if len(argv) > 1:
            cluster = argv[1]
        if cmd in commands:
            if cmd == 'nodesfile':
                generate_nodesfile(cluster)
            elif cmd == 'pxefile':
                generate_pxefile(vc=cluster)
            elif cmd == 'setkey':
                setkey()
            elif cmd == 'setpassword':
                setpassword()
            elif cmd == 'setboot':
                # Expects: setboot <vc> <node> net=<true|false>.
                # NOTE(review): argv[2]/argv[3] raise IndexError when
                # omitted, and "net=maybe" leaves netboot unbound
                # (NameError below) -- worth hardening.
                node = argv[2]
                bootparam = argv[3]
                params = bootparam.split("=")
                if params[0].lower() == 'net':
                    if params[1].lower() == 'true':
                        netboot = True
                    elif params[1].lower() == 'false':
                        netboot = False
                else:
                    netboot = False
                setboot(node, vc=cluster, net=netboot)
            elif cmd == 'addhosts':
                addhosts(vc=cluster)
            elif cmd == 'setknownhosts':
                setknownhosts(vc=cluster)
            else:
                usage()
        else:
            usage()
    else:
        usage()
|
|
# flake8: noqa
# pylint: skip-file
# noqa: E301,E302
class YeditException(Exception):
    """Raised for any error originating in Yedit operations."""
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-public-methods
class Yedit(object):
    ''' Class to modify yaml files.

    Keys address nested data with a configurable separator, e.g. "a.b[0].c";
    "[n]" indexes into lists. Most mutators return (changed, yaml_dict).
    '''
    # %s in these patterns is filled with the separator characters that are
    # NOT the active separator (see parse_key / valid_key).
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        # Load from content or filename immediately; fall back to an
        # empty dict so later operations never see None.
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        # BUG FIX: the setter previously took no value argument and
        # returned the old separator instead of assigning, so
        # ``obj.separator = x`` raised TypeError.
        self._separator = value

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        # Returns a list of (array_index, dict_key) tuples; exactly one
        # member of each tuple is non-empty.
        return re.findall(Yedit.re_key % ''.join(common_separators), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False
        return True

    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key '''
        # Empty key means "clear the whole structure".
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk to the parent of the final component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a#b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk/create down to the parent of the final component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue
                elif data and not isinstance(data, dict):
                    return None
                # Missing/empty intermediate dict keys are created.
                data[dict_key] = {}
                data = data[dict_key]
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        if key == '':
            data = item
        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        # Write to a sibling temp file then rename for a near-atomic update.
        tmp_filename = filename + '.yedit'
        with open(tmp_filename, 'w') as yfd:
            yfd.write(contents)
        os.rename(tmp_filename, filename)

    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')
        # pylint: disable=no-member
        # ruamel round-trip objects carry format attributes ('fa').
        if hasattr(self.yaml_dict, 'fa'):
            self.yaml_dict.fa.set_block_style()
        Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None
        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()
        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True
        return False

    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()
        if not contents and not self.content:
            return None
        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content
        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)
        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None
        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)
            entry.pop(ind)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)

    def delete(self, path):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # All key/value pairs of `value` must match the entry.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                    else:
                        rval = True
                return rval
            return value in entry
        return entry == value

    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            # Create the list on demand.
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)
        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in ' +
                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            # Locate the slot to replace: by current value, or by index.
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work on ruamel structures; round-trip through
        # yaml to get an independent copy we can mutate safely.
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                  default_flow_style=False),
                             yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work; round-trip through yaml instead.
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False),  # noqa: E501
                                 yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value'''
        if invalue is None:
            return None
        curr_value = invalue
        if val_type == 'yaml':
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input; inputs here come from module params.
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)
        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']
        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
                                     % (inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)
        # If vtype is not str then go ahead and attempt to yaml load it.
        if isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming ' +
                                     'value. value=[%s] vtype=[%s]'
                                     % (type(inc_value), vtype))
        return inc_value

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(module):
        '''perform the idempotent crud operations'''
        yamlfile = Yedit(filename=module.params['src'],
                         backup=module.params['backup'],
                         separator=module.params['separator'])
        if module.params['src']:
            rval = yamlfile.load()
            if yamlfile.yaml_dict is None and \
               module.params['state'] != 'present':
                # BUG FIX: the %s placeholder was never interpolated.
                return {'failed': True,
                        'msg': ('Error opening file [%s]. Verify that the '
                                'file exists, that it has correct '
                                'permissions, and is valid yaml.')
                               % module.params['src']}
        if module.params['state'] == 'list':
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                yamlfile.yaml_dict = content
            if module.params['key']:
                rval = yamlfile.get(module.params['key']) or {}
            return {'changed': False, 'result': rval, 'state': "list"}
        elif module.params['state'] == 'absent':
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                yamlfile.yaml_dict = content
            if module.params['update']:
                rval = yamlfile.pop(module.params['key'],
                                    module.params['value'])
            else:
                rval = yamlfile.delete(module.params['key'])
            if rval[0] and module.params['src']:
                yamlfile.write()
            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
        elif module.params['state'] == 'present':
            # check if content is different than what is in the file
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   module.params['value'] is None:
                    return {'changed': False,
                            'result': yamlfile.yaml_dict,
                            'state': "present"}
                yamlfile.yaml_dict = content
            # we were passed a value; parse it
            if module.params['value']:
                value = Yedit.parse_value(module.params['value'],
                                          module.params['value_type'])
                key = module.params['key']
                if module.params['update']:
                    # pylint: disable=line-too-long
                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
                                                      module.params['curr_value_format'])  # noqa: E501
                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501
                elif module.params['append']:
                    rval = yamlfile.append(key, value)
                else:
                    rval = yamlfile.put(key, value)
                if rval[0] and module.params['src']:
                    yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1], 'state': "present"}
            # no edits to make
            if module.params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': "present"}
        return {'failed': True, 'msg': 'Unknown state passed'}
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
# Signature key under which the default serving output is exported.
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def multi_class_head(n_classes,
                     weight_column=None,
                     label_vocabulary=None,
                     name=None):
  """Returns a `_Head` for classification over mutually exclusive classes.

  The head trains with `sparse_softmax_cross_entropy` loss and expects
  integer class-index labels (or strings, when `label_vocabulary` is
  supplied).

  Args:
    n_classes: Total number of classes; must be greater than 2 (use
      `binary_classification_head` for the two-class case).
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` naming the per-example weight
      feature; each example's loss is multiplied by its weight, which can
      be used to down-weight or boost examples during training.
    label_vocabulary: Optional list of possible label strings. When omitted,
      labels must already be integer ids in `[0, n_classes)`; when given,
      labels must be strings drawn from this vocabulary, and string labels
      without a vocabulary are an error.
    name: Optional head name; summary and metric keys are suffixed with
      `"/" + name`, and it is used as the `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for multi class classification.

  Raises:
    ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.
  """
  kwargs = dict(
      n_classes=n_classes,
      weight_column=weight_column,
      label_vocabulary=label_vocabulary,
      name=name)
  return head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint:disable=protected-access
      **kwargs)
def binary_classification_head(
    weight_column=None, thresholds=None, label_vocabulary=None, name=None):
  """Returns a `_Head` for single-label binary classification.

  The head trains with `sigmoid_cross_entropy_with_logits` loss and expects
  float labels of shape `(batch_size, 1)` (or strings, when
  `label_vocabulary` is supplied).

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` naming the per-example weight
      feature; each example's loss is multiplied by its weight.
    thresholds: Iterable of floats in the open interval `(0, 1)`. For every
      threshold an eval metric is generated for binary-classification
      metrics such as precision and recall; a logistic value above the
      threshold counts as `true`, below as `false`.
    label_vocabulary: Optional list of possible label strings. When omitted,
      labels must already be encoded within `[0, 1]`; when given, labels
      must be strings drawn from this vocabulary, and string labels without
      a vocabulary are an error.
    name: Optional head name; summary and metric keys are suffixed with
      `"/" + name`, and it is used as the `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for binary classification.

  Raises:
    ValueError: if `thresholds` contains a value outside of `(0, 1)`.
  """
  kwargs = dict(
      weight_column=weight_column,
      thresholds=thresholds,
      label_vocabulary=label_vocabulary,
      name=name)
  return head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(  # pylint:disable=protected-access
      **kwargs)
def regression_head(weight_column=None,
                    label_dimension=1,
                    name=None):
  """Returns a `_Head` for regression trained with mean squared error.

  Args:
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` naming the per-example weight
      feature; each example's loss is multiplied by its weight.
    label_dimension: Number of regression targets per example, i.e. the size
      of the last dimension of the labels `Tensor` (typically of shape
      `[batch_size, label_dimension]`).
    name: Optional head name; summary and metric keys are suffixed with
      `"/" + name`, and it is used as the `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for linear regression.
  """
  kwargs = dict(
      weight_column=weight_column,
      label_dimension=label_dimension,
      name=name)
  return head_lib._regression_head_with_mean_squared_error_loss(  # pylint:disable=protected-access
      **kwargs)
def multi_label_head(n_classes,
                     weight_column=None,
                     thresholds=None,
                     label_vocabulary=None,
                     loss_fn=None,
                     name=None):
  """Returns a `_Head` for multi-label classification.

  In multi-label classification each example may carry zero or more labels
  from a discrete set, unlike `multi_class_head` where every example has
  exactly one label. Loss is `sigmoid_cross_entropy` averaged over classes.
  Labels may be fed as a multi-hot tensor of shape `[batch_size, n_classes]`
  or as an integer `SparseTensor` of class indices.

  A custom `loss_fn` is also supported: it takes `(labels, logits)` or
  `(labels, logits, features)` and returns unreduced loss of shape
  `[batch_size, 1]`. It must accept indicator `labels` of shape
  `[batch_size, n_classes]` -- the head applies `label_vocabulary` before
  calling it.

  Args:
    n_classes: Number of classes; must be greater than 1 (for a single
      class use `binary_classification_head`).
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` naming the per-example weight
      feature; each example's loss is multiplied by its weight.
    thresholds: Iterable of floats in the open interval `(0, 1)`. Accuracy,
      precision and recall metrics are evaluated at each threshold, applied
      to the predicted probabilities (above is `true`, below is `false`).
    label_vocabulary: Optional list of possible label strings. When omitted,
      labels must already be integer ids in `[0, n_classes)` or a multi-hot
      Tensor; when given, labels must be a string-typed SparseTensor with
      values from this vocabulary, and string labels without a vocabulary
      are an error.
    loss_fn: Optional loss function.
    name: Optional head name; summary and metric keys are suffixed with
      `"/" + name`, and it is used as the `name_scope` when creating ops.

  Returns:
    An instance of `_Head` for multi-label classification.

  Raises:
    ValueError: if `n_classes` or `thresholds` is invalid.
  """
  # Normalize to an immutable tuple; a falsy value becomes the empty tuple.
  thresholds = tuple(thresholds or ())
  if n_classes is None or n_classes < 2:
    raise ValueError(
        'n_classes must be > 1 for multi-class classification. '
        'Given: {}'.format(n_classes))
  for threshold in thresholds:
    if threshold <= 0.0 or threshold >= 1.0:
      raise ValueError(
          'thresholds must be in (0, 1) range. Given: {}'.format(threshold))
  if label_vocabulary is not None:
    if not isinstance(label_vocabulary, (list, tuple)):
      raise ValueError(
          'label_vocabulary must be a list or tuple. '
          'Given type: {}'.format(type(label_vocabulary)))
    if len(label_vocabulary) != n_classes:
      raise ValueError(
          'Length of label_vocabulary must be n_classes ({}). '
          'Given: {}'.format(n_classes, len(label_vocabulary)))
  if loss_fn:
    _validate_loss_fn_args(loss_fn)
  return _MultiLabelHead(
      n_classes=n_classes,
      weight_column=weight_column,
      thresholds=thresholds,
      label_vocabulary=label_vocabulary,
      loss_fn=loss_fn,
      name=name)
class _MultiLabelHead(head_lib._Head): # pylint:disable=protected-access
  """`_Head` for multi-label classification.

  Builds prediction, loss, eval-metric and train ops for sigmoid
  cross-entropy multi-label classification. Construct via the
  `multi_label_head` factory, which validates the arguments.
  """
  def __init__(self,
               n_classes,
               weight_column=None,
               thresholds=None,
               label_vocabulary=None,
               loss_fn=None,
               name=None):
    # Arguments are assumed pre-validated by multi_label_head().
    self._n_classes = n_classes
    self._weight_column = weight_column
    self._thresholds = thresholds
    self._label_vocabulary = label_vocabulary
    self._loss_fn = loss_fn
    self._name = name
  @property
  def name(self):
    return self._name
  @property
  def logits_dimension(self):
    # One logit per class.
    return self._n_classes
  def _process_labels(self, labels):
    """Converts labels to an int64 multi-hot Tensor [batch_size, n_classes]."""
    if labels is None:
      raise ValueError(
          'You must provide a labels Tensor. Given: None. '
          'Suggested troubleshooting steps: Check that your data contain '
          'your label feature. Check that your input_fn properly parses and '
          'returns labels.')
    if isinstance(labels, sparse_tensor.SparseTensor):
      # Sparse labels: optionally map strings to ids via the vocabulary,
      # then expand class-index values to a multi-hot indicator.
      if labels.dtype == dtypes.string:
        label_ids_values = lookup_ops.index_table_from_tensor(
            vocabulary_list=tuple(self._label_vocabulary),
            name='class_id_lookup').lookup(labels.values)
        label_ids = sparse_tensor.SparseTensor(
            indices=labels.indices,
            values=label_ids_values,
            dense_shape=labels.dense_shape)
      else:
        label_ids = labels
      return math_ops.to_int64(
          sparse_ops.sparse_to_indicator(label_ids, self._n_classes))
    # Dense labels: assert the expected rank and class dimension at runtime,
    # then pass them through unchanged.
    msg = ('labels shape must be [batch_size, {}]. '
           'Given: ').format(self._n_classes)
    labels_shape = array_ops.shape(labels)
    check_rank_op = control_flow_ops.Assert(
        math_ops.equal(array_ops.rank(labels), 2),
        data=[msg, labels_shape])
    check_label_dim = control_flow_ops.Assert(
        math_ops.equal(labels_shape[-1], self._n_classes),
        data=[msg, labels_shape])
    with ops.control_dependencies([check_rank_op, check_label_dim]):
      return array_ops.identity(labels)
  def create_loss(self, features, mode, logits, labels):
    """See `Head`."""
    del mode  # Unused for this head.
    processed_labels = self._process_labels(labels)
    if self._loss_fn:
      # User-supplied loss; must return shape [batch_size, 1] (checked
      # inside _call_loss_fn).
      unweighted_loss = _call_loss_fn(
          loss_fn=self._loss_fn, labels=processed_labels, logits=logits,
          features=features)
    else:
      unweighted_loss = losses.sigmoid_cross_entropy(
          multi_class_labels=processed_labels, logits=logits,
          reduction=losses.Reduction.NONE)
      # Averages loss over classes.
      # NOTE(review): keep_dims is the older spelling of keepdims -- confirm
      # against the TF version this file targets.
      unweighted_loss = math_ops.reduce_mean(
          unweighted_loss, axis=-1, keep_dims=True)
    weights = head_lib._weights(features, self._weight_column) # pylint:disable=protected-access,
    weighted_sum_loss = losses.compute_weighted_loss(
        unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
    # _weights() can return 1.
    example_weight_sum = math_ops.reduce_sum(
        weights * array_ops.ones_like(unweighted_loss))
    return head_lib.LossSpec(
        weighted_sum_loss=weighted_sum_loss,
        example_weight_sum=example_weight_sum,
        processed_labels=processed_labels)
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with ops.name_scope(self._name, 'head'):
      logits = head_lib._check_logits(logits, self.logits_dimension) # pylint:disable=protected-access
      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        # Per-class probabilities via an element-wise sigmoid (multi-label,
        # so classes are not normalized against each other).
        probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        # PREDICT returns early: no labels, no loss.
        classifier_output = head_lib._classification_output( # pylint:disable=protected-access
            scores=probabilities, n_classes=self._n_classes,
            label_vocabulary=self._label_vocabulary)
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                _DEFAULT_SERVING_KEY: classifier_output,
                head_lib._CLASSIFY_SERVING_KEY: classifier_output, # pylint:disable=protected-access
                head_lib._PREDICT_SERVING_KEY: ( # pylint:disable=protected-access
                    export_output.PredictOutput(predictions))
            })
      (weighted_sum_loss, example_weight_sum,
       processed_labels) = self.create_loss(
           features=features, mode=mode, logits=logits, labels=labels)
      # Eval.
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=weighted_sum_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=processed_labels,
                probabilities=probabilities,
                weights=head_lib._weights(features, self._weight_column), # pylint:disable=protected-access,
                weighted_sum_loss=weighted_sum_loss,
                example_weight_sum=example_weight_sum))
      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      # Summaries are written at the root scope so keys match metric names.
      with ops.name_scope(''):
        summary.scalar(
            head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS), # pylint:disable=protected-access
            weighted_sum_loss)
        summary.scalar(
            head_lib._summary_key( # pylint:disable=protected-access
                self._name, metric_keys.MetricKeys.LOSS_MEAN),
            weighted_sum_loss / example_weight_sum)
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=weighted_sum_loss,
          train_op=train_op_fn(weighted_sum_loss))
  def _eval_metric_ops(self, labels, probabilities, weights, weighted_sum_loss,
                       example_weight_sum):
    """Returns a dict of metrics for eval_metric_ops."""
    with ops.name_scope(
        None, 'metrics',
        [labels, probabilities, weights, weighted_sum_loss, example_weight_sum
        ]):
      keys = metric_keys.MetricKeys
      metric_ops = {
          # Estimator already adds a metric for loss.
          head_lib._summary_key(self._name, keys.LOSS_MEAN): # pylint:disable=protected-access
              metrics_lib.mean(
                  # Both values and weights here are reduced, scalar Tensors.
                  # values is the actual mean we want, but we pass the scalar
                  # example_weight_sum in order to return the correct update_op
                  # alongside the value_op for streaming metrics.
                  values=(weighted_sum_loss / example_weight_sum),
                  weights=example_weight_sum,
                  name=keys.LOSS_MEAN),
          head_lib._summary_key(self._name, keys.AUC): # pylint:disable=protected-access
              metrics_lib.auc(labels=labels, predictions=probabilities,
                              weights=weights, name=keys.AUC),
          head_lib._summary_key(self._name, keys.AUC_PR): # pylint:disable=protected-access
              metrics_lib.auc(labels=labels, predictions=probabilities,
                              weights=weights, curve='PR',
                              name=keys.AUC_PR),
      }
      # Thresholded metrics: one accuracy/precision/recall trio per
      # configured threshold.
      for threshold in self._thresholds:
        accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
        metric_ops[head_lib._summary_key(self._name, accuracy_key)] = ( # pylint:disable=protected-access
            head_lib._accuracy_at_threshold( # pylint:disable=protected-access
                labels=labels,
                predictions=probabilities,
                weights=weights,
                threshold=threshold,
                name=accuracy_key))
        # Precision for positive examples.
        precision_key = keys.PRECISION_AT_THRESHOLD % threshold
        metric_ops[head_lib._summary_key(self._name, precision_key)] = ( # pylint:disable=protected-access
            head_lib._precision_at_threshold( # pylint:disable=protected-access
                labels=labels,
                predictions=probabilities,
                weights=weights,
                threshold=threshold,
                name=precision_key))
        # Recall for positive examples.
        recall_key = keys.RECALL_AT_THRESHOLD % threshold
        metric_ops[head_lib._summary_key(self._name, recall_key)] = ( # pylint:disable=protected-access
            head_lib._recall_at_threshold( # pylint:disable=protected-access
                labels=labels,
                predictions=probabilities,
                weights=weights,
                threshold=threshold,
                name=recall_key))
    return metric_ops
def _validate_loss_fn_args(loss_fn):
  """Checks that `loss_fn` has the expected signature.

  Required arguments: labels, logits.
  Optional arguments: features.

  Args:
    loss_fn: The loss function.

  Raises:
    ValueError: If the signature is unexpected.
  """
  args = util.fn_args(loss_fn)
  # Each required argument must be present; report the first missing one.
  for required in ('labels', 'logits'):
    if required not in args:
      raise ValueError(
          'loss_fn must contain argument: {}. '
          'Given arguments: {}'.format(required, args))
  # Anything beyond the known argument names is rejected.
  unexpected = list(set(args) - set(['labels', 'logits', 'features']))
  if unexpected:
    raise ValueError('loss_fn has unexpected args: {}'.format(unexpected))
def _call_loss_fn(loss_fn, labels, logits, features):
  """Invokes `loss_fn` and checks that the result has the expected shape.

  Args:
    loss_fn: The loss function.
    labels: Processed labels Tensor.
    logits: Logits Tensor of shape [batch_size, logits_dimension].
    features: Features dict; forwarded only if `loss_fn` accepts it.

  Returns:
    Loss Tensor with shape [batch_size, 1].
  """
  # Pass `features` through only when the user's loss function asks for it.
  kwargs = (
      {'features': features} if 'features' in util.fn_args(loss_fn) else {})
  unreduced_loss = loss_fn(labels=labels, logits=logits, **kwargs)
  # Runtime assertion that the returned loss is [batch_size, 1].
  expected_shape = [array_ops.shape(logits)[0], 1]
  actual_shape = array_ops.shape(unreduced_loss)
  shape_matches = math_ops.reduce_all(
      math_ops.equal(actual_shape, expected_shape))
  check_shape_op = control_flow_ops.Assert(
      shape_matches,
      data=[
          'loss_fn must return Tensor of shape [batch_size, 1]. Given: ',
          actual_shape])
  with ops.control_dependencies([check_shape_op]):
    return array_ops.identity(unreduced_loss)
|
|
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A test script which attempts to detect memory leaks by calling C
functions many times and compare process memory usage before and
after the calls. It might produce false positives.
"""
import functools
import gc
import os
import socket
import sys
import threading
import time
import psutil
import psutil._common
from psutil._compat import xrange, callable
from test_psutil import (WINDOWS, POSIX, OSX, LINUX, SUNOS, BSD, TESTFN,
RLIMIT_SUPPORT, TRAVIS)
from test_psutil import (reap_children, supports_ipv6, safe_remove,
get_test_subprocess)
if sys.version_info < (2, 7):
import unittest2 as unittest # https://pypi.python.org/pypi/unittest2
else:
import unittest
# Number of calls made per measurement round in Base.execute().
LOOPS = 1000
# Maximum RSS growth tolerated between rounds before suspecting a leak.
TOLERANCE = 4096
# When True, skip tests whose implementation is pure python on this platform.
SKIP_PYTHON_IMPL = True
def skip_if_linux():
    """Decorator: skip a test on Linux when the implementation is pure python."""
    should_skip = LINUX and SKIP_PYTHON_IMPL
    reason = "not worth being tested on LINUX (pure python)"
    return unittest.skipIf(should_skip, reason)
class Base(unittest.TestCase):
    """Common harness: call an API repeatedly and watch process RSS."""
    # Process handle used by subclasses when `function` is a method name.
    proc = psutil.Process()
    def execute(self, function, *args, **kwargs):
        """Call `function` many times and fail if process RSS keeps growing.

        Two measurement rounds of LOOPS calls each are compared; growth
        above TOLERANCE triggers a 3-second confirmation phase before
        failing, to reduce false positives.
        """
        def call_many_times():
            # One round: LOOPS - 1 calls (plus the warm-up call below),
            # then a GC pass before sampling memory.
            for x in xrange(LOOPS - 1):
                self.call(function, *args, **kwargs)
            del x
            gc.collect()
            return self.get_mem()
        # Warm-up call so lazily-allocated resources don't count as a leak.
        self.call(function, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        # RSS comparison
        # step 1
        rss1 = call_many_times()
        # step 2
        rss2 = call_many_times()
        difference = rss2 - rss1
        if difference > TOLERANCE:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase any
            # more.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            stop_at = time.time() + 3
            while True:
                self.call(function, *args, **kwargs)
                if time.time() >= stop_at:
                    break
            del stop_at
            gc.collect()
            rss3 = self.get_mem()
            difference = rss3 - rss2
            if rss3 > rss2:
                self.fail("rss2=%s, rss3=%s, difference=%s"
                          % (rss2, rss3, difference))
    def execute_w_exc(self, exc, function, *args, **kwargs):
        """Like execute(), but each call is expected to raise `exc`."""
        kwargs['_exc'] = exc
        self.execute(function, *args, **kwargs)
    def get_mem(self):
        # First field of memory_info() (RSS) for the current process.
        return psutil.Process().memory_info()[0]
    def call(self, function, *args, **kwargs):
        # Subclasses define how `function` is actually invoked.
        raise NotImplementedError("must be implemented in subclass")
class TestProcessObjectLeaks(Base):
    """Test leaks of Process class methods and properties"""
    def setUp(self):
        gc.collect()
    def tearDown(self):
        reap_children()
    def call(self, function, *args, **kwargs):
        # `function` is either a callable or the name of a method to look up
        # on self.proc. psutil.Error is swallowed because only memory growth
        # matters here, not the call's success; the special '_exc' kwarg
        # instead asserts that a specific exception is raised.
        if callable(function):
            if '_exc' in kwargs:
                exc = kwargs.pop('_exc')
                self.assertRaises(exc, function, *args, **kwargs)
            else:
                try:
                    function(*args, **kwargs)
                except psutil.Error:
                    pass
        else:
            meth = getattr(self.proc, function)
            if '_exc' in kwargs:
                exc = kwargs.pop('_exc')
                self.assertRaises(exc, meth, *args, **kwargs)
            else:
                try:
                    meth(*args, **kwargs)
                except psutil.Error:
                    pass
    @skip_if_linux()
    def test_name(self):
        self.execute('name')
    @skip_if_linux()
    def test_cmdline(self):
        self.execute('cmdline')
    @skip_if_linux()
    def test_exe(self):
        self.execute('exe')
    @skip_if_linux()
    def test_ppid(self):
        self.execute('ppid')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_uids(self):
        self.execute('uids')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_gids(self):
        self.execute('gids')
    @skip_if_linux()
    def test_status(self):
        self.execute('status')
    def test_nice_get(self):
        self.execute('nice')
    def test_nice_set(self):
        # Set niceness to its current value so the process is unaffected.
        niceness = psutil.Process().nice()
        self.execute('nice', niceness)
    @unittest.skipUnless(hasattr(psutil.Process, 'ionice'),
                         "Linux and Windows Vista only")
    def test_ionice_get(self):
        self.execute('ionice')
    @unittest.skipUnless(hasattr(psutil.Process, 'ionice'),
                         "Linux and Windows Vista only")
    def test_ionice_set(self):
        if WINDOWS:
            value = psutil.Process().ionice()
            self.execute('ionice', value)
        else:
            from psutil._pslinux import cext
            self.execute('ionice', psutil.IOPRIO_CLASS_NONE)
            # Also exercise the error path of the C extension directly.
            fun = functools.partial(cext.proc_ioprio_set, os.getpid(), -1, 0)
            self.execute_w_exc(OSError, fun)
    @unittest.skipIf(OSX or SUNOS, "feature not supported on this platform")
    @skip_if_linux()
    def test_io_counters(self):
        self.execute('io_counters')
    @unittest.skipUnless(WINDOWS, "not worth being tested on posix")
    def test_username(self):
        self.execute('username')
    @skip_if_linux()
    def test_create_time(self):
        self.execute('create_time')
    @skip_if_linux()
    def test_num_threads(self):
        self.execute('num_threads')
    @unittest.skipUnless(WINDOWS, "Windows only")
    def test_num_handles(self):
        self.execute('num_handles')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_num_fds(self):
        self.execute('num_fds')
    @skip_if_linux()
    def test_threads(self):
        self.execute('threads')
    @skip_if_linux()
    def test_cpu_times(self):
        self.execute('cpu_times')
    @skip_if_linux()
    def test_memory_info(self):
        self.execute('memory_info')
    @skip_if_linux()
    def test_memory_info_ex(self):
        self.execute('memory_info_ex')
    @unittest.skipUnless(POSIX, "POSIX only")
    @skip_if_linux()
    def test_terminal(self):
        self.execute('terminal')
    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_resume(self):
        self.execute('resume')
    @skip_if_linux()
    def test_cwd(self):
        self.execute('cwd')
    @unittest.skipUnless(WINDOWS or LINUX or BSD,
                         "Windows or Linux or BSD only")
    def test_cpu_affinity_get(self):
        self.execute('cpu_affinity')
    @unittest.skipUnless(WINDOWS or LINUX or BSD,
                         "Windows or Linux or BSD only")
    def test_cpu_affinity_set(self):
        # Re-apply the current affinity, then exercise the invalid-CPU path.
        affinity = psutil.Process().cpu_affinity()
        self.execute('cpu_affinity', affinity)
        if not TRAVIS:
            self.execute_w_exc(ValueError, 'cpu_affinity', [-1])
    @skip_if_linux()
    def test_open_files(self):
        safe_remove(TESTFN) # needed after UNIX socket test has run
        with open(TESTFN, 'w'):
            self.execute('open_files')
    # OSX implementation is unbelievably slow
    @unittest.skipIf(OSX, "OSX implementation is too slow")
    @skip_if_linux()
    def test_memory_maps(self):
        self.execute('memory_maps')
    @unittest.skipUnless(LINUX, "Linux only")
    @unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
                         "only available on Linux >= 2.6.36")
    def test_rlimit_get(self):
        self.execute('rlimit', psutil.RLIMIT_NOFILE)
    @unittest.skipUnless(LINUX, "Linux only")
    @unittest.skipUnless(LINUX and RLIMIT_SUPPORT,
                         "only available on Linux >= 2.6.36")
    def test_rlimit_set(self):
        # Re-apply the current limit, then exercise the invalid-resource path.
        limit = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)
        self.execute('rlimit', psutil.RLIMIT_NOFILE, limit)
        self.execute_w_exc(OSError, 'rlimit', -1)
    @skip_if_linux()
    # Windows implementation is based on a single system-wide function
    @unittest.skipIf(WINDOWS, "tested later")
    def test_connections(self):
        # Open one socket per supported family/type so connections() has
        # something to enumerate; they are closed in the finally block.
        def create_socket(family, type):
            sock = socket.socket(family, type)
            sock.bind(('', 0))
            if type == socket.SOCK_STREAM:
                sock.listen(1)
            return sock
        socks = []
        socks.append(create_socket(socket.AF_INET, socket.SOCK_STREAM))
        socks.append(create_socket(socket.AF_INET, socket.SOCK_DGRAM))
        if supports_ipv6():
            socks.append(create_socket(socket.AF_INET6, socket.SOCK_STREAM))
            socks.append(create_socket(socket.AF_INET6, socket.SOCK_DGRAM))
        if hasattr(socket, 'AF_UNIX'):
            safe_remove(TESTFN)
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.bind(TESTFN)
            s.listen(1)
            socks.append(s)
        kind = 'all'
        # TODO: UNIX sockets are temporarily implemented by parsing
        # 'pfiles' cmd output; we don't want that part of the code to
        # be executed.
        if SUNOS:
            kind = 'inet'
        try:
            self.execute('connections', kind=kind)
        finally:
            for s in socks:
                s.close()
# Spawn a helper subprocess and kill it immediately so the zombie test
# class below can exercise the NoSuchProcess code paths against a dead PID.
p = get_test_subprocess()
DEAD_PROC = psutil.Process(p.pid)
DEAD_PROC.kill()
DEAD_PROC.wait()
del p
class TestProcessObjectLeaksZombie(TestProcessObjectLeaks):
    """Same as above but looks for leaks occurring when dealing with
    zombie processes raising NoSuchProcess exception.
    """
    # Handle of the already-terminated subprocess created at module level.
    proc = DEAD_PROC
    def call(self, *args, **kwargs):
        # NoSuchProcess is the expected outcome against a dead PID; any
        # other exception propagates normally.
        try:
            TestProcessObjectLeaks.call(self, *args, **kwargs)
        except psutil.NoSuchProcess:
            pass
    # NOTE(review): these signal-sending tests are defined only when not on
    # POSIX -- presumably because they behave differently against a reaped
    # PID there; confirm the rationale upstream.
    if not POSIX:
        def test_kill(self):
            self.execute('kill')
        def test_terminate(self):
            self.execute('terminate')
        def test_suspend(self):
            self.execute('suspend')
        def test_resume(self):
            self.execute('resume')
class TestModuleFunctionsLeaks(Base):
    """Test leaks of psutil module functions."""

    def setUp(self):
        gc.collect()

    def call(self, function, *args, **kwargs):
        # Module-level API: look the function up by name on the psutil
        # package and invoke it.
        fun = getattr(psutil, function)
        fun(*args, **kwargs)

    @skip_if_linux()
    def test_cpu_count_logical(self):
        # Temporarily route psutil.cpu_count to the logical implementation.
        # The previous version replaced it permanently, leaking the patch
        # into every test that runs afterwards; restore it in a finally
        # block so test order no longer matters.
        original = psutil.cpu_count
        psutil.cpu_count = psutil._psplatform.cpu_count_logical
        try:
            self.execute('cpu_count')
        finally:
            psutil.cpu_count = original

    @skip_if_linux()
    def test_cpu_count_physical(self):
        # See test_cpu_count_logical: patch, exercise, then restore.
        original = psutil.cpu_count
        psutil.cpu_count = psutil._psplatform.cpu_count_physical
        try:
            self.execute('cpu_count')
        finally:
            psutil.cpu_count = original

    @skip_if_linux()
    def test_boot_time(self):
        self.execute('boot_time')

    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_pid_exists(self):
        self.execute('pid_exists', os.getpid())

    def test_virtual_memory(self):
        self.execute('virtual_memory')

    # TODO: remove this skip when this gets fixed
    @unittest.skipIf(SUNOS,
                     "not worth being tested on SUNOS (uses a subprocess)")
    def test_swap_memory(self):
        self.execute('swap_memory')

    @skip_if_linux()
    def test_cpu_times(self):
        self.execute('cpu_times')

    @skip_if_linux()
    def test_per_cpu_times(self):
        self.execute('cpu_times', percpu=True)

    @unittest.skipIf(POSIX and SKIP_PYTHON_IMPL,
                     "not worth being tested on POSIX (pure python)")
    def test_disk_usage(self):
        self.execute('disk_usage', '.')

    def test_disk_partitions(self):
        self.execute('disk_partitions')

    @skip_if_linux()
    def test_net_io_counters(self):
        self.execute('net_io_counters')

    @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'),
                     '/proc/diskstats not available on this Linux version')
    @skip_if_linux()
    def test_disk_io_counters(self):
        self.execute('disk_io_counters')

    # XXX - on Windows this produces a false positive
    @unittest.skipIf(WINDOWS, "XXX produces a false positive on Windows")
    def test_users(self):
        self.execute('users')

    @unittest.skipIf(LINUX,
                     "not worth being tested on Linux (pure python)")
    def test_net_connections(self):
        self.execute('net_connections')

    def test_net_if_addrs(self):
        self.execute('net_if_addrs')

    @unittest.skipIf(TRAVIS, "EPERM on travis")
    def test_net_if_stats(self):
        self.execute('net_if_stats')
def main():
    """Run every leak-test suite; return True when all tests passed."""
    suite = unittest.TestSuite()
    for test_class in (TestProcessObjectLeaksZombie,
                       TestProcessObjectLeaks,
                       TestModuleFunctionsLeaks):
        suite.addTest(unittest.makeSuite(test_class))
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(suite).wasSuccessful()
if __name__ == '__main__':
    # Report failure to the shell with a non-zero exit status.
    if not main():
        sys.exit(1)
|
|
# Lint as: python3
"""LIT wrappers for T5, supporting both HuggingFace and SavedModel formats."""
import re
from typing import List
import attr
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.examples.models import model_utils
from lit_nlp.lib import utils
import tensorflow as tf
# tensorflow_text is required for T5 SavedModel
import tensorflow_text # pylint: disable=unused-import
import transformers
from rouge_score import rouge_scorer
# Shorthand alias for LIT's JSON-style dict type.
JsonDict = lit_types.JsonDict
def masked_token_mean(vectors, masks):
  """Mean over tokens, ignoring masked-out positions.

  Args:
    vectors: <tf.float32>[batch_size, num_tokens, emb_dim]
    masks: <tf.int32>[batch_size, num_tokens]

  Returns:
    <tf.float32>[batch_size, emb_dim]
  """
  float_masks = tf.cast(masks, tf.float32)
  # Normalize so the weights of the unmasked tokens in each row sum to 1.
  per_row_totals = tf.reduce_sum(float_masks, axis=1, keepdims=True)
  token_weights = float_masks / per_row_totals
  expanded_weights = tf.expand_dims(token_weights, axis=-1)
  return tf.reduce_sum(vectors * expanded_weights, axis=1)
@attr.s(auto_attribs=True, kw_only=True)
class T5ModelConfig(object):
  """Config options for a T5 generation model."""
  # Input options
  # Number of examples per inference call.
  inference_batch_size: int = 4
  # Generation options
  # Beam width used during decoding.
  beam_size: int = 4
  # Maximum number of tokens to generate.
  max_gen_length: int = 50
  # Candidates to return; T5HFModel asserts this does not exceed beam_size.
  num_to_generate: int = 1
  # Decoding options
  # Number of top tokens reported per position when force-decoding.
  token_top_k: int = 10
  # Whether the model should also return attention tensors.
  output_attention: bool = False
def validate_t5_model(model: lit_model.Model) -> lit_model.Model:
  """Check at runtime that a given model exposes a T5-style spec.

  Intended to run before server start, e.g. from the __init__() of a
  wrapper class, so that spec mismatches fail fast.

  Args:
    model: a LIT model

  Returns:
    model: the same model, unchanged.

  Raises:
    AssertionError: if the model's spec does not match that expected for a T5
      model.
  """
  # Input side: a free-text field, plus an optional text target.
  in_spec = model.input_spec()
  assert "input_text" in in_spec
  assert isinstance(in_spec["input_text"], lit_types.TextSegment)
  if "target_text" in in_spec:
    assert isinstance(in_spec["target_text"], lit_types.TextSegment)
  # Output side: generated text (single or candidate list) aligned to
  # the "target_text" input field.
  out_spec = model.output_spec()
  assert "output_text" in out_spec
  output_field = out_spec["output_text"]
  allowed_types = (lit_types.GeneratedText, lit_types.GeneratedTextCandidates)
  assert isinstance(output_field, allowed_types)
  assert output_field.parent == "target_text"
  return model
class T5SavedModel(lit_model.Model):
  """T5 from a TensorFlow SavedModel, for black-box access.

  To create a SavedModel from a regular T5 checkpoint, see
  https://github.com/google-research/text-to-text-transfer-transformer#export
  """
  def __init__(self, saved_model_path: str, model=None, **config_kw):
    """Loads the SavedModel at `saved_model_path` unless `model` is given."""
    super().__init__()
    # By default, SavedModels from the original T5 codebase have batch_size=1
    # hardcoded. Use setdefault here so that the user can still override if
    # they've fixed this upstream.
    config_kw.setdefault("inference_batch_size", 1)
    self.config = T5ModelConfig(**config_kw)
    self.model = model or tf.saved_model.load(saved_model_path)
  ##
  # LIT API implementations
  def max_minibatch_size(self) -> int:
    # The lit.Model base class handles batching automatically in the
    # implementation of predict(), and uses this value as the batch size.
    return self.config.inference_batch_size
  def predict_minibatch(self, inputs):
    """Predict on a single minibatch of examples."""
    # The serving signature consumes a batch of raw input strings; its
    # "outputs" tensor holds generated text as bytes, decoded here for LIT.
    model_inputs = tf.constant([ex["input_text"] for ex in inputs])
    model_outputs = self.model.signatures["serving_default"](model_inputs)
    return [{
        "output_text": m.decode("utf-8")
    } for m in model_outputs["outputs"].numpy()]
  def input_spec(self):
    # target_text is optional; it is not required to run generation.
    return {
        "input_text": lit_types.TextSegment(),
        "target_text": lit_types.TextSegment(required=False),
    }
  def output_spec(self):
    return {"output_text": lit_types.GeneratedText(parent="target_text")}
class T5HFModel(lit_model.Model):
    """T5 using HuggingFace Transformers and Keras.

    This version supports embeddings, attention, and force-decoding of the
    target text, as well as more options to control decoding (such as beam
    search).
    """

    @property
    def num_layers(self):
        # Layer count reported by the HF model config; used to size the
        # per-layer attention fields in output_spec().
        return self.model.config.num_layers

    def __init__(self,
                 model_name="t5-small",
                 model=None,
                 tokenizer=None,
                 **config_kw):
        """Initialize from a model name, or from pre-built model/tokenizer.

        Args:
          model_name: HuggingFace model name; used to load the model and/or
            tokenizer when they are not supplied directly.
          model: optional pre-loaded TFT5ForConditionalGeneration.
          tokenizer: optional pre-loaded T5Tokenizer.
          **config_kw: forwarded to T5ModelConfig.
        """
        super().__init__()
        self.config = T5ModelConfig(**config_kw)
        # generate() cannot return more sequences than the beam width.
        assert self.config.num_to_generate <= self.config.beam_size
        self.tokenizer = tokenizer or transformers.T5Tokenizer.from_pretrained(
            model_name)
        self.model = model or model_utils.load_pretrained(
            transformers.TFT5ForConditionalGeneration,
            model_name,
            output_hidden_states=True,
            output_attentions=self.config.output_attention)

    def _encode_texts(self, texts: List[str]):
        # Tokenize a batch of strings, padding to the longest element so the
        # returned tensors are rectangular.
        return self.tokenizer.batch_encode_plus(
            texts,
            return_tensors="tf",
            padding="longest",
            truncation="longest_first")

    def _force_decode(self, encoded_inputs, encoded_targets):
        """Get predictions for a batch of tokenized examples.

        Each forward pass produces the following:
          logits: batch_size x dec_len x vocab_size
          decoder_past_key_value_states: tuple with cached outputs.
          dec_states: tuple[len:dec_layers]:
            batch_size x dec_len x hid_size
          dec_attn: [optional] tuple[len:dec_layers+1]
            batch_size x num_heads x dec_len x dec_len
          enc_final_state: batch_size x enc_len x hid_size
          enc_states: tuple[len:enc_layers]:
            batch_size x enc_len x hid_size
          enc_attn: [optional] tuple[len:enc_layers+1]
            batch_size x num_heads x enc_len x enc_len

        The two optional attention fields are only returned if
        config.output_attention is set.

        Args:
          encoded_inputs: Dict as returned from Tokenizer for inputs.
          encoded_targets: Dict as returned from Tokenizer for outputs

        Returns:
          batched_outputs: Dict[str, tf.Tensor]
        """
        results = self.model(
            input_ids=encoded_inputs["input_ids"],
            decoder_input_ids=encoded_targets["input_ids"],
            attention_mask=encoded_inputs["attention_mask"],
            decoder_attention_mask=encoded_targets["attention_mask"])
        model_probs = tf.nn.softmax(results.logits, axis=-1)
        top_k = tf.math.top_k(
            model_probs, k=self.config.token_top_k, sorted=True, name=None)
        batched_outputs = {
            "input_ids": encoded_inputs["input_ids"],
            "input_ntok": tf.reduce_sum(encoded_inputs["attention_mask"], axis=1),
            "target_ids": encoded_targets["input_ids"],
            "target_ntok": tf.reduce_sum(encoded_targets["attention_mask"], axis=1),
            "top_k_indices": top_k.indices,
            "top_k_probs": top_k.values,
        }
        # encoder_last_hidden_state is <float>[batch_size, num_tokens, emb_dim]
        # take the mean over real tokens to get <float>[batch_size, emb_dim]
        batched_outputs["encoder_final_embedding"] = masked_token_mean(
            results.encoder_last_hidden_state, encoded_inputs["attention_mask"])
        if self.config.output_attention:
            for i in range(len(results.decoder_attentions)):
                batched_outputs[
                    f"decoder_layer_{i+1:d}_attention"] = results.decoder_attentions[i]
            for i in range(len(results.encoder_attentions)):
                batched_outputs[
                    f"encoder_layer_{i+1:d}_attention"] = results.encoder_attentions[i]
        return batched_outputs

    def _postprocess(self, preds):
        """Post-process single-example preds. Operates on numpy arrays."""
        # Return tokenization for input text.
        input_ntok = preds.pop("input_ntok")
        input_ids = preds.pop("input_ids")[:input_ntok]
        preds["input_tokens"] = self.tokenizer.convert_ids_to_tokens(input_ids)
        # Return tokenization for target text.
        target_ntok = preds.pop("target_ntok")
        target_ids = preds.pop("target_ids")[:target_ntok]
        preds["target_tokens"] = self.tokenizer.convert_ids_to_tokens(target_ids)
        # Decode predicted top-k tokens.
        # token_topk_preds will be a List[List[(word, prob)]]
        # Initialize prediction for 0th token as N/A.
        token_topk_preds = [[("N/A", 1.)]]
        pred_ids = preds.pop("top_k_indices")[:target_ntok]  # <int>[num_tokens, k]
        pred_probs = preds.pop(
            "top_k_probs")[:target_ntok]  # <float32>[num_tokens, k]
        for token_pred_ids, token_pred_probs in zip(pred_ids, pred_probs):
            token_pred_words = self.tokenizer.convert_ids_to_tokens(token_pred_ids)
            token_topk_preds.append(list(zip(token_pred_words, token_pred_probs)))
        preds["pred_tokens"] = token_topk_preds
        # Decode generated ids
        candidates = [
            self.tokenizer.decode(ids, skip_special_tokens=True)
            for ids in preds.pop("generated_ids")
        ]
        if self.config.num_to_generate > 1:
            preds["output_text"] = [(s, None) for s in candidates]
        else:
            preds["output_text"] = candidates[0]
        # Process attention fields, if present.
        for key in preds:
            # BUG FIX: keys are written by _force_decode() (and declared in
            # output_spec()) as "{encoder,decoder}_layer_<i>_attention", with
            # an underscore before "attention". The original pattern expected
            # ".../attention" (slash) and therefore never matched, so the
            # attention matrices were returned untrimmed and untransposed.
            if not re.match(r"\w+_layer_(\d+)_attention", key):
                continue
            if key.startswith("encoder_"):
                ntok = input_ntok
            elif key.startswith("decoder_"):
                ntok = target_ntok
            else:
                raise ValueError(f"Invalid attention key: '{key}'")
            # Select only real tokens, since most of this matrix is padding.
            # <float32>[num_heads, max_seq_length, max_seq_length]
            # -> <float32>[num_heads, num_tokens, num_tokens]
            preds[key] = preds[key][:, :ntok, :ntok].transpose((0, 2, 1))
            # Make a copy of this array to avoid memory leaks, since NumPy
            # otherwise keeps a pointer around that prevents the source array
            # from being GCed.
            preds[key] = preds[key].copy()
        return preds

    ##
    # LIT API implementations
    def max_minibatch_size(self) -> int:
        # The lit.Model base class handles batching automatically in the
        # implementation of predict(), and uses this value as the batch size.
        return self.config.inference_batch_size

    def predict_minibatch(self, inputs):
        """Run model on a single batch.

        Args:
          inputs: List[Dict] with fields as described by input_spec()

        Returns:
          outputs: List[Dict] with fields as described by output_spec()
        """
        # Text as sequence of sentencepiece IDs.
        encoded_inputs = self._encode_texts([ex["input_text"] for ex in inputs])
        encoded_targets = self._encode_texts(
            [ex.get("target_text", "") for ex in inputs])
        ##
        # Force-decode on target text, and also get encoder embs and attention.
        batched_outputs = self._force_decode(encoded_inputs, encoded_targets)
        # Get the conditional generation from the model.
        # Workaround for output_hidden not being compatible with generate.
        # See https://github.com/huggingface/transformers/issues/8361
        self.model.config.output_hidden_states = False
        generated_ids = self.model.generate(
            encoded_inputs.input_ids,
            num_beams=self.config.beam_size,
            attention_mask=encoded_inputs.attention_mask,
            max_length=self.config.max_gen_length,
            num_return_sequences=self.config.num_to_generate)
        # [batch_size*num_return_sequences, num_steps]
        # -> [batch_size, num_return_sequences, num_steps]
        batched_outputs["generated_ids"] = tf.reshape(
            generated_ids,
            [-1, self.config.num_to_generate, generated_ids.shape[-1]])
        self.model.config.output_hidden_states = True
        # Convert to numpy for post-processing.
        detached_outputs = {k: v.numpy() for k, v in batched_outputs.items()}
        # Split up batched outputs, then post-process each example.
        unbatched_outputs = utils.unbatch_preds(detached_outputs)
        return list(map(self._postprocess, unbatched_outputs))

    def input_spec(self):
        return {
            "input_text": lit_types.TextSegment(),
            "target_text": lit_types.TextSegment(required=False),
        }

    def output_spec(self):
        spec = {
            "output_text": lit_types.GeneratedText(parent="target_text"),
            "input_tokens": lit_types.Tokens(parent="input_text"),
            "encoder_final_embedding": lit_types.Embeddings(),
            # If target text is given, the following will also be populated.
            "target_tokens": lit_types.Tokens(parent="target_text"),
            "pred_tokens": lit_types.TokenTopKPreds(align="target_tokens"),
        }
        if self.config.num_to_generate > 1:
            spec["output_text"] = lit_types.GeneratedTextCandidates(
                parent="target_text")
        if self.config.output_attention:
            # Add attention for each layer.
            for i in range(self.num_layers):
                spec[f"encoder_layer_{i+1:d}_attention"] = lit_types.AttentionHeads(
                    align_in="input_tokens", align_out="input_tokens")
                spec[f"decoder_layer_{i+1:d}_attention"] = lit_types.AttentionHeads(
                    align_in="target_tokens", align_out="target_tokens")
        return spec
##
# Task-specific wrapper classes.
class TranslationWrapper(lit_model.ModelWrapper):
    """Wrapper class for machine translation."""

    # Mapping from generic T5 fields to this task
    FIELD_RENAMES = {
        "input_text": "source",
        "target_text": "target",
        "output_text": "translation",
    }

    # From Appendix D of https://arxiv.org/pdf/1910.10683.pdf.
    # Add more of these if your model supports them.
    LANGCODE_TO_NAME = {
        "en": "English",
        "de": "German",
        "fr": "French",
        "ro": "Romanian",
    }

    INPUT_TEMPLATE = "translate {source_language} to {target_language}: {source}"

    def __init__(self, model: lit_model.Model):
        # Verify the model exposes the generic T5 fields before wrapping it.
        super().__init__(validate_t5_model(model))

    def preprocess(self, ex: JsonDict) -> JsonDict:
        """Build the T5 translation prompt from an example's fields."""
        source_name = self.LANGCODE_TO_NAME[ex["source_language"]]
        target_name = self.LANGCODE_TO_NAME[ex["target_language"]]
        processed = {
            "input_text":
                self.INPUT_TEMPLATE.format(
                    source_language=source_name,
                    target_language=target_name,
                    source=ex["source"])
        }
        if "target" in ex:
            processed["target_text"] = ex["target"]
        return processed

    ##
    # LIT API implementation
    def description(self) -> str:
        return "T5 for machine translation\n" + self.wrapped.description()

    # TODO(b/170662608): remove these after batching API is cleaned up.
    def max_minibatch_size(self) -> int:
        raise NotImplementedError("Use predict() instead.")

    def predict_minibatch(self, inputs):
        raise NotImplementedError("Use predict() instead.")

    def predict(self, inputs):
        """Predict on a single minibatch of examples."""
        # Keep everything lazy: preprocessing and field renaming are both
        # generator expressions, consumed only as the caller iterates.
        preprocessed = (self.preprocess(example) for example in inputs)
        raw_outputs = self.wrapped.predict(preprocessed)
        return (utils.remap_dict(raw, self.FIELD_RENAMES) for raw in raw_outputs)

    def predict_with_metadata(self, indexed_inputs):
        """As predict(), but inputs are IndexedInput."""
        return self.predict(ii["data"] for ii in indexed_inputs)

    def input_spec(self):
        spec = lit_types.remap_spec(self.wrapped.input_spec(), self.FIELD_RENAMES)
        # The language pair is selected per example via categorical fields.
        for lang_field in ("source_language", "target_language"):
            spec[lang_field] = lit_types.CategoryLabel()
        return spec

    def output_spec(self):
        return lit_types.remap_spec(self.wrapped.output_spec(), self.FIELD_RENAMES)
class SummarizationWrapper(lit_model.ModelWrapper):
    """Wrapper class to perform a summarization task."""

    # Mapping from generic T5 fields to this task
    FIELD_RENAMES = {
        "input_text": "document",
        "target_text": "reference",
    }

    def __init__(self, model: lit_model.Model):
        # Verify the wrapped model exposes the generic T5 fields.
        model = validate_t5_model(model)
        super().__init__(model)
        # TODO(gehrmann): temp solution for ROUGE.
        self._scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
        # If output is List[(str, score)] instead of just str
        self._multi_output = isinstance(self.output_spec()["output_text"],
                                        lit_types.GeneratedTextCandidates)
        # Extracts the single best string from a candidate list, or passes a
        # plain string through unchanged.
        self._get_pred_string = (
            lit_types.GeneratedTextCandidates.top_text if self._multi_output else
            (lambda x: x))

    def preprocess(self, ex: JsonDict) -> JsonDict:
        """Prefix the document with the T5 task directive; keep the reference."""
        ret = {"input_text": "summarize: " + ex["document"]}
        if "reference" in ex:
            ret["target_text"] = ex["reference"]
        return ret

    ##
    # LIT API implementation
    def description(self) -> str:
        return "T5 for summarization\n" + self.wrapped.description()

    # TODO(b/170662608): remove these after batching API is cleaned up.
    def max_minibatch_size(self) -> int:
        raise NotImplementedError("Use predict() instead.")

    def predict_minibatch(self, inputs):
        raise NotImplementedError("Use predict() instead.")

    def predict(self, inputs):
        """Predict on a single minibatch of examples."""
        # NOTE: this is a generator function; the wrapped model runs lazily
        # as the caller consumes the output.
        inputs = list(inputs)  # needs to be referenced below, so keep full list
        model_inputs = (self.preprocess(ex) for ex in inputs)
        outputs = self.wrapped.predict(model_inputs)
        outputs = (utils.remap_dict(mo, self.FIELD_RENAMES) for mo in outputs)
        # TODO(gehrmann): temp solution to get ROUGE scores in data table.
        for ex, mo in zip(inputs, outputs):
            # NOTE(review): assumes every example has a "reference" field;
            # a reference-less example would raise KeyError here — confirm.
            score = self._scorer.score(
                target=ex["reference"],
                prediction=self._get_pred_string(mo["output_text"]))
            mo["rougeL"] = float(score["rougeL"].fmeasure)
            yield mo

    def predict_with_metadata(self, indexed_inputs):
        """As predict(), but inputs are IndexedInput."""
        return self.predict((ex["data"] for ex in indexed_inputs))

    def input_spec(self):
        return lit_types.remap_spec(self.wrapped.input_spec(), self.FIELD_RENAMES)

    def output_spec(self):
        spec = lit_types.remap_spec(self.wrapped.output_spec(), self.FIELD_RENAMES)
        # ROUGE-L F-measure against the reference, added per example.
        spec["rougeL"] = lit_types.Scalar()
        return spec
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import time
import warnings
from . import basex
from . import hansenlaw
from . import dasch
from . import onion_bordas
from . import direct
from . import tools
class Transform(object):
    """Abel transform image class.

    This class provides whole image forward and inverse Abel
    transformations, together with preprocessing (centering, symmetrizing)
    and post processing (integration) functions.

    The following class attributes are available, depending on the
    calculation.

    Attributes
    ----------
    transform : numpy 2D array
        the 2D forward/reverse Abel transform.
    angular_integration : tuple
        radial coordinates, with the radial intensity (speed) distribution.
    residual : numpy 2D array
        residual image (not currently implemented).
    IM : numpy 2D array
        the input image, re-centered (optional) with an odd-size width
    method : str
        transform method, as specified by the input option.
    direction : str
        transform direction, as specified by the input option.
    """

    # Class-level default; __init__ sets the real per-instance value.
    _verbose = False

    def __init__(self, IM,
                 direction='inverse', method='three_point', center='none',
                 symmetry_axis=None, use_quadrants=(True, True, True, True),
                 symmetrize_method='average', angular_integration=False,
                 transform_options=None, center_options=None,
                 angular_integration_options=None,
                 recast_as_float64=True, verbose=False):
        """The one stop transform function.

        Parameters
        ----------
        IM : a NxM numpy array
            This is the image to be transformed
        direction : str
            The type of Abel transform to be performed.

            ``forward``
                A 'forward' Abel transform takes a (2D) slice of a 3D image
                and returns the 2D projection.
            ``inverse``
                An 'inverse' Abel transform takes a 2D projection
                and reconstructs a 2D slice of the 3D image.

            The default is ``inverse``.
        method : str
            specifies which numerical approximation to the Abel transform
            should be employed (see below). The options are

            ``hansenlaw``
                the recursive algorithm described by Hansen and Law.
            ``basex``
                the Gaussian "basis set expansion" method of Dribinski et al.
            ``direct``
                a naive implementation of the analytical formula by
                Roman Yurchuk.
            ``two_point``
                the two-point transform of Dasch (1992).
            ``three_point``
                the three-point transform of Dasch (1992).
            ``onion_bordas``
                the algorithm of Bordas and co-workers (1996),
                re-implemented by Rallis, Wells and co-workers (2014).
            ``onion_peeling``
                the onion peeling deconvolution as described by Dasch (1992)
        center : tuple or str
            If a tuple (float, float) is provided, this specifies
            the image center in (y, x) (row, column) format.
            A value `None` can be supplied
            if no centering is desired in one dimension,
            for example 'center=(None, 250)'.
            If a string is provided, an automatic centering algorithm is used:

            ``image_center``
                center is assumed to be the center of the image.
            ``slice``
                the center is found by comparing slices in the horizontal and
                vertical directions
            ``com``
                the center is calculated as the center of mass
            ``gaussian``
                the center is found using a fit to a Gaussian function. This
                only makes sense if your data looks like a Gaussian.
            ``none``
                (Default)
                No centering is performed. An image with an odd
                number of columns must be provided.
        symmetry_axis : None, int or tuple
            Symmetrize the image about the numpy axis
            0 (vertical), 1 (horizontal), (0, 1) (both axes)
        use_quadrants : tuple of 4 booleans
            select quadrants to be used in the analysis: (Q0, Q1, Q2, Q3).
            Quadrants are numbered counter-clockwise from upper right.
            See note below for description of quadrants.
            Default is ``(True, True, True, True)``, which uses all quadrants.
        symmetrize_method : str
            Method used for symmetrizing the image.

            ``average``
                average the quadrants, in accordance with the `symmetry_axis`
            ``fourier``
                axial symmetry implies that the Fourier components of the 2-D
                projection should be real. Removing the imaginary components
                in reciprocal space leaves a symmetric projection.
                ref: Overstreet, K., et al.
                "Multiple scattering and the density distribution of a Cs
                MOT." Optics Express 13.24 (2005): 9672-9682.
                http://dx.doi.org/10.1364/OPEX.13.009672
        angular_integration : boolean
            integrate the image over angle to give the radial (speed)
            intensity distribution
        transform_options : dict
            Additional arguments passed to the individual transform functions.
            See the documentation for the individual transform method for
            options. Default None (no additional options).
        center_options : dict
            Additional arguments to be passed to the centering function.
            Default None (no additional options).
        angular_integration_options : dict
            Additional arguments passed to the angular_integration transform
            functions. See the documentation for angular_integration for
            options. Default None (no additional options).
        recast_as_float64 : boolean
            True/False that determines if the input image should be recast to
            ``float64``. Many images are imported in other formats (such as
            ``uint8`` or ``uint16``) and this does not always play well with
            the transform algorithms. This should probably always be set to
            True. (Default is True.)
        verbose : boolean
            True/False to determine if non-critical output should be printed.

        .. note:: Quadrant combining
            The quadrants can be combined (averaged) using the
            ``use_quadrants`` keyword in order to provide better data quality.
            The quadrants are numbered starting from
            Q0 in the upper right and proceeding counter-clockwise: ::

                +--------+--------+
                | Q1   * | *   Q0 |
                |   *    |    *   |
                |  *     |     *  |                               AQ1 | AQ0
                +--------o--------+ --(inverse Abel transform)--> ----o----
                |  *     |     *  |                               AQ2 | AQ3
                |   *    |    *   |
                | Q2  *  | *   Q3 |          AQi == inverse Abel transform
                +--------+--------+                 of quadrant Qi

            Three cases are possible:

            1) symmetry_axis = 0 (vertical): ::

                Combine:  Q01 = Q0 + Q1, Q23 = Q2 + Q3
                inverse image   AQ01 | AQ01
                                -----o-----  (left and right sides equivalent)
                                AQ23 | AQ23

            2) symmetry_axis = 1 (horizontal): ::

                Combine: Q12 = Q1 + Q2, Q03 = Q0 + Q3
                inverse image   AQ12 | AQ03
                                -----o-----  (top and bottom equivalent)
                                AQ12 | AQ03

            3) symmetry_axis = (0, 1) (both): ::

                Combine: Q = Q0 + Q1 + Q2 + Q3
                inverse image   AQ | AQ
                                ---o---  (all quadrants equivalent)
                                AQ | AQ

        Notes
        -----
        As mentioned above, PyAbel offers several different approximations
        to the exact abel transform.
        All the methods should produce similar results, but
        depending on the level and type of noise found in the image,
        certain methods may perform better than others. Please see the
        "Transform Methods" section of the documentation for complete
        information.

        ``hansenlaw``
            This "recursive algorithm" produces reliable results
            and is quite fast (~0.1 sec for a 1001x1001 image).
            It makes no assumptions about the data
            (apart from cylindrical symmetry). It tends to require that the
            data is finely sampled for good convergence.

            E. W. Hansen and P.-L. Law "Recursive methods for computing
            the Abel transform and its inverse"
            J. Opt. Soc. A*2, 510-520 (1985)
            http://dx.doi.org/10.1364/JOSAA.2.000510

        ``basex`` *
            The "basis set expansion" algorithm describes the data in terms
            of gaussian functions, which themselves can be abel transformed
            analytically. Because the gaussian functions are approximately the
            size of each pixel, this method also does not make any assumption
            about the shape of the data. This method is one of the de-facto
            standards in photoelectron/photoion imaging.

            Dribinski et al, 2002 (Rev. Sci. Instrum. 73, 2634)
            http://dx.doi.org/10.1063/1.1482156

        ``direct``
            This method attempts a direct integration of the Abel
            transform integral. It makes no assumptions about the data
            (apart from cylindrical symmetry),
            but it typically requires fine sampling to converge.
            Such methods are typically inefficient,
            but thanks to this Cython implementation (by Roman Yurchuk),
            this 'direct' method is competitive with the other methods.

        ``onion_bordas``
            The onion peeling method, also known as "back projection",
            originates from Bordas *et al.*
            `Rev. Sci. Instrum. 67, 2257 (1996)`_.

            .. _Rev. Sci. Instrum. 67, 2257 (1996): <http://scitation.aip.org/content/aip/journal/rsi/67/6/10.1063/1.1147044>

            The algorithm was subsequently coded in MatLab by Rallis, Wells
            and co-workers, `Rev. Sci. Instrum. 85, 113105 (2014)`_.

            .. _Rev. Sci. Instrum. 85, 113105 (2014): <http://scitation.aip.org/content/aip/journal/rsi/85/11/10.1063/1.4899267>

            which was used as the basis of this Python port. See issue `#56`_.

            .. _#56: <https://github.com/PyAbel/PyAbel/issues/56>

        ``onion_peeling``
            This is one of the most compact and fast algorithms, with the
            inverse Abel transform achieved in one Python code-line, PR #155.
            See also ``three_point`` is the onion peeling algorithm as
            described by Dasch (1992), reference below.

        ``two_point``
            Another Dasch method. Simple, and fast, but not as accurate as the
            other methods.

        ``three_point`` *
            The "Three Point" Abel transform method
            exploits the observation that the value of the Abel inverted data
            at any radial position r is primarily determined from changes
            in the projection data in the neighborhood of r.
            This method is also very efficient
            once it has generated the basis sets.

            Dasch, 1992 (Applied Optics, Vol 31, No 8, March 1992,
            Pg 1146-1152).

        ``*``
            The methods marked with a * indicate methods that generate basis
            sets. The first time they are run for a new image size,
            it takes seconds to minutes to generate the basis set.
            However, this basis set is saved to disk and can be reloaded,
            meaning that future transforms are performed
            much more quickly.
        """
        # Use None (not a dict literal) as the default for the option dicts:
        # a mutable default argument would be shared across all calls.
        if transform_options is None:
            transform_options = dict()
        if center_options is None:
            center_options = dict()
        if angular_integration_options is None:
            angular_integration_options = dict()

        # public class variables
        self.IM = IM  # (optionally) centered, odd-width image
        self.method = method
        self.direction = direction

        # private internal variables
        self._symmetry_axis = symmetry_axis
        self._symmetrize_method = symmetrize_method
        self._use_quadrants = use_quadrants
        self._recast_as_float64 = recast_as_float64

        # BUG FIX: the original code assigned a throwaway local
        # (`_verbose = verbose`) and built `_verboseprint` from the class
        # attribute `_verbose` (always False) at class-definition time, so
        # `verbose=True` never produced any output. Bind both on the
        # instance, before the processing steps that print.
        self._verbose = verbose
        self._verboseprint = print if verbose else (lambda *a, **k: None)

        # image processing
        self._verify_some_inputs()
        self._center_image(center, **center_options)
        self._abel_transform_image(**transform_options)
        self._integration(angular_integration, transform_options,
                          **angular_integration_options)
        # end of class instance

    def _verify_some_inputs(self):
        # Basic sanity checks; also normalizes _symmetry_axis to a sequence
        # and (optionally) recasts the image to float64.
        if self.IM.ndim == 1 or np.shape(self.IM)[0] <= 2:
            # (The original message was built with backslash continuations
            # that embedded long runs of spaces in the text.)
            raise ValueError('Data must be 2-dimensional. To transform a '
                             'single row, use the individual transform '
                             'function.')
        if not np.any(self._use_quadrants):
            raise ValueError('No image quadrants selected to use')
        if not isinstance(self._symmetry_axis, (list, tuple)):
            # if the user supplies an int, make it into a 1-element list:
            self._symmetry_axis = [self._symmetry_axis]
        if self._recast_as_float64:
            self.IM = self.IM.astype('float64')

    def _center_image(self, center, **center_options):
        # 'none' means the caller guarantees an odd-width, centered image.
        if center != "none":
            self.IM = tools.center.center_image(self.IM, center,
                                                **center_options)

    def _abel_transform_image(self, **transform_options):
        # Dispatch table mapping the `method` option to its implementation.
        abel_transform = {
            "basex": basex.basex_transform,
            "direct": direct.direct_transform,
            "hansenlaw": hansenlaw.hansenlaw_transform,
            "onion_bordas": onion_bordas.onion_bordas_transform,
            "onion_peeling": dasch.onion_peeling_transform,
            "two_point": dasch.two_point_transform,
            "three_point": dasch.three_point_transform,
        }

        self._verboseprint('Calculating {0} Abel transform using {1} method -'
                           .format(self.direction, self.method),
                           '\n image size: {:d}x{:d}'.format(*self.IM.shape))

        t0 = time.time()

        # split image into quadrants
        Q0, Q1, Q2, Q3 = tools.symmetry.get_image_quadrants(
            self.IM, reorient=True,
            symmetry_axis=self._symmetry_axis,
            symmetrize_method=self._symmetrize_method)

        def selected_transform(Z):
            # Apply the chosen transform method/direction to one quadrant.
            return abel_transform[self.method](Z, direction=self.direction,
                                               **transform_options)

        AQ0 = AQ1 = AQ2 = AQ3 = None

        # Inverse Abel transform for quadrant 1 (all cases include Q1)
        AQ1 = selected_transform(Q1)

        if 0 in self._symmetry_axis:
            AQ2 = selected_transform(Q2)

        if 1 in self._symmetry_axis:
            AQ0 = selected_transform(Q0)

        if None in self._symmetry_axis:
            # no symmetry assumed: every quadrant transformed independently
            AQ0 = selected_transform(Q0)
            AQ2 = selected_transform(Q2)
            AQ3 = selected_transform(Q3)

        # reassemble image
        self.transform = tools.symmetry.put_image_quadrants(
            (AQ0, AQ1, AQ2, AQ3),
            original_image_shape=self.IM.shape,
            symmetry_axis=self._symmetry_axis)

        self._verboseprint("{:.2f} seconds".format(time.time()-t0))

    def _integration(self, angular_integration, transform_options,
                     **angular_integration_options):
        # Optional post-processing: angular (speed) integration of the
        # transformed image.
        if angular_integration:
            if 'dr' in transform_options and \
               'dr' not in angular_integration_options:
                # assume user forgot to pass grid size
                angular_integration_options['dr'] = transform_options['dr']

            self.angular_integration = tools.vmi.angular_integration(
                self.transform,
                **angular_integration_options)
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Status words combine a fixed qualifier with a severity flag: warnings keep
# the high bit clear, errors set it (0x80000000).  All specific codes below
# are OR'd onto one of these two bases.
NX_STATUS_QUALIFIER = (0x3FF63000)
NX_STATUS_WARNING = (0x00000000)
NX_STATUS_ERROR = (0x80000000)
NX_WARNING_BASE = (NX_STATUS_QUALIFIER | NX_STATUS_WARNING)
NX_ERROR_BASE = (NX_STATUS_QUALIFIER | NX_STATUS_ERROR)
NX_SUCCESS = 0
# NI-XNET error codes: each constant is NX_ERROR_BASE OR'd with a specific
# code in the low bits.  Several names below are legacy aliases that share a
# value with a newer name; they are noted where they occur.
NX_ERR_INTERNAL_ERROR = (NX_ERROR_BASE | 0x001)
NX_ERR_SELF_TEST_ERROR1 = (NX_ERROR_BASE | 0x002)
NX_ERR_SELF_TEST_ERROR2 = (NX_ERROR_BASE | 0x003)
NX_ERR_SELF_TEST_ERROR3 = (NX_ERROR_BASE | 0x004)
NX_ERR_SELF_TEST_ERROR4 = (NX_ERROR_BASE | 0x005)
NX_ERR_SELF_TEST_ERROR5 = (NX_ERROR_BASE | 0x006)
NX_ERR_POWER_SUSPENDED = (NX_ERROR_BASE | 0x007)
NX_ERR_OUTPUT_QUEUE_OVERFLOW = (NX_ERROR_BASE | 0x008)
NX_ERR_FIRMWARE_NO_RESPONSE = (NX_ERROR_BASE | 0x009)
NX_ERR_EVENT_TIMEOUT = (NX_ERROR_BASE | 0x00A)
NX_ERR_INPUT_QUEUE_OVERFLOW = (NX_ERROR_BASE | 0x00B)
NX_ERR_INPUT_QUEUE_READ_SIZE = (NX_ERROR_BASE | 0x00C)
NX_ERR_DUPLICATE_FRAME_OBJECT = (NX_ERROR_BASE | 0x00D)
NX_ERR_DUPLICATE_STREAM_OBJECT = (NX_ERROR_BASE | 0x00E)
NX_ERR_SELF_TEST_NOT_POSSIBLE = (NX_ERROR_BASE | 0x00F)
NX_ERR_MEMORY_FULL = (NX_ERROR_BASE | 0x010)
NX_ERR_MAX_SESSIONS = (NX_ERROR_BASE | 0x011)
NX_ERR_MAX_FRAMES = (NX_ERROR_BASE | 0x012)
NX_ERR_MAX_DEVICES = (NX_ERROR_BASE | 0x013)
NX_ERR_MISSING_FILE = (NX_ERROR_BASE | 0x014)
NX_ERR_PARAMETER_NULL_OR_EMPTY = (NX_ERROR_BASE | 0x015)
NX_ERR_MAX_SCHEDULES = (NX_ERROR_BASE | 0x016)
NX_ERR_SELF_TEST_ERROR6 = (NX_ERROR_BASE | 0x017)
NX_ERR_SELF_TEST_IN_PROGRESS = (NX_ERROR_BASE | 0x018)
# Handle / configuration errors.
NX_ERR_INVALID_SESSION_HANDLE = (NX_ERROR_BASE | 0x020)
NX_ERR_INVALID_SYSTEM_HANDLE = (NX_ERROR_BASE | 0x021)
NX_ERR_DEVICE_HANDLE_EXPECTED = (NX_ERROR_BASE | 0x022)
NX_ERR_INTF_HANDLE_EXPECTED = (NX_ERROR_BASE | 0x023)
NX_ERR_PROPERTY_MODE_CONFLICTING = (NX_ERROR_BASE | 0x024)
NX_ERR_TIMING_SOURCE_NOT_SUPPORTED = (NX_ERROR_BASE | 0x025)
NX_ERR_MULTIPLE_TIMING_SOURCE = (NX_ERROR_BASE | 0x026)
NX_ERR_OVERLAPPING_IO = (NX_ERROR_BASE | 0x027)
NX_ERR_MISSING_BUS_POWER = (NX_ERROR_BASE | 0x028)
NX_ERR_CDAQ_CONNECTION_LOST = (NX_ERROR_BASE | 0x029)
# Transceiver / timing / synchronization errors.
NX_ERR_INVALID_TRANSCEIVER = (NX_ERROR_BASE | 0x071)
NX_ERR_INVALID_BAUD_RATE = (NX_ERROR_BASE | 0x072)
NX_ERR_BAUD_RATE_NOT_CONFIGURED = (NX_ERROR_BASE | 0x073)
NX_ERR_INVALID_BIT_TIMINGS = (NX_ERROR_BASE | 0x074)
NX_ERR_BAUD_RATE_XCVR_MISMATCH = (NX_ERROR_BASE | 0x075)
NX_ERR_UNKNOWN_TIMING_SOURCE = (NX_ERROR_BASE | 0x076)
NX_ERR_UNKNOWN_SYNCHRONIZATION_SOURCE = (NX_ERROR_BASE | 0x077)
NX_ERR_MISSING_TIMEBASE_SOURCE = (NX_ERROR_BASE | 0x078)
NX_ERR_UNKNOWN_TIMEBASE_FREQUENCY = (NX_ERROR_BASE | 0x079)
NX_ERR_UNCONNECTED_SYNCHRONIZATION_SOURCE = (NX_ERROR_BASE | 0x07A)
NX_ERR_CONNECTED_SYNCHRONIZATION_TERMINAL = (NX_ERROR_BASE | 0x07B)
NX_ERR_INVALID_SYNCHRONIZATION_SOURCE = (NX_ERROR_BASE | 0x07C)
NX_ERR_INVALID_SYNCHRONIZATION_DESTINATION = (NX_ERROR_BASE | 0x07D)
NX_ERR_INVALID_SYNCHRONIZATION_COMBINATION = (NX_ERROR_BASE | 0x07E)
NX_ERR_TIMEBASE_DISAPPEARED = (NX_ERROR_BASE | 0x07F)
NX_ERR_MACROTICK_DISCONNECTED = (NX_ERROR_BASE | 0x080)
# Database / session configuration errors.
NX_ERR_CANNOT_OPEN_DATABASE_FILE = (NX_ERROR_BASE | 0x081)
NX_ERR_CLUSTER_NOT_FOUND = (NX_ERROR_BASE | 0x082)
NX_ERR_FRAME_NOT_FOUND = (NX_ERROR_BASE | 0x083)
NX_ERR_SIGNAL_NOT_FOUND = (NX_ERROR_BASE | 0x084)
NX_ERR_UNCONFIGURED_CLUSTER = (NX_ERROR_BASE | 0x085)
NX_ERR_UNCONFIGURED_FRAME = (NX_ERROR_BASE | 0x086)
NX_ERR_UNCONFIGURED_SIGNAL = (NX_ERROR_BASE | 0x087)
NX_ERR_MULTIPLE_CLUSTERS = (NX_ERROR_BASE | 0x088)
NX_ERR_SUBORDINATE_NOT_ALLOWED = (NX_ERROR_BASE | 0x089)
NX_ERR_INVALID_INTERFACE = (NX_ERROR_BASE | 0x08A)
NX_ERR_INVALID_PROTOCOL = (NX_ERROR_BASE | 0x08B)
NX_ERR_INPUT_SESSION_MUST_AUTO_START = (NX_ERROR_BASE | 0x08C)
NX_ERR_INVALID_PROPERTY_ID = (NX_ERROR_BASE | 0x08D)
NX_ERR_INVALID_PROPERTY_SIZE = (NX_ERROR_BASE | 0x08E)
NX_ERR_INCORRECT_MODE = (NX_ERROR_BASE | 0x08F)
NX_ERR_BUFFER_TOO_SMALL = (NX_ERROR_BASE | 0x090)
NX_ERR_MUST_SPECIFY_MULTIPLEXERS = (NX_ERROR_BASE | 0x091)
NX_ERR_SESSION_NOT_FOUND = (NX_ERROR_BASE | 0x092)
NX_ERR_MULTIPLE_USE_OF_SESSION = (NX_ERROR_BASE | 0x093)
NX_ERR_ONLY_ONE_FRAME = (NX_ERROR_BASE | 0x094)
NX_ERR_DUPLICATE_ALIAS = (NX_ERROR_BASE | 0x095)
NX_ERR_DEPLOYMENT_IN_PROGRESS = (NX_ERROR_BASE | 0x096)
NX_ERR_NO_FRAMES_OR_SIGNALS = (NX_ERROR_BASE | 0x097)
NX_ERR_INVALID_MODE = (NX_ERROR_BASE | 0x098)
NX_ERR_NEED_REFERENCE = (NX_ERROR_BASE | 0x099)
NX_ERR_DIFFERENT_CLUSTER_OPEN = (NX_ERROR_BASE | 0x09A)
NX_ERR_FLEX_RAY_INVALID_CYCLE_REP = (NX_ERROR_BASE | 0x09B)
NX_ERR_SESSION_CLEARED = (NX_ERROR_BASE | 0x09C)
NX_ERR_WRONG_MODE_FOR_CREATE_SELECTION = (NX_ERROR_BASE | 0x09D)
NX_ERR_INTERFACE_RUNNING = (NX_ERROR_BASE | 0x09E)
NX_ERR_FRAME_WRITE_TOO_LARGE = (NX_ERROR_BASE | 0x09F)
NX_ERR_TIMEOUT_WITHOUT_NUM_TO_READ = (NX_ERROR_BASE | 0x0A0)
NX_ERR_TIMESTAMPS_NOT_SUPPORTED = (NX_ERROR_BASE | 0x0A1)
NX_ERR_UNKNOWN_CONDITION = (NX_ERROR_BASE | 0x0A2)
NX_ERR_SESSION_NOT_STARTED = (NX_ERROR_BASE | 0x0A3)
NX_ERR_MAX_WAITS_EXCEEDED = (NX_ERROR_BASE | 0x0A4)
NX_ERR_INVALID_DEVICE = (NX_ERROR_BASE | 0x0A5)
NX_ERR_INVALID_TERMINAL_NAME = (NX_ERROR_BASE | 0x0A6)
NX_ERR_PORT_LE_DS_BUSY = (NX_ERROR_BASE | 0x0A7)
NX_ERR_INVALID_KEYSLOT = (NX_ERROR_BASE | 0x0A8)
NX_ERR_MAX_QUEUE_SIZE_EXCEEDED = (NX_ERROR_BASE | 0x0A9)
NX_ERR_FRAME_SIZE_MISMATCH = (NX_ERROR_BASE | 0x0AA)
NX_ERR_INDEX_TOO_BIG = (NX_ERROR_BASE | 0x0AB)
NX_ERR_SESSION_MODE_INCOMPATIBILITY = (NX_ERROR_BASE | 0x0AC)
# NOTE: defined out of numeric order (0x15D) but kept here in the source order.
NX_ERR_SESSION_TYPE_FRAME_INCOMPATIBILITY = (NX_ERROR_BASE | 0x15D)
NX_ERR_TRIGGER_SIGNAL_NOT_ALLOWED = (NX_ERROR_BASE | 0x0AD)
NX_ERR_ONLY_ONE_CLUSTER = (NX_ERROR_BASE | 0x0AE)
NX_ERR_CONVERT_INVALID_PAYLOAD = (NX_ERROR_BASE | 0x0AF)
NX_ERR_MEMORY_FULL_READ_DATA = (NX_ERROR_BASE | 0x0B0)
NX_ERR_MEMORY_FULL_FIRMWARE = (NX_ERROR_BASE | 0x0B1)
NX_ERR_COMMUNICATION_LOST = (NX_ERROR_BASE | 0x0B2)
NX_ERR_INVALID_PRIORITY = (NX_ERROR_BASE | 0x0B3)
NX_ERR_SYNCHRONIZATION_NOT_ALLOWED = (NX_ERROR_BASE | 0x0B4)
NX_ERR_TIME_NOT_REACHED = (NX_ERROR_BASE | 0x0B5)
NX_ERR_INTERNAL_INPUT_QUEUE_OVERFLOW = (NX_ERROR_BASE | 0x0B6)
NX_ERR_BAD_IMAGE_FILE = (NX_ERROR_BASE | 0x0B7)
NX_ERR_INVALID_LOGFILE = (NX_ERROR_BASE | 0x0B8)
NX_ERR_DONGLE_COMMUNICATION_LOST = (NX_ERROR_BASE | 0xB9)
NX_ERR_INVALID_PROPERTY_VALUE = (NX_ERROR_BASE | 0x0C0)
NX_ERR_FLEX_RAY_INTEGRATION_FAILED = (NX_ERROR_BASE | 0x0C1)
NX_ERR_PDU_NOT_FOUND = (NX_ERROR_BASE | 0x0D0)
NX_ERR_UNCONFIGURED_PDU = (NX_ERROR_BASE | 0x0D1)
NX_ERR_DUPLICATE_PDU_OBJECT = (NX_ERROR_BASE | 0x0D2)
NX_ERR_NEED_PDU = (NX_ERROR_BASE | 0x0D3)
NX_ERR_RPC_COMMUNICATION = (NX_ERROR_BASE | 0x100)
# FILE_TRANSFER_* are the current names; FTP_* are aliases with the same code.
NX_ERR_FILE_TRANSFER_COMMUNICATION = (NX_ERROR_BASE | 0x101)
NX_ERR_FTP_COMMUNICATION = (NX_ERROR_BASE | 0x101)
NX_ERR_FILE_TRANSFER_ACCESS = (NX_ERROR_BASE | 0x102)
NX_ERR_FTP_FILE_ACCESS = (NX_ERROR_BASE | 0x102)
NX_ERR_DATABASE_ALREADY_IN_USE = (NX_ERROR_BASE | 0x103)
NX_ERR_INTERNAL_FILE_ACCESS = (NX_ERROR_BASE | 0x104)
NX_ERR_FILE_TRANSFER_ACTIVE = (NX_ERROR_BASE | 0x105)
NX_ERR_DLL_LOAD = (NX_ERROR_BASE | 0x117)
NX_ERR_OBJECT_STARTED = (NX_ERROR_BASE | 0x11E)
NX_ERR_DEFAULT_PAYLOAD_NUM_BYTES = (NX_ERROR_BASE | 0x11F)
NX_ERR_INVALID_ARBITRATION_ID = (NX_ERROR_BASE | 0x123)
NX_ERR_INVALID_LIN_ID = (NX_ERROR_BASE | 0x124)
NX_ERR_TOO_MANY_OPEN_FILES = (NX_ERROR_BASE | 0x130)
NX_ERR_DATABASE_BAD_REFERENCE = (NX_ERROR_BASE | 0x131)
NX_ERR_CREATE_DATABASE_FILE = (NX_ERROR_BASE | 0x132)
NX_ERR_DUPLICATE_CLUSTER_NAME = (NX_ERROR_BASE | 0x133)
NX_ERR_DUPLICATE_FRAME_NAME = (NX_ERROR_BASE | 0x134)
NX_ERR_DUPLICATE_SIGNAL_NAME = (NX_ERROR_BASE | 0x135)
NX_ERR_DUPLICATE_ECU_NAME = (NX_ERROR_BASE | 0x136)
NX_ERR_DUPLICATE_SUBFRAME_NAME = (NX_ERROR_BASE | 0x137)
NX_ERR_IMPROPER_PROTOCOL = (NX_ERROR_BASE | 0x138)
NX_ERR_OBJECT_RELATION = (NX_ERROR_BASE | 0x139)
NX_ERR_UNCONFIGURED_REQUIRED_PROPERTY = (NX_ERROR_BASE | 0x13B)
NX_ERR_NOT_SUPPORTED_ON_RT = (NX_ERROR_BASE | 0x13C)
NX_ERR_NAME_SYNTAX = (NX_ERROR_BASE | 0x13D)
NX_ERR_FILE_EXTENSION = (NX_ERROR_BASE | 0x13E)
NX_ERR_DATABASE_OBJECT_NOT_FOUND = (NX_ERROR_BASE | 0x13F)
NX_ERR_REMOVE_DATABASE_CACHE_FILE = (NX_ERROR_BASE | 0x140)
NX_ERR_READ_ONLY_PROPERTY = (NX_ERROR_BASE | 0x141)
NX_ERR_FRAME_MUX_EXISTS = (NX_ERROR_BASE | 0x142)
NX_ERR_UNDEFINED_FIRST_SLOT = (NX_ERROR_BASE | 0x144)
NX_ERR_UNDEFINED_FIRST_CHANNELS = (NX_ERROR_BASE | 0x145)
NX_ERR_UNDEFINED_PROTOCOL = (NX_ERROR_BASE | 0x146)
NX_ERR_OLD_DATABASE_CACHE_FILE = (NX_ERROR_BASE | 0x147)
NX_ERR_DB_CONFIG_SIG_OUT_OF_FRAME = (NX_ERROR_BASE | 0x148)
NX_ERR_DB_CONFIG_SIG_OVERLAPPED = (NX_ERROR_BASE | 0x149)
NX_ERR_DB_CONFIG_SIG52_BIT_INTEGER = (NX_ERROR_BASE | 0x14A)
NX_ERR_DB_CONFIG_FRAME_NUM_BYTES = (NX_ERROR_BASE | 0x14B)
NX_ERR_MULT_SYNC_STARTUP = (NX_ERROR_BASE | 0x14C)
NX_ERR_INVALID_CLUSTER = (NX_ERROR_BASE | 0x14D)
NX_ERR_DATABASE_NAME = (NX_ERROR_BASE | 0x14E)
NX_ERR_DATABASE_OBJECT_LOCKED = (NX_ERROR_BASE | 0x14F)
NX_ERR_ALIAS_NOT_FOUND = (NX_ERROR_BASE | 0x150)
NX_ERR_CLUSTER_FRAME_CHANNEL_RELATION = (NX_ERROR_BASE | 0x151)
NX_ERR_DYN_FLEX_RAY_FRAME_CHAN_AAND_B = (NX_ERROR_BASE | 0x152)
NX_ERR_DATABASE_LOCKED_IN_USE = (NX_ERROR_BASE | 0x153)
NX_ERR_AMBIGUOUS_FRAME_NAME = (NX_ERROR_BASE | 0x154)
NX_ERR_AMBIGUOUS_SIGNAL_NAME = (NX_ERROR_BASE | 0x155)
NX_ERR_AMBIGUOUS_ECU_NAME = (NX_ERROR_BASE | 0x156)
NX_ERR_AMBIGUOUS_SUBFRAME_NAME = (NX_ERROR_BASE | 0x157)
NX_ERR_AMBIGUOUS_SCHEDULE_NAME = (NX_ERROR_BASE | 0x158)
NX_ERR_DUPLICATE_SCHEDULE_NAME = (NX_ERROR_BASE | 0x159)
# NOTE: defined out of numeric order (0x18F) but kept here in the source order.
NX_ERR_DIAGNOSTIC_SCHEDULE_NOT_DEFINED = (NX_ERROR_BASE | 0x18F)
NX_ERR_PROTOCOL_MUX_NOT_SUPPORTED = (NX_ERROR_BASE | 0x15A)
NX_ERR_SAVE_LI_NNOT_SUPPORTED = (NX_ERROR_BASE | 0x15B)
NX_ERR_LI_NMASTER_NOT_DEFINED = (NX_ERROR_BASE | 0x15C)
NX_ERR_MIX_AUTO_MANUAL_OPEN = (NX_ERROR_BASE | 0x15E)
NX_ERR_AUTO_OPEN_NOT_SUPPORTED = (NX_ERROR_BASE | 0x15F)
NX_ERR_WRONG_NUM_SIGNALS_WRITTEN = (NX_ERROR_BASE | 0x160)
NX_ERR_MULTIPLE_LV_PROJECT = (NX_ERROR_BASE | 0x161)
NX_ERR_SESSION_CONFLICT_LV_PROJECT = (NX_ERROR_BASE | 0x162)
NX_ERR_DB_OBJECT_NAME_EMPTY = (NX_ERROR_BASE | 0x163)
NX_ERR_MISSING_ALIAS_IN_DB_OBJECT_NAME = (NX_ERROR_BASE | 0x164)
NX_ERR_FIBEX_IMPORT_VERSION = (NX_ERROR_BASE | 0x165)
NX_ERR_EMPTY_SESSION_NAME = (NX_ERROR_BASE | 0x166)
NX_ERR_NOT_ENOUGH_MESSAGE_RAM_FOR_OBJECT = (NX_ERROR_BASE | 0x167)
NX_ERR_KEY_SLOT_ID_CONFIG = (NX_ERROR_BASE | 0x168)
NX_ERR_UNSUPPORTED_SESSION = (NX_ERROR_BASE | 0x169)
NX_ERR_OBJECT_CREATED_AFTER_START = (NX_ERROR_BASE | 0x170)
NX_ERR_SINGLE_SLOT_ENABLED_AFTER_START = (NX_ERROR_BASE | 0x171)
NX_ERR_UNSUPPORTED_NUM_MACROTICKS = (NX_ERROR_BASE | 0x172)
NX_ERR_BAD_SYNTAX_IN_DATABASE_OBJECT_NAME = (NX_ERROR_BASE | 0x173)
NX_ERR_AMBIGUOUS_SCHEDULE_ENTRY_NAME = (NX_ERROR_BASE | 0x174)
NX_ERR_DUPLICATE_SCHEDULE_ENTRY_NAME = (NX_ERROR_BASE | 0x175)
NX_ERR_UNDEFINED_FRAME_ID = (NX_ERROR_BASE | 0x176)
NX_ERR_UNDEFINED_FRAME_PAYLOAD_LENGTH = (NX_ERROR_BASE | 0x177)
NX_ERR_UNDEFINED_SIGNAL_START_BIT = (NX_ERROR_BASE | 0x178)
NX_ERR_UNDEFINED_SIGNAL_NUM_BITS = (NX_ERROR_BASE | 0x179)
NX_ERR_UNDEFINED_SIGNAL_BYTE_ORDER = (NX_ERROR_BASE | 0x17A)
NX_ERR_UNDEFINED_SIGNAL_DATA_TYPE = (NX_ERROR_BASE | 0x17B)
NX_ERR_UNDEFINED_SUBF_MUX_VALUE = (NX_ERROR_BASE | 0x17C)
NX_ERR_INVALID_LIN_SCHED_INDEX = (NX_ERROR_BASE | 0x17D)
NX_ERR_INVALID_LIN_SCHED_NAME = (NX_ERROR_BASE | 0x17E)
NX_ERR_INVALID_ACTIVE_FRAME_INDEX = (NX_ERROR_BASE | 0x17F)
NX_ERR_INVALID_ACTIVE_FRAME_NAME = (NX_ERROR_BASE | 0x180)
NX_ERR_AMBIGUOUS_PDU = (NX_ERROR_BASE | 0x181)
NX_ERR_DUPLICATE_PDU = (NX_ERROR_BASE | 0x182)
NX_ERR_NUMBER_OF_PD_US = (NX_ERROR_BASE | 0x183)
NX_ERR_PD_US_REQUIRED = (NX_ERROR_BASE | 0x184)
NX_ERR_MAX_PD_US = (NX_ERROR_BASE | 0x185)
NX_ERR_UNSUPPORTED_MODE = (NX_ERROR_BASE | 0x186)
NX_ERR_BAD_FPGA_SIGNATURE = (NX_ERROR_BASE | 0x187)
# *_BADC_SERIES_* names are aliases for the corresponding FPGA errors.
NX_ERR_BADC_SERIES_FPGA_SIGNATURE = (NX_ERR_BAD_FPGA_SIGNATURE)
NX_ERR_BAD_FPGA_REVISION = (NX_ERROR_BASE | 0x188)
NX_ERR_BADC_SERIES_FPGA_REVISION = (NX_ERR_BAD_FPGA_REVISION)
NX_ERR_BAD_FPGA_REVISION_ON_TARGET = (NX_ERROR_BASE | 0x189)
NX_ERR_ROUTE_IN_USE = (NX_ERROR_BASE | 0x18A)
NX_ERR_DA_QMX_INCORRECT_VERSION = (NX_ERROR_BASE | 0x18B)
NX_ERR_ADD_ROUTE = (NX_ERROR_BASE | 0x18C)
NX_ERR_REMOTE_SLEEP_ON_LIN_SLAVE = (NX_ERROR_BASE | 0x18D)
NX_ERR_SLEEP_WAKEUP_NOT_SUPPORTED = (NX_ERROR_BASE | 0x18E)
NX_ERR_LIN_TRANSPORT_LAYER = (NX_ERROR_BASE | 0x192)
NX_ERR_LOGFILE = (NX_ERROR_BASE | 0x193)
NX_ERR_STRM_OUT_TMG_LIN_SCHEDULER_CONFLICT = (NX_ERROR_BASE | 0x200)
NX_ERR_SESSN_TYPE_LIN_INTF_PRS_INCOMPATIBLE = (NX_ERROR_BASE | 0x201)
NX_ERR_SAVE_CLUSTER_ONLY = (NX_ERROR_BASE | 0x202)
# Alias kept for backward compatibility.
NX_ERR_SAVE_LDF_CLUSTER_ONLY = NX_ERR_SAVE_CLUSTER_ONLY
NX_ERR_DUPLICATE_INTERFACE_NAME = (NX_ERROR_BASE | 0x203)
NX_ERR_INCOMPATIABLE_TRANSCEIVER_REVISION = (NX_ERROR_BASE | 0x204)
NX_ERR_INCOMPATIABLE_TRANSCEIVER_IMAGE = (NX_ERROR_BASE | 0x205)
NX_ERR_PROPERTY_NOTSUPPORTED = (NX_ERROR_BASE | 0x206)
NX_ERR_EXPORT_SEMANTIC = (NX_ERROR_BASE | 0x207)
# J1939 transport-protocol errors.
NX_ERR_J1939_QUEUE_OVERFLOW = (NX_ERROR_BASE | 0x0208)
NX_ERR_NON_J1939_FRAME_SIZE = (NX_ERROR_BASE | 0x0209)
NX_ERR_J1939_MISSING_ADDRESS = (NX_ERROR_BASE | 0x020A)
NX_ERR_J1939_ADDRESS_LOST = (NX_ERROR_BASE | 0x020B)
NX_ERR_J1939_CTS_NEXT_PCK_LARGER_TOTAL_PCK_NUM = (NX_ERROR_BASE | 0x020C)
NX_ERR_J1939_CTS_NEXT_PCK = (NX_ERROR_BASE | 0x020D)
NX_ERR_J1939_CTS_NEXT_PCK_NULL = (NX_ERROR_BASE | 0x020E)
NX_ERR_J1939_CTS_PGN = (NX_ERROR_BASE | 0x020F)
NX_ERR_J1939_UNEXPECTED_SEQ_NUM = (NX_ERROR_BASE | 0x0210)
NX_ERR_J1939_MORE_PCK_REQ_THAN_ALLOWED = (NX_ERROR_BASE | 0x0211)
NX_ERR_J1939_TIMEOUT_T1 = (NX_ERROR_BASE | 0x0212)
NX_ERR_J1939_TIMEOUT_T2 = (NX_ERROR_BASE | 0x0213)
NX_ERR_J1939_TIMEOUT_T3 = (NX_ERROR_BASE | 0x0214)
NX_ERR_J1939_TIMEOUT_T4 = (NX_ERROR_BASE | 0x0215)
NX_ERR_J1939_RTS_DLC = (NX_ERROR_BASE | 0x0216)
NX_ERR_J1939_CTS_DLC = (NX_ERROR_BASE | 0x0217)
NX_ERR_J1939_BAM_DLC = (NX_ERROR_BASE | 0x0218)
NX_ERR_J1939_DT_DLC = (NX_ERROR_BASE | 0x0219)
NX_ERR_J1939_ABORT_DLC = (NX_ERROR_BASE | 0x021A)
NX_ERR_J1939_EOMA_DLC = (NX_ERROR_BASE | 0x021B)
NX_ERR_J1939_ABORT_PGN = (NX_ERROR_BASE | 0x021C)
NX_ERR_J1939_CTS_HOLD_MSG = (NX_ERROR_BASE | 0x021D)
NX_ERR_J1939_INVALID_TOTAL_SIZE = (NX_ERROR_BASE | 0x021E)
NX_ERR_J1939_TOTAL_PCK_NUM = (NX_ERROR_BASE | 0x021F)
NX_ERR_J1939_RESERVED_DATA = (NX_ERROR_BASE | 0x0220)
NX_ERR_J1939_NOT_ENOUGH_SYS_RES = (NX_ERROR_BASE | 0x0221)
NX_ERR_J1939_ABORT_MSG_ACTIVE_CONNECTION = (NX_ERROR_BASE | 0x0222)
NX_ERR_J1939_ABORT_MSG_NOT_ENOUGH_SYS_RES = (NX_ERROR_BASE | 0x0223)
NX_ERR_J1939_ABORT_MSG_TIMEOUT = (NX_ERROR_BASE | 0x0224)
NX_ERR_J1939_ABORT_MSG_CTS_REC = (NX_ERROR_BASE | 0x0225)
NX_ERR_J1939_ABORT_MSG_MAX_RETRANSMIT = (NX_ERROR_BASE | 0x0226)
NX_ERR_RPC_VERSION = (NX_ERROR_BASE | 0x0227)
NX_ERR_FRAME_CAN_IO_MODE = (NX_ERROR_BASE | 0x0228)
NX_ERR_INCOMPATIBLE_FLASH = (NX_ERROR_BASE | 0x0229)
NX_ERR_TX_IO_MODE = (NX_ERROR_BASE | 0x022A)
NX_ERR_XS_DONGLE_UNSUPPORTED_BOARD = (NX_ERROR_BASE | 0x022B)
NX_ERR_INVALID_CHAR_IN_DATABASE_ALIAS = (NX_ERROR_BASE | 0x022C)
NX_ERR_INVALID_CHAR_IN_DATABASE_FILEPATH = (NX_ERROR_BASE | 0x022D)
NX_ERR_INVALID_CAN_FD_PORT_TYPE = (NX_ERROR_BASE | 0x022E)
NX_ERR_INV_UNCONDITIONAL_ENTRY = (NX_ERROR_BASE | 0x022F)
NX_ERR_EVENT_ENTRY_NO_SCHEDULE = (NX_ERROR_BASE | 0x0230)
NX_ERR_UNSUPPORTED_USB_SPEED = (NX_ERROR_BASE | 0x0231)
# NI-XNET warning codes: OR'd onto NX_WARNING_BASE (high severity bit clear).
NX_WARN_FD_BAUD_EXCEEDS_CAPABILITY = (NX_WARNING_BASE | 0x40)
NX_WARN_DATABASE_IMPORT = (NX_WARNING_BASE | 0x085)
NX_WARN_DATABASE_IMPORT_FIBEX_NO_XNET_FILE = (NX_WARNING_BASE | 0x086)
NX_WARN_DATABASE_IMPORT_FIBEX_NO_XNET_FILE_PLUS_WARNING = (NX_WARNING_BASE | 0x087)
NX_WARN_DATABASE_BAD_REFERENCE = (NX_WARNING_BASE | 0x131)
NX_WARN_ADVANCED_PDU = (NX_WARNING_BASE | 0x132)
NX_WARN_MUX_EXCEEDS16_BIT = (NX_WARNING_BASE | 0x133)
# Object-class identifiers.  They occupy the field selected by NX_CLASS_MASK
# (bits 16-23) and are OR'd into property IDs below to say which object type
# a property belongs to.
NX_CLASS_DATABASE = 0x00000000
NX_CLASS_CLUSTER = 0x00010000
NX_CLASS_FRAME = 0x00020000
NX_CLASS_SIGNAL = 0x00030000
NX_CLASS_SUBFRAME = 0x00040000
NX_CLASS_ECU = 0x00050000
NX_CLASS_LIN_SCHED = 0x00060000
NX_CLASS_LIN_SCHED_ENTRY = 0x00070000
NX_CLASS_PDU = 0x00080000
NX_CLASS_SESSION = 0x00100000
NX_CLASS_SYSTEM = 0x00110000
NX_CLASS_DEVICE = 0x00120000
NX_CLASS_INTERFACE = 0x00130000
NX_CLASS_ALIAS = 0x00140000
NX_CLASS_MASK = 0x00FF0000
# Property data-type tags.  They occupy the field selected by NX_PRPTYPE_MASK
# (bits 24-31) and encode the value type of each property (u32, f64, bool,
# string, 1-D arrays, object references, timestamps, u64, ...).
NX_PRPTYPE_U32 = 0x00000000
NX_PRPTYPE_F64 = 0x01000000
NX_PRPTYPE_BOOL = 0x02000000
NX_PRPTYPE_STRING = 0x03000000
NX_PRPTYPE_1_DSTRING = 0x04000000
NX_PRPTYPE_REF = 0x05000000
NX_PRPTYPE_1_DREF = 0x06000000
NX_PRPTYPE_TIME = 0x07000000
NX_PRPTYPE_1_DU32 = 0x08000000
NX_PRPTYPE_U64 = 0x09000000
NX_PRPTYPE_1_DU8 = 0x0A000000
NX_PRPTYPE_MASK = 0xFF000000
# Property identifiers.  Each ID packs three fields: a low-order property
# index, an NX_CLASS_* object class (bits 16-23), and an NX_PRPTYPE_* data
# type (bits 24-31).
#
# --- Session properties ---
NX_PROP_SESSION_APPLICATION_PROTOCOL = (0x00000091 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_AUTO_START = (0x00000001 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_CLUSTER_NAME = (0x0000000A | NX_CLASS_SESSION | NX_PRPTYPE_STRING)
NX_PROP_SESSION_DATABASE_NAME = (0x00000002 | NX_CLASS_SESSION | NX_PRPTYPE_STRING)
NX_PROP_SESSION_LIST = (0x00000003 | NX_CLASS_SESSION | NX_PRPTYPE_1_DSTRING)
NX_PROP_SESSION_MODE = (0x00000004 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_NUM_FRAMES = (0x0000000D | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_NUM_IN_LIST = (0x00000005 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_NUM_PEND = (0x00000006 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_NUM_UNUSED = (0x0000000B | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_PAYLD_LEN_MAX = (0x00000009 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_PROTOCOL = (0x00000008 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_QUEUE_SIZE = (0x0000000C | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_RESAMP_RATE = (0x00000007 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
# Session interface (generic) properties.  Note the *_BAUD_RATE and
# *_BAUD_RATE64 pairs share a property index and differ only in data type.
NX_PROP_SESSION_INTF_BAUD_RATE = (0x00000016 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_BAUD_RATE64 = (0x00000016 | NX_CLASS_SESSION | NX_PRPTYPE_U64)
NX_PROP_SESSION_INTF_BUS_ERR_TO_IN_STRM = (0x00000015 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_ECHO_TX = (0x00000010 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_NAME = (0x00000013 | NX_CLASS_SESSION | NX_PRPTYPE_STRING)
NX_PROP_SESSION_INTF_OUT_STRM_LIST = (0x00000011 | NX_CLASS_SESSION | NX_PRPTYPE_1_DREF)
NX_PROP_SESSION_INTF_OUT_STRM_TIMNG = (0x00000012 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_START_TRIG_TO_IN_STRM = (0x00000014 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
# Session interface: CAN-specific properties.
NX_PROP_SESSION_INTF_CAN_EXT_TCVR_CONFIG = (0x00000023 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_LSTN_ONLY = (0x00000022 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_CAN_PEND_TX_ORDER = (0x00000020 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_SING_SHOT = (0x00000024 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_CAN_TERM = (0x00000025 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_TCVR_STATE = (0x00000028 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_TCVR_TYPE = (0x00000029 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_OUT_STRM_LIST_BY_ID = (0x00000021 | NX_CLASS_SESSION | NX_PRPTYPE_1_DU32)
NX_PROP_SESSION_INTF_CAN_IO_MODE = (0x00000026 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_FD_BAUD_RATE = (0x00000027 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_FD_BAUD_RATE64 = (0x00000027 | NX_CLASS_SESSION | NX_PRPTYPE_U64)
NX_PROP_SESSION_INTF_CAN_TX_IO_MODE = (0x00000039 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_CAN_FD_ISO_MODE = (0x0000003E | NX_CLASS_SESSION | NX_PRPTYPE_U32)
# Session interface: FlexRay-specific properties.
NX_PROP_SESSION_INTF_FLEX_RAY_ACC_START_RNG = (0x00000030 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_ALW_HLT_CLK = (0x00000031 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_FLEX_RAY_ALW_PASS_ACT = (0x00000032 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_AUTO_ASLP_WHN_STP = (0x0000003A | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_FLEX_RAY_CLST_DRIFT_DMP = (0x00000033 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_COLDSTART = (0x00000034 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_FLEX_RAY_DEC_CORR = (0x00000035 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_DELAY_COMP_A = (0x00000036 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_DELAY_COMP_B = (0x00000037 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_KEY_SLOT_ID = (0x00000038 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_LATEST_TX = (0x00000041 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_LIST_TIMO = (0x00000042 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_MAC_INIT_OFF_A = (0x00000043 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_MAC_INIT_OFF_B = (0x00000044 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_MIC_INIT_OFF_A = (0x00000045 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_MIC_INIT_OFF_B = (0x00000046 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_MAX_DRIFT = (0x00000047 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_MICROTICK = (0x00000048 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_NULL_TO_IN_STRM = (0x00000049 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_FLEX_RAY_OFF_CORR = (0x00000058 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_OFF_CORR_OUT = (0x00000050 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_RATE_CORR = (0x00000059 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_RATE_CORR_OUT = (0x00000052 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_SAMP_PER_MICRO = (0x00000053 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_SING_SLOT_EN = (0x00000054 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_FLEX_RAY_STATISTICS_EN = (0x0000005A | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_FLEX_RAY_SYM_TO_IN_STRM = (0x0000003D | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_FLEX_RAY_SYNC_CH_A_EVEN = (0x0000005B | NX_CLASS_SESSION | NX_PRPTYPE_1_DU32)
NX_PROP_SESSION_INTF_FLEX_RAY_SYNC_CH_A_ODD = (0x0000005C | NX_CLASS_SESSION | NX_PRPTYPE_1_DU32)
NX_PROP_SESSION_INTF_FLEX_RAY_SYNC_CH_B_EVEN = (0x0000005D | NX_CLASS_SESSION | NX_PRPTYPE_1_DU32)
NX_PROP_SESSION_INTF_FLEX_RAY_SYNC_CH_B_ODD = (0x0000005E | NX_CLASS_SESSION | NX_PRPTYPE_1_DU32)
NX_PROP_SESSION_INTF_FLEX_RAY_SYNC_STATUS = (0x0000005F | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_TERM = (0x00000057 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_WAKEUP_CH = (0x00000055 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_WAKEUP_PTRN = (0x00000056 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_SLEEP = (0x0000003B | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_FLEX_RAY_CONNECTED_CHS = (0x0000003C | NX_CLASS_SESSION | NX_PRPTYPE_U32)
# Session interface: LIN-specific properties.
NX_PROP_SESSION_INTF_LIN_BREAK_LENGTH = (0x00000070 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_LIN_MASTER = (0x00000072 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_LIN_SCHED_NAMES = (0x00000075 | NX_CLASS_SESSION | NX_PRPTYPE_1_DSTRING)
NX_PROP_SESSION_INTF_LIN_SLEEP = (0x00000073 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_LIN_TERM = (0x00000074 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_INTF_LIN_DIAG_P_2MIN = (0x00000077 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_INTF_LIN_DIAG_S_TMIN = (0x00000076 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_INTF_LIN_ALW_START_WO_BUS_PWR = (0x00000078 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_LINO_STR_SLV_RSP_LST_BY_NAD = (0x00000079 | NX_CLASS_SESSION | NX_PRPTYPE_1_DU32)
NX_PROP_SESSION_INTF_LIN_NO_RESPONSE_TO_IN_STRM = (0x00000080 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_SRC_TERM_START_TRIGGER = (0x00000090 | NX_CLASS_SESSION | NX_PRPTYPE_STRING)
# Session: J1939-specific properties.
NX_PROP_SESSION_J1939_ADDRESS = (0x00000092 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_J1939_NAME = (0x00000094 | NX_CLASS_SESSION | NX_PRPTYPE_U64)
NX_PROP_SESSION_J1939ECU = (0x00000093 | NX_CLASS_SESSION | NX_PRPTYPE_REF)
NX_PROP_SESSION_J1939_TIMEOUT_T1 = (0x00000095 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_J1939_TIMEOUT_T2 = (0x00000096 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_J1939_TIMEOUT_T3 = (0x00000097 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_J1939_TIMEOUT_T4 = (0x00000098 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_J1939_RESPONSE_TIME_TR_SD = (0x00000099 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_J1939_RESPONSE_TIME_TR_GD = (0x0000009A | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_J1939_HOLD_TIME_TH = (0x0000009B | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_J1939_NUM_PACKETS_RECV = (0x0000009C | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_J1939_NUM_PACKETS_RESP = (0x0000009D | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_J1939_MAX_REPEAT_CTS = (0x0000009E | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_J1939_FILL_BYTE = (0x0000009F | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_J1939_WRITE_QUEUE_SIZE = (0x000000A0 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_J1939ECU_BUSY = (0x000000A1 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_CAN_EDGE_FILTER = (0x000000A2 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_CAN_TRANSMIT_PAUSE = (0x000000A3 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
NX_PROP_SESSION_INTF_CAN_DISABLE_PROT_EXCEPTION_HANDLING = (0x000000A4 | NX_CLASS_SESSION | NX_PRPTYPE_BOOL)
# --- System properties ---
NX_PROP_SYS_DEV_REFS = (0x00000002 | NX_CLASS_SYSTEM | NX_PRPTYPE_1_DREF)
NX_PROP_SYS_INTF_REFS = (0x00000003 | NX_CLASS_SYSTEM | NX_PRPTYPE_1_DREF)
NX_PROP_SYS_INTF_REFS_CAN = (0x00000004 | NX_CLASS_SYSTEM | NX_PRPTYPE_1_DREF)
NX_PROP_SYS_INTF_REFS_FLEX_RAY = (0x00000005 | NX_CLASS_SYSTEM | NX_PRPTYPE_1_DREF)
NX_PROP_SYS_INTF_REFS_LIN = (0x00000007 | NX_CLASS_SYSTEM | NX_PRPTYPE_1_DREF)
NX_PROP_SYS_VER_BUILD = (0x00000006 | NX_CLASS_SYSTEM | NX_PRPTYPE_U32)
NX_PROP_SYS_VER_MAJOR = (0x00000008 | NX_CLASS_SYSTEM | NX_PRPTYPE_U32)
NX_PROP_SYS_VER_MINOR = (0x00000009 | NX_CLASS_SYSTEM | NX_PRPTYPE_U32)
NX_PROP_SYS_VER_PHASE = (0x0000000A | NX_CLASS_SYSTEM | NX_PRPTYPE_U32)
NX_PROP_SYS_VER_UPDATE = (0x0000000B | NX_CLASS_SYSTEM | NX_PRPTYPE_U32)
NX_PROP_SYS_INTF_REFS_ALL = (0x0000000D | NX_CLASS_SYSTEM | NX_PRPTYPE_1_DREF)
# --- Device properties ---
NX_PROP_DEV_FORM_FAC = (0x00000001 | NX_CLASS_DEVICE | NX_PRPTYPE_U32)
NX_PROP_DEV_INTF_REFS = (0x00000002 | NX_CLASS_DEVICE | NX_PRPTYPE_1_DREF)
NX_PROP_DEV_NAME = (0x00000003 | NX_CLASS_DEVICE | NX_PRPTYPE_STRING)
NX_PROP_DEV_NUM_PORTS = (0x00000004 | NX_CLASS_DEVICE | NX_PRPTYPE_U32)
NX_PROP_DEV_PRODUCT_NUM = (0x00000008 | NX_CLASS_DEVICE | NX_PRPTYPE_U32)
NX_PROP_DEV_SER_NUM = (0x00000005 | NX_CLASS_DEVICE | NX_PRPTYPE_U32)
NX_PROP_DEV_SLOT_NUM = (0x00000006 | NX_CLASS_DEVICE | NX_PRPTYPE_U32)
NX_PROP_DEV_NUM_PORTS_ALL = (0x00000007 | NX_CLASS_DEVICE | NX_PRPTYPE_U32)
NX_PROP_DEV_INTF_REFS_ALL = (0x00000008 | NX_CLASS_DEVICE | NX_PRPTYPE_1_DREF)
# --- Interface properties ---
NX_PROP_INTF_DEV_REF = (0x00000001 | NX_CLASS_INTERFACE | NX_PRPTYPE_REF)
NX_PROP_INTF_NAME = (0x00000002 | NX_CLASS_INTERFACE | NX_PRPTYPE_STRING)
NX_PROP_INTF_NUM = (0x00000003 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_PORT_NUM = (0x00000004 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_PROTOCOL = (0x00000005 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_CAN_TERM_CAP = (0x00000008 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_CAN_TCVR_CAP = (0x00000007 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_DONGLE_STATE = (0x00000009 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_DONGLE_ID = (0x0000000A | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_DONGLE_REVISION = (0x0000000C | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_DONGLE_FIRMWARE_VERSION = (0x0000000D | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_DONGLE_COMPATIBLE_REVISION = (0x0000000E | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_PROP_INTF_DONGLE_COMPATIBLE_FIRMWARE_VERSION = (0x0000000F | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
# --- Session "sub" (advanced) properties ---
NX_PROP_SESSION_SUB_CAN_START_TIME_OFF = (0x00000081 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_SUB_CAN_TX_TIME = (0x00000082 | NX_CLASS_SESSION | NX_PRPTYPE_F64)
NX_PROP_SESSION_SUB_SKIP_N_CYCLIC_FRAMES = (0x00000083 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_SUB_OUTPUT_QUEUE_UPDATE_FREQ = (0x00000084 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_SUB_LIN_TX_N_CORRUPTED_CHKSUMS = (0x00000085 | NX_CLASS_SESSION | NX_PRPTYPE_U32)
NX_PROP_SESSION_SUB_J1939_ADDR_FILTER = (0x00000086 | NX_CLASS_SESSION | NX_PRPTYPE_STRING)
# --- Database properties ---
NX_PROP_DATABASE_NAME = (0x00000001 | NX_CLASS_DATABASE | NX_PRPTYPE_STRING)
NX_PROP_DATABASE_CLST_REFS = (0x00000002 | NX_CLASS_DATABASE | NX_PRPTYPE_1_DREF)
NX_PROP_DATABASE_SHOW_INVALID_FROM_OPEN = (0x00000003 | NX_CLASS_DATABASE | NX_PRPTYPE_BOOL)
# --- Cluster properties ---
NX_PROP_CLST_BAUD_RATE = (0x00000001 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_BAUD_RATE64 = (0x00000001 | NX_CLASS_CLUSTER | NX_PRPTYPE_U64)
NX_PROP_CLST_COMMENT = (0x00000008 | NX_CLASS_CLUSTER | NX_PRPTYPE_STRING)
NX_PROP_CLST_CONFIG_STATUS = (0x00000009 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_DATABASE_REF = (0x00000002 | NX_CLASS_CLUSTER | NX_PRPTYPE_REF)
NX_PROP_CLST_ECU_REFS = (0x00000003 | NX_CLASS_CLUSTER | NX_PRPTYPE_1_DREF)
NX_PROP_CLST_FRM_REFS = (0x00000004 | NX_CLASS_CLUSTER | NX_PRPTYPE_1_DREF)
NX_PROP_CLST_NAME = (0x00000005 | NX_CLASS_CLUSTER | NX_PRPTYPE_STRING)
NX_PROP_CLST_PDU_REFS = (0x00000008 | NX_CLASS_CLUSTER | NX_PRPTYPE_1_DREF)
NX_PROP_CLST_PD_US_REQD = (0x0000000A | NX_CLASS_CLUSTER | NX_PRPTYPE_BOOL)
NX_PROP_CLST_PROTOCOL = (0x00000006 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_SIG_REFS = (0x00000007 | NX_CLASS_CLUSTER | NX_PRPTYPE_1_DREF)
NX_PROP_CLST_CAN_IO_MODE = (0x00000010 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_CAN_FD_BAUD_RATE = (0x00000011 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_CAN_FD_BAUD_RATE64 = (0x00000011 | NX_CLASS_CLUSTER | NX_PRPTYPE_U64)
NX_PROP_CLST_FLEX_RAY_ACT_PT_OFF = (0x00000020 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_CAS_RX_L_MAX = (0x00000021 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_CHANNELS = (0x00000022 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_CLST_DRIFT_DMP = (0x00000023 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_COLD_ST_ATS = (0x00000024 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_CYCLE = (0x00000025 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_DYN_SEG_START = (0x00000026 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_DYN_SLOT_IDL_PH = (0x00000027 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_LATEST_USABLE_DYN = (0x0000002A | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_LATEST_GUAR_DYN = (0x0000002B | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_LIS_NOISE = (0x00000028 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_MACRO_PER_CYCLE = (0x00000029 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_MACROTICK = (0x00000030 | NX_CLASS_CLUSTER | NX_PRPTYPE_F64)
NX_PROP_CLST_FLEX_RAY_MAX_WO_CLK_COR_FAT = (0x00000031 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_MAX_WO_CLK_COR_PAS = (0x00000032 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_MINISLOT_ACT_PT = (0x00000033 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_MINISLOT = (0x00000034 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_NM_VEC_LEN = (0x00000035 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_NIT = (0x00000036 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_NIT_START = (0x00000037 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_NUM_MINISLT = (0x00000038 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_NUM_STAT_SLT = (0x00000039 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_OFF_COR_ST = (0x00000040 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_PAYLD_LEN_DYN_MAX = (0x00000041 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_PAYLD_LEN_MAX = (0x00000042 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_PAYLD_LEN_ST = (0x00000043 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_STAT_SLOT = (0x00000045 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_SYM_WIN = (0x00000046 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_SYM_WIN_START = (0x00000047 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_SYNC_NODE_MAX = (0x00000048 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_TSS_TX = (0x00000049 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_WAKE_SYM_RX_IDL = (0x00000050 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_WAKE_SYM_RX_LOW = (0x00000051 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_WAKE_SYM_RX_WIN = (0x00000052 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_WAKE_SYM_TX_IDL = (0x00000053 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_WAKE_SYM_TX_LOW = (0x00000054 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_FLEX_RAY_USE_WAKEUP = (0x00000055 | NX_CLASS_CLUSTER | NX_PRPTYPE_BOOL)
NX_PROP_CLST_LIN_SCHEDULES = (0x00000070 | NX_CLASS_CLUSTER | NX_PRPTYPE_1_DREF)
NX_PROP_CLST_LIN_TICK = (0x00000071 | NX_CLASS_CLUSTER | NX_PRPTYPE_F64)
NX_PROP_CLST_FLEX_RAY_ALW_PASS_ACT = (0x00000072 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_APPLICATION_PROTOCOL = (0x00000073 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
NX_PROP_CLST_CAN_FD_ISO_MODE = (0x00000074 | NX_CLASS_CLUSTER | NX_PRPTYPE_U32)
# --- Frame properties ---
NX_PROP_FRM_APPLICATION_PROTOCOL = (0x00000064 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_CLUSTER_REF = (0x00000001 | NX_CLASS_FRAME | NX_PRPTYPE_REF)
NX_PROP_FRM_COMMENT = (0x00000002 | NX_CLASS_FRAME | NX_PRPTYPE_STRING)
NX_PROP_FRM_CONFIG_STATUS = (0x00000009 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_DEFAULT_PAYLOAD = (0x00000005 | NX_CLASS_FRAME | NX_PRPTYPE_1_DU8)
NX_PROP_FRM_ID = (0x00000003 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_NAME = (0x00000004 | NX_CLASS_FRAME | NX_PRPTYPE_STRING)
NX_PROP_FRM_PAYLOAD_LEN = (0x00000007 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_SIG_REFS = (0x00000008 | NX_CLASS_FRAME | NX_PRPTYPE_1_DREF)
NX_PROP_FRM_CAN_EXT_ID = (0x00000010 | NX_CLASS_FRAME | NX_PRPTYPE_BOOL)
NX_PROP_FRM_CAN_TIMING_TYPE = (0x00000011 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_CAN_TX_TIME = (0x00000012 | NX_CLASS_FRAME | NX_PRPTYPE_F64)
NX_PROP_FRM_FLEX_RAY_BASE_CYCLE = (0x00000020 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_FLEX_RAY_CH_ASSIGN = (0x00000021 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_FLEX_RAY_CYCLE_REP = (0x00000022 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_FLEX_RAY_PREAMBLE = (0x00000023 | NX_CLASS_FRAME | NX_PRPTYPE_BOOL)
NX_PROP_FRM_FLEX_RAY_STARTUP = (0x00000024 | NX_CLASS_FRAME | NX_PRPTYPE_BOOL)
NX_PROP_FRM_FLEX_RAY_SYNC = (0x00000025 | NX_CLASS_FRAME | NX_PRPTYPE_BOOL)
NX_PROP_FRM_FLEX_RAY_TIMING_TYPE = (0x00000026 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_FLEX_RAY_IN_CYC_REP_ENABLED = (0x00000030 | NX_CLASS_FRAME | NX_PRPTYPE_BOOL)
NX_PROP_FRM_FLEX_RAY_IN_CYC_REP_I_DS = (0x00000031 | NX_CLASS_FRAME | NX_PRPTYPE_1_DU32)
NX_PROP_FRM_FLEX_RAY_IN_CYC_REP_CH_ASSIGNS = (0x00000032 | NX_CLASS_FRAME | NX_PRPTYPE_1_DU32)
NX_PROP_FRM_LIN_CHECKSUM = (0x00000050 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_FRM_MUX_IS_MUXED = (0x00000040 | NX_CLASS_FRAME | NX_PRPTYPE_BOOL)
NX_PROP_FRM_MUX_DATA_MUX_SIG_REF = (0x00000041 | NX_CLASS_FRAME | NX_PRPTYPE_REF)
NX_PROP_FRM_MUX_STATIC_SIG_REFS = (0x00000042 | NX_CLASS_FRAME | NX_PRPTYPE_1_DREF)
NX_PROP_FRM_MUX_SUBFRAME_REFS = (0x00000043 | NX_CLASS_FRAME | NX_PRPTYPE_1_DREF)
NX_PROP_FRM_PDU_REFS = (0x00000060 | NX_CLASS_FRAME | NX_PRPTYPE_1_DREF)
NX_PROP_FRM_PDU_START_BITS = (0x00000061 | NX_CLASS_FRAME | NX_PRPTYPE_1_DU32)
NX_PROP_FRM_PDU_UPDATE_BITS = (0x00000063 | NX_CLASS_FRAME | NX_PRPTYPE_1_DU32)
NX_PROP_FRM_VARIABLE_PAYLOAD = (0x00000065 | NX_CLASS_FRAME | NX_PRPTYPE_BOOL)
NX_PROP_FRM_CA_NIO_MODE = (0x00000066 | NX_CLASS_FRAME | NX_PRPTYPE_U32)
NX_PROP_PDU_CLUSTER_REF = (0x00000004 | NX_CLASS_PDU | NX_PRPTYPE_REF)
NX_PROP_PDU_DEFAULT_PAYLOAD = (0x00000005 | NX_CLASS_PDU | NX_PRPTYPE_1_DU8)
NX_PROP_PDU_COMMENT = (0x00000002 | NX_CLASS_PDU | NX_PRPTYPE_STRING)
NX_PROP_PDU_CONFIG_STATUS = (0x00000007 | NX_CLASS_PDU | NX_PRPTYPE_U32)
NX_PROP_PDU_FRM_REFS = (0x00000006 | NX_CLASS_PDU | NX_PRPTYPE_1_DREF)
NX_PROP_PDU_NAME = (0x00000001 | NX_CLASS_PDU | NX_PRPTYPE_STRING)
NX_PROP_PDU_PAYLOAD_LEN = (0x00000003 | NX_CLASS_PDU | NX_PRPTYPE_U32)
NX_PROP_PDU_SIG_REFS = (0x00000005 | NX_CLASS_PDU | NX_PRPTYPE_1_DREF)
NX_PROP_PDU_MUX_IS_MUXED = (0x00000008 | NX_CLASS_PDU | NX_PRPTYPE_BOOL)
NX_PROP_PDU_MUX_DATA_MUX_SIG_REF = (0x00000009 | NX_CLASS_PDU | NX_PRPTYPE_REF)
NX_PROP_PDU_MUX_STATIC_SIG_REFS = (0x0000000A | NX_CLASS_PDU | NX_PRPTYPE_1_DREF)
NX_PROP_PDU_MUX_SUBFRAME_REFS = (0x0000000B | NX_CLASS_PDU | NX_PRPTYPE_1_DREF)
NX_PROP_SIG_BYTE_ORDR = (0x00000001 | NX_CLASS_SIGNAL | NX_PRPTYPE_U32)
NX_PROP_SIG_COMMENT = (0x00000002 | NX_CLASS_SIGNAL | NX_PRPTYPE_STRING)
NX_PROP_SIG_CONFIG_STATUS = (0x00000009 | NX_CLASS_SIGNAL | NX_PRPTYPE_U32)
NX_PROP_SIG_DATA_TYPE = (0x00000003 | NX_CLASS_SIGNAL | NX_PRPTYPE_U32)
NX_PROP_SIG_DEFAULT = (0x00000004 | NX_CLASS_SIGNAL | NX_PRPTYPE_F64)
NX_PROP_SIG_FRAME_REF = (0x00000005 | NX_CLASS_SIGNAL | NX_PRPTYPE_REF)
NX_PROP_SIG_MAX = (0x00000006 | NX_CLASS_SIGNAL | NX_PRPTYPE_F64)
NX_PROP_SIG_MIN = (0x00000007 | NX_CLASS_SIGNAL | NX_PRPTYPE_F64)
NX_PROP_SIG_NAME = (0x00000008 | NX_CLASS_SIGNAL | NX_PRPTYPE_STRING)
NX_PROP_SIG_NAME_UNIQUE_TO_CLUSTER = (0x00000010 | NX_CLASS_SIGNAL | NX_PRPTYPE_STRING)
NX_PROP_SIG_NUM_BITS = (0x00000012 | NX_CLASS_SIGNAL | NX_PRPTYPE_U32)
NX_PROP_SIG_PDU_REF = (0x00000011 | NX_CLASS_SIGNAL | NX_PRPTYPE_REF)
NX_PROP_SIG_SCALE_FAC = (0x00000013 | NX_CLASS_SIGNAL | NX_PRPTYPE_F64)
NX_PROP_SIG_SCALE_OFF = (0x00000014 | NX_CLASS_SIGNAL | NX_PRPTYPE_F64)
NX_PROP_SIG_START_BIT = (0x00000015 | NX_CLASS_SIGNAL | NX_PRPTYPE_U32)
NX_PROP_SIG_UNIT = (0x00000016 | NX_CLASS_SIGNAL | NX_PRPTYPE_STRING)
NX_PROP_SIG_MUX_IS_DATA_MUX = (0x00000030 | NX_CLASS_SIGNAL | NX_PRPTYPE_BOOL)
NX_PROP_SIG_MUX_IS_DYNAMIC = (0x00000031 | NX_CLASS_SIGNAL | NX_PRPTYPE_BOOL)
NX_PROP_SIG_MUX_VALUE = (0x00000032 | NX_CLASS_SIGNAL | NX_PRPTYPE_U32)
NX_PROP_SIG_MUX_SUBFRM_REF = (0x00000033 | NX_CLASS_SIGNAL | NX_PRPTYPE_REF)
NX_PROP_SUBFRM_CONFIG_STATUS = (0x00000009 | NX_CLASS_SUBFRAME | NX_PRPTYPE_U32)
NX_PROP_SUBFRM_DYN_SIG_REFS = (0x00000001 | NX_CLASS_SUBFRAME | NX_PRPTYPE_1_DREF)
NX_PROP_SUBFRM_FRM_REF = (0x00000002 | NX_CLASS_SUBFRAME | NX_PRPTYPE_REF)
NX_PROP_SUBFRM_MUX_VALUE = (0x00000003 | NX_CLASS_SUBFRAME | NX_PRPTYPE_U32)
NX_PROP_SUBFRM_NAME = (0x00000004 | NX_CLASS_SUBFRAME | NX_PRPTYPE_STRING)
NX_PROP_SUBFRM_PDU_REF = (0x00000005 | NX_CLASS_SUBFRAME | NX_PRPTYPE_REF)
NX_PROP_SUBFRM_NAME_UNIQUE_TO_CLUSTER = (0x00000007 | NX_CLASS_SUBFRAME | NX_PRPTYPE_STRING)
NX_PROP_ECU_CLST_REF = (0x00000001 | NX_CLASS_ECU | NX_PRPTYPE_REF)
NX_PROP_ECU_COMMENT = (0x00000005 | NX_CLASS_ECU | NX_PRPTYPE_STRING)
NX_PROP_ECU_CONFIG_STATUS = (0x00000009 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_NAME = (0x00000002 | NX_CLASS_ECU | NX_PRPTYPE_STRING)
NX_PROP_ECU_RX_FRM_REFS = (0x00000003 | NX_CLASS_ECU | NX_PRPTYPE_1_DREF)
NX_PROP_ECU_TX_FRM_REFS = (0x00000004 | NX_CLASS_ECU | NX_PRPTYPE_1_DREF)
NX_PROP_ECU_FLEX_RAY_IS_COLDSTART = (0x00000010 | NX_CLASS_ECU | NX_PRPTYPE_BOOL)
NX_PROP_ECU_FLEX_RAY_STARTUP_FRAME_REF = (0x00000011 | NX_CLASS_ECU | NX_PRPTYPE_REF)
NX_PROP_ECU_FLEX_RAY_WAKEUP_PTRN = (0x00000012 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_FLEX_RAY_WAKEUP_CHS = (0x00000013 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_FLEX_RAY_CONNECTED_CHS = (0x00000014 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_LIN_MASTER = (0x00000020 | NX_CLASS_ECU | NX_PRPTYPE_BOOL)
NX_PROP_ECU_LIN_PROTOCOL_VER = (0x00000021 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_LIN_INITIAL_NAD = (0x00000022 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_LIN_CONFIG_NAD = (0x00000023 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_LIN_SUPPLIER_ID = (0x00000024 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_LIN_FUNCTION_ID = (0x00000025 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_LINP_2MIN = (0x00000026 | NX_CLASS_ECU | NX_PRPTYPE_F64)
NX_PROP_ECU_LINS_TMIN = (0x00000027 | NX_CLASS_ECU | NX_PRPTYPE_F64)
NX_PROP_ECU_J1939_PREFERRED_ADDRESS = (0x00000028 | NX_CLASS_ECU | NX_PRPTYPE_U32)
NX_PROP_ECU_J1939_NODE_NAME = (0x00000029 | NX_CLASS_ECU | NX_PRPTYPE_U64)
NX_PROP_LIN_SCHED_CLST_REF = (0x00000005 | NX_CLASS_LIN_SCHED | NX_PRPTYPE_REF)
NX_PROP_LIN_SCHED_COMMENT = (0x00000006 | NX_CLASS_LIN_SCHED | NX_PRPTYPE_STRING)
NX_PROP_LIN_SCHED_CONFIG_STATUS = (0x00000007 | NX_CLASS_LIN_SCHED | NX_PRPTYPE_U32)
NX_PROP_LIN_SCHED_ENTRIES = (0x00000001 | NX_CLASS_LIN_SCHED | NX_PRPTYPE_1_DREF)
NX_PROP_LIN_SCHED_NAME = (0x00000002 | NX_CLASS_LIN_SCHED | NX_PRPTYPE_STRING)
NX_PROP_LIN_SCHED_PRIORITY = (0x00000003 | NX_CLASS_LIN_SCHED | NX_PRPTYPE_U32)
NX_PROP_LIN_SCHED_RUN_MODE = (0x00000004 | NX_CLASS_LIN_SCHED | NX_PRPTYPE_U32)
NX_PROP_LIN_SCHED_ENTRY_COLLISION_RES_SCHED = (0x00000001 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_REF)
NX_PROP_LIN_SCHED_ENTRY_DELAY = (0x00000002 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_F64)
NX_PROP_LIN_SCHED_ENTRY_EVENT_ID = (0x00000003 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_U32)
NX_PROP_LIN_SCHED_ENTRY_FRAMES = (0x00000004 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_1_DREF)
NX_PROP_LIN_SCHED_ENTRY_NAME = (0x00000006 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_STRING)
NX_PROP_LIN_SCHED_ENTRY_NAME_UNIQUE_TO_CLUSTER = (0x00000008 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_STRING)
NX_PROP_LIN_SCHED_ENTRY_SCHED = (0x00000007 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_REF)
NX_PROP_LIN_SCHED_ENTRY_TYPE = (0x00000005 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_U32)
NX_PROP_LIN_SCHED_ENTRY_NC_FF_DATA_BYTES = (0x00000009 | NX_CLASS_LIN_SCHED_ENTRY | NX_PRPTYPE_1_DU8)
# Session modes: how a session reads/writes signals or frames.
NX_MODE_SIGNAL_IN_SINGLE_POINT = 0
NX_MODE_SIGNAL_IN_WAVEFORM = 1
NX_MODE_SIGNAL_IN_XY = 2
NX_MODE_SIGNAL_OUT_SINGLE_POINT = 3
NX_MODE_SIGNAL_OUT_WAVEFORM = 4
NX_MODE_SIGNAL_OUT_XY = 5
NX_MODE_FRAME_IN_STREAM = 6
NX_MODE_FRAME_IN_QUEUED = 7
NX_MODE_FRAME_IN_SINGLE_POINT = 8
NX_MODE_FRAME_OUT_STREAM = 9
NX_MODE_FRAME_OUT_QUEUED = 10
NX_MODE_FRAME_OUT_SINGLE_POINT = 11
NX_MODE_SIGNAL_CONVERSION_SINGLE_POINT = 12
# Scope arguments for session start/stop.
NX_START_STOP_NORMAL = 0
NX_START_STOP_SESSION_ONLY = 1
NX_START_STOP_INTERFACE_ONLY = 2
NX_START_STOP_SESSION_ONLY_BLOCKING = 3
# Port LED blink control.
NX_BLINK_DISABLE = 0
NX_BLINK_ENABLE = 1
# Terminal name strings used for trigger/timebase routing.
NX_TERM_PXI_TRIG0 = "PXI_Trig0"
NX_TERM_PXI_TRIG1 = "PXI_Trig1"
NX_TERM_PXI_TRIG2 = "PXI_Trig2"
NX_TERM_PXI_TRIG3 = "PXI_Trig3"
NX_TERM_PXI_TRIG4 = "PXI_Trig4"
NX_TERM_PXI_TRIG5 = "PXI_Trig5"
NX_TERM_PXI_TRIG6 = "PXI_Trig6"
NX_TERM_PXI_TRIG7 = "PXI_Trig7"
NX_TERM_FRONT_PANEL0 = "FrontPanel0"
NX_TERM_FRONT_PANEL1 = "FrontPanel1"
NX_TERM_PXI_STAR = "PXI_Star"
NX_TERM_PXI_CLK10 = "PXI_Clk10"
NX_TERM_10_M_HZ_TIMEBASE = "10MHzTimebase"
NX_TERM_1_M_HZ_TIMEBASE = "1MHzTimebase"
NX_TERM_MASTER_TIMEBASE = "MasterTimebase"
NX_TERM_COMM_TRIGGER = "CommTrigger"
NX_TERM_START_TRIGGER = "StartTrigger"
NX_TERM_FLEX_RAY_START_CYCLE = "FlexRayStartCycle"
NX_TERM_FLEX_RAY_MACROTICK = "FlexRayMacrotick"
NX_TERM_LOG_TRIGGER = "LogTrigger"
# Read-only session/interface state IDs (same ID encoding scheme as the
# NX_PROP_* constants above).
NX_STATE_TIME_CURRENT = (0x00000001 | NX_CLASS_INTERFACE | NX_PRPTYPE_TIME)
NX_STATE_TIME_COMMUNICATING = (0x00000002 | NX_CLASS_INTERFACE | NX_PRPTYPE_TIME)
NX_STATE_TIME_START = (0x00000003 | NX_CLASS_INTERFACE | NX_PRPTYPE_TIME)
NX_STATE_SESSION_INFO = (0x00000004 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_STATE_CAN_COMM = (0x00000010 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_STATE_FLEX_RAY_COMM = (0x00000020 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_STATE_FLEX_RAY_STATS = (0x00000021 | NX_CLASS_INTERFACE | NX_PRPTYPE_1_DU32)
NX_STATE_LIN_COMM = (0x00000030 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_STATE_J1939_COMM = (0x00000040 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_STATE_LIN_SCHEDULE_CHANGE = (0x00000081 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_STATE_LIN_DIAGNOSTIC_SCHEDULE_CHANGE = (0x00000083 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
NX_STATE_FLEX_RAY_SYMBOL = (0x00000082 | NX_CLASS_INTERFACE | NX_PRPTYPE_U32)
# CAN FD ISO-mode selection.
NX_CAN_FD_MODE_ISO = 0
NX_CAN_FD_MODE_NON_ISO = 1
NX_CAN_FD_MODE_ISO_LEGACY = 2
# Values for NX_STATE_SESSION_INFO.
NX_SESSION_INFO_STATE_STOPPED = 0
NX_SESSION_INFO_STATE_STARTED = 1
NX_SESSION_INFO_STATE_MIX = 2
# CAN communication controller states.
NX_CAN_COMM_STATE_ERROR_ACTIVE = 0
NX_CAN_COMM_STATE_ERROR_PASSIVE = 1
NX_CAN_COMM_STATE_BUS_OFF = 2
NX_CAN_COMM_STATE_INIT = 3
# Last-error codes reported by the CAN controller.
NX_CAN_LAST_ERR_NONE = 0
NX_CAN_LAST_ERR_STUFF = 1
NX_CAN_LAST_ERR_FORM = 2
NX_CAN_LAST_ERR_ACK = 3
NX_CAN_LAST_ERR_BIT1 = 4
NX_CAN_LAST_ERR_BIT0 = 5
NX_CAN_LAST_ERR_CRC = 6
# CAN I/O modes (classic, FD, FD with bit-rate switch).
NX_CA_NIO_MODE_CAN = 0
NX_CA_NIO_MODE_CAN_FD = 1
NX_CA_NIO_MODE_CAN_FD_BRS = 2
# FlexRay protocol-operation-control (POC) states.
NX_FLEX_RAY_POC_STATE_DEFAULT_CONFIG = 0
NX_FLEX_RAY_POC_STATE_READY = 1
NX_FLEX_RAY_POC_STATE_NORMAL_ACTIVE = 2
NX_FLEX_RAY_POC_STATE_NORMAL_PASSIVE = 3
NX_FLEX_RAY_POC_STATE_HALT = 4
NX_FLEX_RAY_POC_STATE_MONITOR = 5
NX_FLEX_RAY_POC_STATE_CONFIG = 15
# LIN communication states.
NX_LIN_COMM_STATE_IDLE = 0
NX_LIN_COMM_STATE_ACTIVE = 1
NX_LIN_COMM_STATE_INACTIVE = 2
# LIN diagnostic schedule selectors.
NX_LIN_DIAGNOSTIC_SCHEDULE_NULL = 0x0000
NX_LIN_DIAGNOSTIC_SCHEDULE_MASTER_REQ = 0x0001
NX_LIN_DIAGNOSTIC_SCHEDULE_SLAVE_RESP = 0x0002
# Last-error codes reported by the LIN interface.
NX_LIN_LAST_ERR_CODE_NONE = 0
NX_LIN_LAST_ERR_CODE_UNKNOWN_ID = 1
NX_LIN_LAST_ERR_CODE_FORM = 2
NX_LIN_LAST_ERR_CODE_FRAMING = 3
NX_LIN_LAST_ERR_CODE_READBACK = 4
NX_LIN_LAST_ERR_CODE_TIMEOUT = 5
NX_LIN_LAST_ERR_CODE_CRC = 6
# Waitable condition IDs.
NX_CONDITION_TRANSMIT_COMPLETE = 0x8001
NX_CONDITION_INTF_COMMUNICATING = 0x8002
NX_CONDITION_INTF_REMOTE_WAKEUP = 0x8003
# Special timeout values.
NX_TIMEOUT_NONE = (0)
NX_TIMEOUT_INFINITE = (-1)
# Modes for retrieving DBC attribute information.
NX_GET_DBC_MODE_ATTRIBUTE = 0
NX_GET_DBC_MODE_ENUMERATION_LIST = 1
NX_GET_DBC_MODE_ATTRIBUTE_LIST = 2
NX_GET_DBC_MODE_VALUE_TABLE_LIST = 3
# Database-merge conflict-resolution modes.
NXDB_MERGE_COPY_USE_SOURCE = 0
NXDB_MERGE_COPY_USE_TARGET = 1
NXDB_MERGE_MERGE_USE_SOURCE = 2
NXDB_MERGE_MERGE_USE_TARGET = 3
# Transceiver-cable ("dongle") states and identifiers.
NX_DONGLE_STATE_NO_DONGLE_NO_EXT_POWER = 1
NX_DONGLE_STATE_NO_DONGLE_EXT_POWER = 2
NX_DONGLE_STATE_DONGLE_NO_EXT_POWER = 3
NX_DONGLE_STATE_READY = 4
NX_DONGLE_STATE_BUSY = 5
NX_DONGLE_STATE_COMM_ERROR = 13
NX_DONGLE_STATE_OVER_CURRENT = 14
NX_DONGLE_ID_LS_CAN = 1
NX_DONGLE_ID_HS_CAN = 2
NX_DONGLE_ID_SW_CAN = 3
NX_DONGLE_ID_XS_CAN = 4
NX_DONGLE_ID_LIN = 6
NX_DONGLE_ID_DONGLE_LESS = 13
NX_DONGLE_ID_UNKNOWN = 14
# Firmware/driver development phases.
NX_PHASE_DEVELOPMENT = 0
NX_PHASE_ALPHA = 1
NX_PHASE_BETA = 2
NX_PHASE_RELEASE = 3
# Device form factors.
NX_DEV_FORM_PXI = 0
NX_DEV_FORM_PCI = 1
NX_DEV_FORM_C_SERIES = 2
NX_DEV_FORM_PX_IE = 3
NX_DEV_FORM_USB = 4
# CAN termination and transceiver capabilities.
NX_CAN_TERM_CAP_NO = 0
NX_CAN_TERM_CAP_YES = 1
NX_CAN_TCVR_CAP_HS = 0
NX_CAN_TCVR_CAP_LS = 1
NX_CAN_TCVR_CAP_XS = 3
NX_CAN_TCVR_CAP_XS_HS_LS = 4
NX_CAN_TCVR_CAP_UNKNOWN = 0xFFFFFFFF
# Bus protocol identifiers (physical and application layer).
NX_PROTOCOL_UNKNOWN = 0xFFFFFFFE
NX_PROTOCOL_CAN = 0
NX_PROTOCOL_FLEX_RAY = 1
NX_PROTOCOL_LIN = 2
NX_APP_PROTOCOL_NONE = 0
NX_APP_PROTOCOL_J1939 = 1
# CAN termination and transceiver state/type settings.
NX_CAN_TERM_OFF = 0
NX_CAN_TERM_ON = 1
NX_CAN_TCVR_STATE_NORMAL = 0
NX_CAN_TCVR_STATE_SLEEP = 1
NX_CAN_TCVR_STATE_SW_WAKEUP = 2
NX_CAN_TCVR_STATE_SW_HIGH_SPEED = 3
NX_CAN_TCVR_TYPE_HS = 0
NX_CAN_TCVR_TYPE_LS = 1
NX_CAN_TCVR_TYPE_SW = 2
NX_CAN_TCVR_TYPE_EXT = 3
NX_CAN_TCVR_TYPE_DISC = 4
# FlexRay sampling and termination settings.
NX_FLEX_RAY_SAMP_PER_MICRO_1 = 0
NX_FLEX_RAY_SAMP_PER_MICRO_2 = 1
NX_FLEX_RAY_SAMP_PER_MICRO_4 = 2
NX_FLEX_RAY_TERM_OFF = 0
NX_FLEX_RAY_TERM_ON = 1
# LIN sleep/wake transitions and termination settings.
NX_LIN_SLEEP_REMOTE_SLEEP = 0
NX_LIN_SLEEP_REMOTE_WAKE = 1
NX_LIN_SLEEP_LOCAL_SLEEP = 2
NX_LIN_SLEEP_LOCAL_WAKE = 3
NX_LIN_TERM_OFF = 0
NX_LIN_TERM_ON = 1
# Output-stream timing (replay) modes.
NX_OUT_STRM_TIMNG_IMMEDIATE = 0
NX_OUT_STRM_TIMNG_REPLAY_EXCLUSIVE = 1
NX_OUT_STRM_TIMNG_REPLAY_INCLUSIVE = 2
# CAN pending-transmit ordering.
NX_CAN_PEND_TX_ORDER_AS_SUBMITTED = 0
NX_CAN_PEND_TX_ORDER_BY_IDENTIFIER = 1
# FlexRay sleep/wake transitions.
NX_FLEX_RAY_SLEEP_LOCAL_SLEEP = 0
NX_FLEX_RAY_SLEEP_LOCAL_WAKE = 1
NX_FLEX_RAY_SLEEP_REMOTE_WAKE = 2
# External-transceiver configuration bit masks (capability bits plus the
# OUTPUT0/OUTPUT1 pin levels for each transceiver state).
NX_CAN_EXT_TCVR_CONFIG_NORMAL_SUPPORTED = (1 << 2)
NX_CAN_EXT_TCVR_CONFIG_SLEEP_SUPPORTED = (1 << 5)
NX_CAN_EXT_TCVR_CONFIG_SW_WAKEUP_SUPPORTED = (1 << 8)
NX_CAN_EXT_TCVR_CONFIG_SW_HIGH_SPEED_SUPPORTED = (1 << 11)
NX_CAN_EXT_TCVR_CONFIG_POWER_ON_SUPPORTED = (1 << 14)
NX_CAN_EXT_TCVR_CONFIG_NORMAL_OUTPUT0_SET = (1 << 0)
NX_CAN_EXT_TCVR_CONFIG_SLEEP_OUTPUT0_SET = (1 << 3)
NX_CAN_EXT_TCVR_CONFIG_SW_WAKEUP_OUTPUT0_SET = (1 << 6)
NX_CAN_EXT_TCVR_CONFIG_SW_HIGH_SPEED_OUTPUT0_SET = (1 << 9)
NX_CAN_EXT_TCVR_CONFIG_POWER_ON_OUTPUT0_SET = (1 << 12)
NX_CAN_EXT_TCVR_CONFIG_NORMAL_OUTPUT1_SET = (1 << 1)
NX_CAN_EXT_TCVR_CONFIG_SLEEP_OUTPUT1_SET = (1 << 4)
NX_CAN_EXT_TCVR_CONFIG_SW_WAKEUP_OUTPUT1_SET = (1 << 7)
NX_CAN_EXT_TCVR_CONFIG_SW_HIGH_SPEED_OUTPUT1_SET = (1 << 10)
NX_CAN_EXT_TCVR_CONFIG_POWER_ON_OUTPUT1_SET = (1 << 13)
NX_CAN_EXT_TCVR_CONFIG_N_ERR_CONNECTED = (1 << 31)
# FlexRay frame channel assignment values.
NX_FRM_FLEX_RAY_CH_ASSIGN_A = 1
NX_FRM_FLEX_RAY_CH_ASSIGN_B = 2
NX_FRM_FLEX_RAY_CH_ASSIGN_AAND_B = 3
NX_FRM_FLEX_RAY_CH_ASSIGN_NONE = 4
# FlexRay cluster sample-clock periods.
NX_CLST_FLEX_RAY_SAMP_CLK_PER_P0125US = 0
NX_CLST_FLEX_RAY_SAMP_CLK_PER_P025US = 1
NX_CLST_FLEX_RAY_SAMP_CLK_PER_P05US = 2
# Frame timing types.
NX_FRM_FLEX_RAY_TIMING_CYCLIC = 0
NX_FRM_FLEX_RAY_TIMING_EVENT = 1
NX_FRM_CAN_TIMING_CYCLIC_DATA = 0
NX_FRM_CAN_TIMING_EVENT_DATA = 1
NX_FRM_CAN_TIMING_CYCLIC_REMOTE = 2
NX_FRM_CAN_TIMING_EVENT_REMOTE = 3
NX_FRM_CAN_TIMING_CYCLIC_EVENT = 4
# Signal byte order and data types.
NX_SIG_BYTE_ORDR_LITTLE_ENDIAN = 0
NX_SIG_BYTE_ORDR_BIG_ENDIAN = 1
NX_SIG_DATA_TYPE_SIGNED = 0
NX_SIG_DATA_TYPE_UNSIGNED = 1
NX_SIG_DATA_TYPE_IEEE_FLOAT = 2
# LIN protocol versions.
NX_LIN_PROTOCOL_VER_1_2 = 2
NX_LIN_PROTOCOL_VER_1_3 = 3
NX_LIN_PROTOCOL_VER_2_0 = 4
NX_LIN_PROTOCOL_VER_2_1 = 5
NX_LIN_PROTOCOL_VER_2_2 = 6
# LIN schedule run modes and schedule-entry types.
NX_LIN_SCHED_RUN_MODE_CONTINUOUS = 0
NX_LIN_SCHED_RUN_MODE_ONCE = 1
NX_LIN_SCHED_RUN_MODE_NULL = 2
NX_LIN_SCHED_ENTRY_TYPE_UNCONDITIONAL = 0
NX_LIN_SCHED_ENTRY_TYPE_SPORADIC = 1
NX_LIN_SCHED_ENTRY_TYPE_EVENT_TRIGGERED = 2
NX_LIN_SCHED_ENTRY_TYPE_NODE_CONFIG_SERVICE = 3
# LIN checksum types.
NX_FRM_LIN_CHECKSUM_CLASSIC = 0
NX_FRM_LIN_CHECKSUM_ENHANCED = 1
# Frame type codes carried in the frame header.
NX_FRAME_TYPE_CAN_DATA = 0x00
NX_FRAME_TYPE_CAN_REMOTE = 0x01
NX_FRAME_TYPE_CAN_BUS_ERROR = 0x02
NX_FRAME_TYPE_CAN20_DATA = 0x08
NX_FRAME_TYPE_CANFD_DATA = 0x10
NX_FRAME_TYPE_CANFDBRS_DATA = 0x18
NX_FRAME_TYPE_FLEX_RAY_DATA = 0x20
NX_FRAME_TYPE_FLEX_RAY_NULL = 0x21
NX_FRAME_TYPE_FLEX_RAY_SYMBOL = 0x22
NX_FRAME_TYPE_LIN_DATA = 0x40
NX_FRAME_TYPE_LIN_BUS_ERROR = 0x41
NX_FRAME_TYPE_LIN_NO_RESPONSE = 0x42
NX_FRAME_TYPE_J1939_DATA = 0xC0
NX_FRAME_TYPE_SPECIAL_DELAY = 0xE0
NX_FRAME_TYPE_SPECIAL_LOG_TRIGGER = 0xE1
NX_FRAME_TYPE_SPECIAL_START_TRIGGER = 0xE2
# Frame identifier and flag bit masks.
NX_FRAME_ID_CAN_IS_EXTENDED = 0x20000000
NX_FRAME_FLAGS_FLEX_RAY_STARTUP = 0x01
NX_FRAME_FLAGS_FLEX_RAY_SYNC = 0x02
NX_FRAME_FLAGS_FLEX_RAY_PREAMBLE = 0x04
NX_FRAME_FLAGS_FLEX_RAY_CH_A = 0x10
NX_FRAME_FLAGS_FLEX_RAY_CH_B = 0x20
NX_FRAME_FLAGS_LIN_EVENT_SLOT = 0x01
NX_FRAME_FLAGS_TRANSMIT_ECHO = 0x80
# FlexRay symbol codes.
NX_FLEX_RAY_SYMBOL_MTS = 0x00
NX_FLEX_RAY_SYMBOL_WAKEUP = 0x01
# Miscellaneous frame-layout constants.
NX_FRAME_PAYLD_LEN_HIGH_MASK_J1939 = (7)
NX_SIZEOF_FRAME_HEADER = (16)
|
|
import unicodedata
from collections import OrderedDict
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.db import models
from django.db.models import Q
from django.utils.encoding import force_str
from django.utils.http import base36_to_int, int_to_base36, urlencode
from allauth.account import app_settings, signals
from allauth.account.adapter import get_adapter
from allauth.exceptions import ImmediateHttpResponse
from allauth.utils import (
get_request_param,
get_user_model,
import_callable,
valid_email_or_none,
)
def _unicode_ci_compare(s1, s2):
"""
Perform case-insensitive comparison of two identifiers, using the
recommended algorithm from Unicode Technical Report 36, section
2.11.2(B)(2).
"""
norm_s1 = unicodedata.normalize("NFKC", s1).casefold()
norm_s2 = unicodedata.normalize("NFKC", s2).casefold()
return norm_s1 == norm_s2
def get_next_redirect_url(request, redirect_field_name="next"):
    """
    Return the next URL to redirect to, if it was explicitly passed via
    the request; ``None`` when missing or deemed unsafe by the adapter.
    """
    candidate = get_request_param(request, redirect_field_name)
    if get_adapter(request).is_safe_url(candidate):
        return candidate
    return None
def get_login_redirect_url(request, url=None, redirect_field_name="next", signup=False):
    """
    Resolve the post-login redirect URL: explicit *url* (possibly a
    callable) wins, then the request's ``next`` parameter, then the
    adapter's signup/login default.
    """
    # *url* may be a getter so callers can defer the decision until the
    # authenticated state is known.
    ret = url() if url and callable(url) else url
    if not ret:
        ret = get_next_redirect_url(request, redirect_field_name=redirect_field_name)
    if ret:
        return ret
    adapter = get_adapter(request)
    if signup:
        return adapter.get_signup_redirect_url(request)
    return adapter.get_login_redirect_url(request)
# Cache for the configured user-display callable; populated lazily by
# user_display() from the ACCOUNT_USER_DISPLAY setting.
_user_display_callable = None
def logout_on_password_change(request, user):
    """
    Keep the user logged in after a password change unless the
    LOGOUT_ON_PASSWORD_CHANGE setting requests otherwise.

    Django invalidates all sessions on password change by default, so
    when logout is *not* desired the session auth hash must be refreshed.
    """
    if app_settings.LOGOUT_ON_PASSWORD_CHANGE:
        return
    update_session_auth_hash(request, user)
def default_user_display(user):
    """
    Default rendering of a user: the configured username field when one
    exists, otherwise the string representation of the user object.
    """
    username_field = app_settings.USER_MODEL_USERNAME_FIELD
    if not username_field:
        return force_str(user)
    return getattr(user, username_field)
def user_display(user):
    """
    Return a display string for *user*, using the callable configured
    via the ACCOUNT_USER_DISPLAY setting (resolved once and cached).
    """
    global _user_display_callable
    if not _user_display_callable:
        configured = getattr(settings, "ACCOUNT_USER_DISPLAY", default_user_display)
        _user_display_callable = import_callable(configured)
    return _user_display_callable(user)
def user_field(user, field, *args):
    """
    Get or (optionally) set a user model field. No-op if the field does
    not exist on the model and the user carries no such attribute.
    """
    if not field:
        return
    try:
        max_length = get_user_model()._meta.get_field(field).max_length
    except FieldDoesNotExist:
        if not hasattr(user, field):
            return
        max_length = None
    if not args:
        # Getter
        return getattr(user, field)
    # Setter: truncate to the model's max_length when one is declared
    # (slicing with None is a no-op).
    value = args[0]
    if value:
        value = value[0:max_length]
    setattr(user, field, value)
def user_username(user, *args):
    """
    Get/set the username field, lower-casing on set unless the
    PRESERVE_USERNAME_CASING setting is enabled.
    """
    if args and args[0] and not app_settings.PRESERVE_USERNAME_CASING:
        args = [args[0].lower()]
    return user_field(user, app_settings.USER_MODEL_USERNAME_FIELD, *args)
def user_email(user, *args):
    """Get/set the e-mail field configured on the user model."""
    email_field = app_settings.USER_MODEL_EMAIL_FIELD
    return user_field(user, email_field, *args)
def has_verified_email(user, email=None):
    """
    Return True when *user* has a verified e-mail address; when *email*
    is given, check that specific address only.
    """
    from .models import EmailAddress

    if not email:
        # No specific address requested: any verified address counts.
        return EmailAddress.objects.filter(user=user, verified=True).exists()
    try:
        return EmailAddress.objects.get_for_user(user, email).verified
    except EmailAddress.DoesNotExist:
        return False
def perform_login(
    request,
    user,
    email_verification,
    redirect_url=None,
    signal_kwargs=None,
    signup=False,
    email=None,
):
    """
    Log *user* in via the adapter, returning an HTTP response.

    Keyword arguments:
    signup -- Indicates whether or not sending the
    email is essential (during signup), or if it can be skipped (e.g. in
    case email verification is optional and we are only logging in).
    """
    # Local users are stopped due to form validation checking
    # is_active, yet, adapter methods could toy with is_active in a
    # `user_signed_up` signal. Furthermore, social users should be
    # stopped anyway.
    adapter = get_adapter(request)
    try:
        hook_kwargs = dict(
            email_verification=email_verification,
            redirect_url=redirect_url,
            signal_kwargs=signal_kwargs,
            signup=signup,
            email=email,
        )
        # pre_login may short-circuit the whole login (e.g. inactive user,
        # mandatory e-mail verification) by returning a response.
        response = adapter.pre_login(request, user, **hook_kwargs)
        if response:
            return response
        adapter.login(request, user)
        # post_login produces the final (redirect) response.
        response = adapter.post_login(request, user, **hook_kwargs)
        if response:
            return response
    except ImmediateHttpResponse as e:
        # Adapter hooks / signal handlers may abort with a ready-made
        # response; fall through and return it.
        response = e.response
    return response
def complete_signup(request, user, email_verification, success_url, signal_kwargs=None):
    """
    Emit the ``user_signed_up`` signal, then log the freshly signed-up
    user in, redirecting to *success_url*.
    """
    kwargs = {} if signal_kwargs is None else signal_kwargs
    signals.user_signed_up.send(
        sender=user.__class__, request=request, user=user, **kwargs
    )
    return perform_login(
        request,
        user,
        email_verification=email_verification,
        signup=True,
        redirect_url=success_url,
        signal_kwargs=kwargs,
    )
def cleanup_email_addresses(request, addresses):
    """
    Takes a list of EmailAddress instances and cleans it up, making
    sure only valid ones remain, without multiple primaries etc.

    Order is important: e.g. if multiple primary e-mail addresses
    exist, the first one encountered will be kept as primary.

    Returns an ``(addresses, primary_address)`` tuple.
    """
    from .models import EmailAddress

    adapter = get_adapter(request)
    # Let's group by `email`
    e2a = OrderedDict()  # maps lower-cased email to EmailAddress
    primary_addresses = []
    verified_addresses = []
    primary_verified_addresses = []
    for address in addresses:
        # Pick up only valid ones...
        email = valid_email_or_none(address.email)
        if not email:
            continue
        # ... and non-conflicting ones...
        if (
            app_settings.UNIQUE_EMAIL
            and EmailAddress.objects.filter(email__iexact=email).exists()
        ):
            continue
        a = e2a.get(email.lower())
        if a:
            # Duplicate entry: merge the primary/verified flags.
            a.primary = a.primary or address.primary
            a.verified = a.verified or address.verified
        else:
            a = address
            a.verified = a.verified or adapter.is_email_verified(request, a.email)
            e2a[email.lower()] = a
        if a.primary:
            primary_addresses.append(a)
            if a.verified:
                primary_verified_addresses.append(a)
        if a.verified:
            verified_addresses.append(a)
    # Now that we got things sorted out, let's assign a primary
    if primary_verified_addresses:
        primary_address = primary_verified_addresses[0]
    elif verified_addresses:
        # Pick any verified as primary
        primary_address = verified_addresses[0]
    elif primary_addresses:
        # Okay, let's pick primary then, even if unverified
        primary_address = primary_addresses[0]
    elif e2a:
        # Pick the first address. Fix: the original used ``e2a.keys()[0]``,
        # which raises TypeError on Python 3 (dict views are not
        # subscriptable) and would have yielded a plain e-mail string
        # instead of an EmailAddress, breaking ``primary_address.email``
        # below. ``next(iter(...))`` preserves insertion order.
        primary_address = next(iter(e2a.values()))
    else:
        # Empty
        primary_address = None
    # There can only be one primary
    for a in e2a.values():
        a.primary = primary_address.email.lower() == a.email.lower()
    return list(e2a.values()), primary_address
def setup_user_email(request, user, addresses):
    """
    Creates proper EmailAddress for the user that was just signed
    up. Only sets up, doesn't do any other handling such as sending
    out email confirmation mails etc.

    Returns the chosen primary EmailAddress (or None).
    """
    from .models import EmailAddress

    # A freshly signed-up user must not have any addresses yet.
    assert not EmailAddress.objects.filter(user=user).exists()
    priority_addresses = []
    # Is there a stashed e-mail?
    adapter = get_adapter(request)
    stashed_email = adapter.unstash_verified_email(request)
    if stashed_email:
        # A stashed address was already verified elsewhere in the flow,
        # so it is marked verified (and primary) up front.
        priority_addresses.append(
            EmailAddress(user=user, email=stashed_email, primary=True, verified=True)
        )
    email = user_email(user)
    if email:
        priority_addresses.append(
            EmailAddress(user=user, email=email, primary=True, verified=False)
        )
    # Priority addresses are listed first so cleanup keeps them as primary
    # (order matters in cleanup_email_addresses()).
    addresses, primary = cleanup_email_addresses(
        request, priority_addresses + addresses
    )
    for a in addresses:
        a.user = user
        a.save()
    EmailAddress.objects.fill_cache_for_user(user, addresses)
    if primary and email and email.lower() != primary.email.lower():
        # The user model's own e-mail field diverged from the chosen
        # primary address; sync it back onto the user.
        user_email(user, primary.email)
        user.save()
    return primary
def send_email_confirmation(request, user, signup=False, email=None):
    """
    E-mail verification mails are sent:
    a) Explicitly: when a user signs up
    b) Implicitly: when a user attempts to log in using an unverified
    e-mail while EMAIL_VERIFICATION is mandatory.

    Especially in case of b), we want to limit the number of mails
    sent (consider a user retrying a few times), which is why there is
    a cooldown period before sending a new mail. This cooldown period
    can be configured in ACCOUNT_EMAIL_CONFIRMATION_COOLDOWN setting.
    """
    from .models import EmailAddress

    adapter = get_adapter(request)
    if not email:
        email = user_email(user)
    if email:
        try:
            email_address = EmailAddress.objects.get_for_user(user, email)
            if not email_address.verified:
                # Respect the cooldown: the adapter decides whether enough
                # time has passed since the last confirmation mail.
                send_email = adapter.should_send_confirmation_mail(
                    request, email_address
                )
                if send_email:
                    email_address.send_confirmation(request, signup=signup)
            else:
                # Already verified -- nothing to send.
                send_email = False
        except EmailAddress.DoesNotExist:
            send_email = True
            # add_email() with confirm=True both creates the record and
            # sends the confirmation mail.
            email_address = EmailAddress.objects.add_email(
                request, user, email, signup=signup, confirm=True
            )
            assert email_address
        # At this point, if we were supposed to send an email we have sent it.
        if send_email:
            adapter.add_message(
                request,
                messages.INFO,
                "account/messages/email_confirmation_sent.txt",
                {"email": email},
            )
    if signup:
        # Stash the user's pk so the confirmation flow can resume signup.
        adapter.stash_user(request, user_pk_to_url_str(user))
def sync_user_email_addresses(user):
    """
    Keep user.email in sync with user.emailaddress_set.

    Under some circumstances the user.email may not have ended up as
    an EmailAddress record, e.g. in the case of manually created admin
    users.
    """
    from .models import EmailAddress

    email = user_email(user)
    if not email:
        return
    if EmailAddress.objects.filter(user=user, email__iexact=email).exists():
        # Already recorded for this user -- nothing to do.
        return
    if (
        app_settings.UNIQUE_EMAIL
        and EmailAddress.objects.filter(email__iexact=email).exists()
    ):
        # The address belongs to some other account -- bail out.
        return
    # get_or_create() to gracefully handle races
    EmailAddress.objects.get_or_create(
        user=user, email=email, defaults={"primary": False, "verified": False}
    )
def filter_users_by_username(*username):
    """
    Return a queryset of users whose username matches any of the given
    values (case-insensitively, honoring PRESERVE_USERNAME_CASING).
    """
    manager = get_user_model()._default_manager
    field = app_settings.USER_MODEL_USERNAME_FIELD
    if not app_settings.PRESERVE_USERNAME_CASING:
        # Usernames are stored lower-cased; an exact __in lookup suffices.
        return manager.filter(**{field + "__in": [u.lower() for u in username]})
    # OR together one case-insensitive match per requested username.
    combined = Q(**{field + "__iexact": username[0]})
    for u in username[1:]:
        combined = combined | Q(**{field + "__iexact": u})
    return manager.filter(combined)
def filter_users_by_email(email, is_active=None):
    """Return list of users by email address

    Typically one, at most just a few in length. First we look through
    the EmailAddress table, then the customisable User model table. The
    results are merged, avoiding SQL joins, and deduplicated.
    """
    from .models import EmailAddress

    User = get_user_model()
    users = []
    mails = EmailAddress.objects.filter(email__iexact=email)
    if is_active is not None:
        mails = mails.filter(user__is_active=is_active)
    users.extend(
        e.user
        for e in mails.prefetch_related("user")
        if _unicode_ci_compare(e.email, email)
    )
    email_field = app_settings.USER_MODEL_EMAIL_FIELD
    if email_field:
        user_qs = User.objects.filter(**{email_field + "__iexact": email})
        if is_active is not None:
            user_qs = user_qs.filter(is_active=is_active)
        users.extend(
            user
            for user in user_qs.iterator()
            if _unicode_ci_compare(getattr(user, email_field), email)
        )
    return list(set(users))
def passthrough_next_redirect_url(request, url, redirect_field_name):
    """
    Append the request's ``next`` parameter (when present) onto *url* as
    a query string, so the redirect target survives the next hop.
    """
    assert url.find("?") < 0  # TODO: Handle this case properly
    next_url = get_next_redirect_url(request, redirect_field_name)
    if not next_url:
        return url
    return url + "?" + urlencode({redirect_field_name: next_url})
def user_pk_to_url_str(user):
    """
    Serialize the user's primary key to a URL-safe string (UUID hex, or
    base36 for integer keys).
    """
    pk_field = get_user_model()._meta.pk
    if issubclass(type(pk_field), models.UUIDField):
        return user.pk if isinstance(user.pk, str) else user.pk.hex
    pk = user.pk
    if isinstance(pk, int):
        pk = int_to_base36(pk)
    return str(pk)
def url_str_to_user_pk(s):
    """
    Inverse of user_pk_to_url_str(): convert the URL string back into a
    primary-key value of the appropriate type.
    """
    User = get_user_model()
    # TODO: Ugh, isn't there a cleaner way to determine whether or not
    # the PK is a str-like field?
    remote_field = getattr(User._meta.pk, "remote_field", None)
    if remote_field and getattr(remote_field, "to", None):
        # The PK is itself a foreign key (profile-style user models);
        # use the target model's PK field for type conversion.
        pk_field = User._meta.pk.remote_field.to._meta.pk
    else:
        pk_field = User._meta.pk
    if issubclass(type(pk_field), models.UUIDField):
        return pk_field.to_python(s)
    try:
        # Probe: if the field accepts the non-numeric string "a", it is
        # str-like and *s* can be used as-is; otherwise assume base36 int.
        pk_field.to_python("a")
        pk = s
    except ValidationError:
        pk = base36_to_int(s)
    return pk
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
    # Alias the Python 3 builtins module under its Python 2 name and
    # restore the removed ``long`` type as an alias for ``int``, so the
    # generated bindings below can reference both unconditionally.
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-system - based on the path /system/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Global configuration data for the system
    """
    # NOTE: auto-generated pyangbind container -- do not hand-edit; regenerate
    # from the openconfig-system YANG model if the schema changes.
    # __slots__ keeps instances small; the double-underscore leaf names are
    # name-mangled to _config__<leaf> on the instance.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__hostname",
        "__domain_name",
        "__login_banner",
        "__motd_banner",
    )
    _yang_name = "config"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Each leaf is wrapped in YANGDynClass so that YANG type restrictions
        # (here: the inet:domain-name pattern and 1..253 length) are enforced
        # whenever the value is set.
        self.__hostname = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_dict={
                    "pattern": "((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.",
                    "length": ["1..253"],
                },
            ),
            is_leaf=True,
            yang_name="hostname",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="inet:domain-name",
            is_config=True,
        )
        self.__domain_name = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_dict={
                    "pattern": "((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.",
                    "length": ["1..253"],
                },
            ),
            is_leaf=True,
            yang_name="domain-name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="inet:domain-name",
            is_config=True,
        )
        self.__login_banner = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="login-banner",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="string",
            is_config=True,
        )
        self.__motd_banner = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="motd-banner",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="string",
            is_config=True,
        )
        # Copy-construction: a single positional argument must carry all of
        # this container's elements; changed leaves are copied over via the
        # generated _set_* methods.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Path within the YANG tree; delegate to the parent container when
        # this object is attached to one.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["system", "config"]
    def _get_hostname(self):
        """
        Getter method for hostname, mapped from YANG variable /system/config/hostname (inet:domain-name)
        YANG Description: The hostname of the device -- should be a single domain
        label, without the domain.
        """
        return self.__hostname
    def _set_hostname(self, v, load=False):
        """
        Setter method for hostname, mapped from YANG variable /system/config/hostname (inet:domain-name)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_hostname is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_hostname() directly.
        YANG Description: The hostname of the device -- should be a single domain
        label, without the domain.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_dict={
                        "pattern": "((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.",
                        "length": ["1..253"],
                    },
                ),
                is_leaf=True,
                yang_name="hostname",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="inet:domain-name",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """hostname must be of a type compatible with inet:domain-name""",
                    "defined-type": "inet:domain-name",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': ['1..253']}), is_leaf=True, yang_name="hostname", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='inet:domain-name', is_config=True)""",
                }
            )
        self.__hostname = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_hostname(self):
        # Reset the leaf to a fresh, unchanged default wrapper.
        self.__hostname = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_dict={
                    "pattern": "((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.",
                    "length": ["1..253"],
                },
            ),
            is_leaf=True,
            yang_name="hostname",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="inet:domain-name",
            is_config=True,
        )
    def _get_domain_name(self):
        """
        Getter method for domain_name, mapped from YANG variable /system/config/domain_name (inet:domain-name)
        YANG Description: Specifies the domain name used to form fully qualified name
        for unqualified hostnames.
        """
        return self.__domain_name
    def _set_domain_name(self, v, load=False):
        """
        Setter method for domain_name, mapped from YANG variable /system/config/domain_name (inet:domain-name)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_domain_name is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_domain_name() directly.
        YANG Description: Specifies the domain name used to form fully qualified name
        for unqualified hostnames.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_dict={
                        "pattern": "((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.",
                        "length": ["1..253"],
                    },
                ),
                is_leaf=True,
                yang_name="domain-name",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="inet:domain-name",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """domain_name must be of a type compatible with inet:domain-name""",
                    "defined-type": "inet:domain-name",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': ['1..253']}), is_leaf=True, yang_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='inet:domain-name', is_config=True)""",
                }
            )
        self.__domain_name = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_domain_name(self):
        # Reset the leaf to a fresh, unchanged default wrapper.
        self.__domain_name = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_dict={
                    "pattern": "((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.",
                    "length": ["1..253"],
                },
            ),
            is_leaf=True,
            yang_name="domain-name",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="inet:domain-name",
            is_config=True,
        )
    def _get_login_banner(self):
        """
        Getter method for login_banner, mapped from YANG variable /system/config/login_banner (string)
        YANG Description: The console login message displayed before the login prompt,
        i.e., before a user logs into the system.
        """
        return self.__login_banner
    def _set_login_banner(self, v, load=False):
        """
        Setter method for login_banner, mapped from YANG variable /system/config/login_banner (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_login_banner is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_login_banner() directly.
        YANG Description: The console login message displayed before the login prompt,
        i.e., before a user logs into the system.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="login-banner",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="string",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """login_banner must be of a type compatible with string""",
                    "defined-type": "string",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="login-banner", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=True)""",
                }
            )
        self.__login_banner = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_login_banner(self):
        # Reset the leaf to a fresh, unchanged default wrapper.
        self.__login_banner = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="login-banner",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="string",
            is_config=True,
        )
    def _get_motd_banner(self):
        """
        Getter method for motd_banner, mapped from YANG variable /system/config/motd_banner (string)
        YANG Description: The console message displayed after a user logs into the
        system. They system may append additional standard
        information such as the current system date and time, uptime,
        last login timestamp, etc.
        """
        return self.__motd_banner
    def _set_motd_banner(self, v, load=False):
        """
        Setter method for motd_banner, mapped from YANG variable /system/config/motd_banner (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_motd_banner is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_motd_banner() directly.
        YANG Description: The console message displayed after a user logs into the
        system. They system may append additional standard
        information such as the current system date and time, uptime,
        last login timestamp, etc.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="motd-banner",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/system",
                defining_module="openconfig-system",
                yang_type="string",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """motd_banner must be of a type compatible with string""",
                    "defined-type": "string",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="motd-banner", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=True)""",
                }
            )
        self.__motd_banner = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_motd_banner(self):
        # Reset the leaf to a fresh, unchanged default wrapper.
        self.__motd_banner = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="motd-banner",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/system",
            defining_module="openconfig-system",
            yang_type="string",
            is_config=True,
        )
    # Public attribute access goes through the generated getters/setters so
    # that type restrictions are applied on assignment.
    hostname = __builtin__.property(_get_hostname, _set_hostname)
    domain_name = __builtin__.property(_get_domain_name, _set_domain_name)
    login_banner = __builtin__.property(_get_login_banner, _set_login_banner)
    motd_banner = __builtin__.property(_get_motd_banner, _set_motd_banner)
    _pyangbind_elements = OrderedDict(
        [
            ("hostname", hostname),
            ("domain_name", domain_name),
            ("login_banner", login_banner),
            ("motd_banner", motd_banner),
        ]
    )
|
|
__author__ = 'rencui'
from afinn import Afinn
import numpy
import json
from textstat.textstat import textstat
from nltk.stem.porter import *
from tokenizer import simpleTokenize
import logging
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
import matplotlib as mat
from sklearn.ensemble import ExtraTreesClassifier
stemmer = PorterStemmer()
logging.basicConfig()
dayMapper = {'Mon': 1, 'Tue': 2, 'Wed': 3, 'Thu': 4, 'Fri': 5, 'Sat': 6, 'Sun': 7}
def hourMapper(hour):
    """Bucket an hour-of-day (0-23) into one of four 6-hour bins (0-3).

    Values outside 0-23 fall into bin 3, matching the original behavior.
    """
    h = int(hour)
    if 0 <= h < 6:
        return 0
    elif 6 <= h < 12:
        return 1
    elif 12 <= h < 18:
        return 2
    else:
        return 3
def mapMention(inputFile):
    """Load mention metadata from a JSON-lines file.

    Each line of ``inputFile`` is a JSON object with at least the
    ``screen_name``, ``verified`` and ``followers_count`` fields.

    Returns a dict mapping screen_name -> (verified_flag, followers_count),
    where verified_flag is 1 when the record's ``verified`` field equals the
    string 'true' and 0 otherwise.
    """
    outputMapper = {}
    # ``with`` guarantees the handle is closed even if json.loads raises
    # (the original leaked the file on a parse error).
    with open(inputFile, 'r') as mentionFile:
        for line in mentionFile:
            mention = json.loads(line.strip())
            # NOTE(review): compared against the string 'true'; if the source
            # data stores a JSON boolean this is always 0 -- confirm format.
            if mention['verified'] == 'true':
                verify = 1
            else:
                verify = 0
            outputMapper[mention['screen_name']] = (verify, mention['followers_count'])
    return outputMapper
def stemContent(input):
    """Return ``input`` re-tokenized with every word Porter-stemmed,
    joined by single spaces."""
    stemmed = [stemmer.stem(word) for word in simpleTokenize(input)]
    return ' '.join(stemmed).strip()
def POSRatio(inputList):
    """Normalize a list of counts into ratios summing to 1.

    Parameters: inputList -- iterable of numbers or numeric strings.
    Returns a list of floats. An all-zero input yields one 0.0 per element
    (the original hard-coded [0.0, 0.0, 0.0], silently assuming exactly
    three POS tags; behavior is unchanged for length-3 inputs).
    """
    counts = [float(item) for item in inputList]
    # Hoisted: the original recomputed sum(temp) once per element (O(n^2)).
    total = sum(counts)
    if total == 0:
        return [0.0] * len(counts)
    return [c / total for c in counts]
# Screen-name -> (verified, follower_count) lookup used for mention features.
mentionMapper = mapMention('adData/analysis/ranked/mention.json')
# Human-readable names for the feature columns assembled below, in order.
featureList = ['Length', 'URL', 'Hashtag', 'Username', 'Sentiment', 'Readability',
               'ParseDepth', 'HeadCount', 'POS_N', 'POS_V', 'POS_A', '!', '?',
               'Verified', 'FollowerCount']
# happy_log_probs, sad_log_probs = utilities.readSentimentList('twitter_sentiment_list.csv')
afinn = Afinn()
# Positive/negative tweet groups plus parser-derived side files; the feature
# loop below indexes them in parallel, so rows are presumably aligned across
# files -- verify when regenerating the data.
posFile = open('adData/analysis/groups/totalGroup/group0.pos', 'r')
negFile = open('adData/analysis/groups/totalGroup/group0.neg', 'r')
posParseLengthFile = open('adData/analysis/groups/totalGroup/parserLength0.pos', 'r')
negParseLengthFile = open('adData/analysis/groups/totalGroup/parserLength0.neg', 'r')
posHeadCountFile = open('adData/analysis/groups/totalGroup/parserHeadCount0.pos', 'r')
negHeadCountFile = open('adData/analysis/groups/totalGroup/parserHeadCount0.neg', 'r')
posPOSCountFile = open('adData/analysis/groups/totalGroup/parserPOSCount0.pos', 'r')
negPOSCountFile = open('adData/analysis/groups/totalGroup/parserPOSCount0.neg', 'r')
# Parallel per-tweet accumulators filled by the loading loops below.
ids = []
contents = []
scores = []
days = []
time = []
labels = []
parseLength = []
headCount = []
usernames = []
semanticFeatures = []
classes = []
POScounts = []
# print() works identically under Python 2 and 3 for a single string arg.
print('loading...')
# Each tweet line is ' :: '-separated:
# score :: day :: hour :: text :: _ :: id :: _ :: semicolon-joined usernames
for line in posFile:
    seg = line.strip().split(' :: ')
    text = seg[3]
    username = seg[7].split(';')
    time.append(hourMapper(seg[2]))
    day = seg[1]
    score = float(seg[0])
    ids.append(seg[5])
    usernames.append(username)
    days.append(dayMapper[day])
    contents.append(text)
    scores.append(score)
    labels.append(1)
for line in negFile:
    seg = line.strip().split(' :: ')
    text = seg[3]
    username = seg[7].split(';')
    time.append(hourMapper(seg[2]))
    day = seg[1]
    score = float(seg[0])
    ids.append(seg[5])
    usernames.append(username)
    days.append(dayMapper[day])
    contents.append(text)
    scores.append(score)
    labels.append(0)
# BUG FIX: the original used line.strip(' :: ')[0], which strips the
# characters ' ' and ':' from the line's ENDS and then takes the first
# CHARACTER -- wrong for any multi-digit value. Parse the first
# ' :: '-separated field instead, matching the POS-count loops below.
for line in posParseLengthFile:
    parseLength.append(int(line.strip().split(' :: ')[0]))
for line in negParseLengthFile:
    parseLength.append(int(line.strip().split(' :: ')[0]))
for line in posHeadCountFile:
    headCount.append(int(line.strip().split(' :: ')[0]))
for line in negHeadCountFile:
    headCount.append(int(line.strip().split(' :: ')[0]))
for line in posPOSCountFile:
    POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
for line in negPOSCountFile:
    POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
posHeadCountFile.close()
negHeadCountFile.close()
posParseLengthFile.close()
negParseLengthFile.close()
posPOSCountFile.close()
negPOSCountFile.close()
posFile.close()
negFile.close()
print len(contents)
# Assemble one numeric feature vector per tweet; the column order must match
# featureList defined above.
for index, content in enumerate(contents):
    temp = []
    words = simpleTokenize(content)
    twLen = float(len(words))
    sentiScore = afinn.score(stemContent(content))
    # posProb, negProb = utilities.classifySentiment(words, happy_log_probs, sad_log_probs)
    readScore = textstat.coleman_liau_index(content)
    temp.append(twLen)
    # Binary presence flags for placeholder tokens (URL/hashtag/username),
    # rather than raw counts (the commented lines kept for reference).
    # temp.append(content.count('URRL'))
    if content.count('URRL') > 0:
        temp.append(1)
    else:
        temp.append(0)
    # temp.append(content.count('HHTTG'))
    if content.count('HHTTG') > 0:
        temp.append(1)
    else:
        temp.append(0)
    # temp.append(content.count('USSERNM'))
    if content.count('USSERNM') > 0:
        temp.append(1)
    else:
        temp.append(0)
    # Length-normalized sentiment plus readability and parser statistics.
    temp.append(sentiScore / twLen)
    temp.append(readScore)
    temp.append(parseLength[index] / twLen)
    temp.append(headCount[index] / twLen)
    # temp.append(days[index])
    # temp.append(time[index])
    temp += POScounts[index]
    # temp.append(content.count('!'))
    if content.count('!') > 0:
        temp.append(1)
    else:
        temp.append(0)
    # temp.append(content.count('?'))
    if content.count('?') > 0:
        temp.append(1)
    else:
        temp.append(0)
    # Mention features: flag if any mentioned user is verified, and the mean
    # follower count over mentioned users found in mentionMapper.
    mentionFlag = 0
    mentionFollowers = 0
    userCount = 0.0
    for user in usernames[index]:
        if user in mentionMapper:
            userCount += 1
            if mentionMapper[user][0] == 1:
                mentionFlag = 1
            mentionFollowers += mentionMapper[user][1]
    temp.append(mentionFlag)
    if userCount == 0:
        temp.append(0.0)
    else:
        temp.append(mentionFollowers / userCount)
    semanticFeatures.append(numpy.array(temp))
    classes.append(labels[index])
features = csr_matrix(numpy.array(semanticFeatures))
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=50,
                              random_state=0)
forest.fit(features, classes)
importances = forest.feature_importances_
# Std of per-tree importances gives the error bars for the plot below.
std = numpy.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
# Feature indices sorted by decreasing importance.
indices = numpy.argsort(importances)[::-1]
labelList = []
for index in indices:
    labelList.append(featureList[index])
# Print the feature ranking
print("Feature ranking:")
for f in range(features.shape[1]):
    print(labelList[f] + '\t' + str(importances[indices[f]]))
# Plot the feature importances of the forest
font = {'family': 'normal', 'size': 12}
mat.rc('font', **font)
plt.figure()
plt.title("Feature Importances")
plt.bar(range(features.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(features.shape[1]), labelList)
plt.xlim([-1, features.shape[1]])
plt.show()
print 'Feature List: 1. Length; 2.URL; 3.Hashtag; 4.Username; 5.Sentiment; 6.Readability; 7.ParseDepth; 8.HeadCount; 9.POS_N; 10.POS_V; 11.POS_A; 12.!; 13.?; 14.Verified; 15.FollowerCount'
|
|
"""GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import _check_cv as check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
###############################################################################
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Value of the l1-penalized negative log-likelihood.

    The penalty applies to off-diagonal entries of ``precision_`` only.
    """
    abs_precision = np.abs(precision_)
    l1_off_diagonal = abs_precision.sum() - np.diag(abs_precision).sum()
    return -log_likelihood(mle, precision_) + alpha * l1_off_diagonal
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : 2D array, (n_features, n_features)
        The sample covariance matrix

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    off_diag = np.copy(emp_cov)  # work on a copy; the input stays intact
    # Zero the diagonal so only off-diagonal magnitudes are considered.
    off_diag.flat[::off_diag.shape[0] + 1] = 0
    return np.max(np.abs(off_diag))
###############################################################################
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
                max_iter=100, verbose=False, return_costs=False,
                eps=np.finfo(np.float).eps):
    """l1-penalized covariance estimator

    Parameters
    ----------
    emp_cov : 2D ndarray, shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.
    alpha : positive float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : boolean, optional
        If verbose is True, the objective function and dual gap are
        printed at each iteration.
    return_costs : boolean, optional
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.
    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrix.
    precision : 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrix.
    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    See Also
    --------
    GraphLasso, GraphLassoCV

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.
    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.
    """
    _, n_features = emp_cov.shape
    if alpha == 0:
        # No penalty: the MLE is the empirical covariance itself.
        return emp_cov, linalg.inv(emp_cov)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    diagonal = emp_cov.flat[::n_features + 1]
    covariance_.flat[::n_features + 1] = diagonal
    precision_ = pinvh(covariance_)
    indices = np.arange(n_features)
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == 'cd':
        errors = dict(over='raise', invalid='ignore')
    else:
        errors = dict(invalid='raise')
    try:
        # Block-coordinate descent: one lasso sub-problem per row/column.
        for i in range(max_iter):
            for idx in range(n_features):
                # Covariance with row/column ``idx`` removed, and that
                # row of the empirical covariance (the lasso target).
                sub_covariance = covariance_[indices != idx].T[indices != idx]
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == 'cd':
                        # Use coordinate descent
                        coefs = -(precision_[indices != idx, idx]
                                  / (precision_[idx, idx] + 1000 * eps))
                        coefs, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs, alpha, 0, sub_covariance, row, row,
                            max_iter, tol)
                    else:
                        # Use LARS
                        _, _, coefs = lars_path(
                            sub_covariance, row, Xy=row, Gram=sub_covariance,
                            alpha_min=alpha / (n_features - 1), copy_Gram=True,
                            method='lars')
                        coefs = coefs[:, -1]
                # Update the precision matrix
                precision_[idx, idx] = (
                    1. / (covariance_[idx, idx]
                          - np.dot(covariance_[indices != idx, idx], coefs)))
                precision_[indices != idx, idx] = (- precision_[idx, idx]
                                                   * coefs)
                precision_[idx, indices != idx] = (- precision_[idx, idx]
                                                   * coefs)
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
                    % (i, cost, d_gap))
            if return_costs:
                costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError('Non SPD result: the system is '
                                         'too ill-conditioned for this solver')
        else:
            # for/else: reached only when max_iter is exhausted without break.
            warnings.warn('graph_lasso: did not converge after %i iteration:'
                          ' dual gap: %.3e' % (max_iter, d_gap),
                          ConvergenceWarning)
    except FloatingPointError as e:
        e.args = (e.args[0]
                  + '. The system is too ill-conditioned for this solver',)
        raise e
    if return_costs:
        return covariance_, precision_, costs
    return covariance_, precision_
class GraphLasso(EmpiricalCovariance):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    Parameters
    ----------
    alpha : positive float, optional
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : boolean, optional
        If verbose is True, the objective function and dual gap are
        plotted at each iteration.

    Attributes
    ----------
    `covariance_` : array-like, shape (n_features, n_features)
        Estimated covariance matrix

    `precision_` : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.

    See Also
    --------
    graph_lasso, GraphLassoCV
    """
    def __init__(self, alpha=.01, mode='cd', tol=1e-4, max_iter=100,
                 verbose=False):
        self.alpha = alpha
        self.mode = mode
        self.tol = tol
        self.max_iter = max_iter
        self.verbose = verbose
        # The base class needs this for the score method
        self.store_precision = True
    def fit(self, X, y=None):
        """Fit the GraphLasso model to X.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data from which to compute the covariance estimate.
        y : ignored, present for estimator API compatibility.

        Returns
        -------
        self : the fitted estimator, with ``covariance_`` and
            ``precision_`` set.
        """
        emp_cov = empirical_covariance(X)
        self.covariance_, self.precision_ = graph_lasso(
            emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
            max_iter=self.max_iter, verbose=self.verbose,)
        return self
###############################################################################
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
                     tol=1e-4, max_iter=100, verbose=False):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Parameters
    ----------
    X : 2D ndarray, shape (n_samples, n_features)
        Data from which to compute the covariance estimate.
    alphas : list of positive floats
        The list of regularization parameters, decreasing order.
    X_test : 2D array, shape (n_test_samples, n_features), optional
        Optional test matrix to measure generalisation error.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    max_iter : integer, optional
        The maximum number of iterations.
    verbose : integer, optional
        The higher the verbosity flag, the more information is printed
        during the fitting.

    Returns
    -------
    `covariances_` : List of 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrices.
    `precisions_` : List of 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrices.
    `scores_` : List of float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)
    for alpha in alphas:
        try:
            # Capture the errors, and move on
            # Warm start: reuse the previous alpha's covariance as cov_init.
            covariance_, precision_ = graph_lasso(
                emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
                max_iter=max_iter, verbose=inner_verbose)
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            # Solver failed for this alpha: record NaN placeholders and the
            # worst possible score so the path keeps its alignment.
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write('.')
        elif verbose > 1:
            if X_test is not None:
                print('[graph_lasso_path] alpha: %.2e, score: %.2e'
                      % (alpha, this_score))
            else:
                print('[graph_lasso_path] alpha: %.2e' % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
Attributes
----------
`covariance_` : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
`precision_` : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
`alpha_`: float
Penalization parameter selected.
`cv_alphas_`: list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
max_iter=100, mode='cd', n_jobs=1, verbose=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
emp_cov = empirical_covariance(X)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
# Little danse to transform the list in what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=inner_verbose)
return self
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from datetime import datetime
import json
import logging
import time
import urllib
import subprocess
from flask import Markup, g, render_template, request
from slimit import minify
from smartypants import smartypants
import app_config
import copytext
# Configure module-level logging with the application-wide format and level.
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
class BetterJSONEncoder(json.JSONEncoder):
    """
    A JSON encoder that intelligently handles datetimes.

    datetime objects are serialized to their ISO-8601 string form; every
    other unsupported type falls through to the stock encoder (which raises
    TypeError).
    """
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)
class Includer(object):
    """
    Base class for Javascript and CSS pseudo-template-tags.
    See `make_context` for an explanation of `asset_depth`.
    """
    def __init__(self, asset_depth=0):
        # Asset paths queued by push(); emptied on every render().
        self.includes = []
        # HTML tag template with one '%s' slot; set by subclasses.
        self.tag_string = None
        self.asset_depth = asset_depth

    def push(self, path):
        """
        Queue an asset path. Returns '' so the call can be embedded
        directly in a template expression without emitting output.
        """
        self.includes.append(path)

        return ''

    def _compress(self):
        """
        Concatenate/minify the queued assets. Implemented by subclasses.
        """
        raise NotImplementedError()

    def _relativize_path(self, path):
        """
        Rewrite an asset path relative to the current request's depth in
        the URL hierarchy, adjusted by `asset_depth`.
        """
        relative_path = path

        if relative_path.startswith('www/'):
            relative_path = relative_path[4:]

        depth = len(request.path.split('/')) - (2 + self.asset_depth)

        while depth > 0:
            relative_path = '../%s' % relative_path
            depth -= 1

        return relative_path

    def render(self, path):
        """
        Render the queued includes: a single compressed file when
        `g.compile_includes` is set, individual tags otherwise. Always
        clears the queue before returning.
        """
        if getattr(g, 'compile_includes', False):
            if path in g.compiled_includes:
                timestamp_path = g.compiled_includes[path]
            else:
                # Add a querystring to the rendered filename to prevent caching
                timestamp_path = '%s?%i' % (path, int(time.time()))

                out_path = 'www/%s' % path

                if path not in g.compiled_includes:
                    logger.info('Rendering %s' % out_path)

                    with codecs.open(out_path, 'w', encoding='utf-8') as f:
                        f.write(self._compress())

                # See "fab render"
                g.compiled_includes[path] = timestamp_path

            markup = Markup(self.tag_string % self._relativize_path(timestamp_path))
        else:
            # BUG FIX: removed a dead `response = ','.join(self.includes)`
            # assignment that was immediately overwritten by the join below.
            response = '\n'.join([
                self.tag_string % self._relativize_path(src) for src in self.includes
            ])

            markup = Markup(response)

        del self.includes[:]

        return markup
class JavascriptIncluder(Includer):
    """
    Pseudo-template tag that collects Javascript files and serves either the
    raw sources or a single minified bundle.
    """
    def __init__(self, *args, **kwargs):
        Includer.__init__(self, *args, **kwargs)

        self.tag_string = '<script type="text/javascript" src="%s"></script>'

    def _compress(self):
        """
        Minify the queued scripts (files already ending in .min.js pass
        through untouched) and prepend the rendered JS header template.
        """
        bundle = []
        file_paths = []

        for src in self.includes:
            full_path = 'www/%s' % src
            file_paths.append(full_path)

            with codecs.open(full_path, encoding='utf-8') as f:
                contents = f.read()

            if src.endswith('.min.js'):
                logger.info('- appending already compressed %s' % src)
                bundle.append(contents)
            else:
                logger.info('- compressing %s' % src)
                bundle.append(minify(contents))

        context = make_context()
        context['paths'] = file_paths

        bundle.insert(0, render_template('_js_header.js', **context))

        return '\n'.join(bundle)
class CSSIncluder(Includer):
    """
    Pseudo-template tag that handles collecting CSS and serving appropriate clean or compressed versions.
    """
    def __init__(self, *args, **kwargs):
        Includer.__init__(self, *args, **kwargs)

        self.tag_string = '<link rel="stylesheet" type="text/css" href="%s" />'

    def _compress(self):
        """
        Compile each queued LESS file with lessc and concatenate the output
        below the rendered CSS header template.
        """
        output = []
        src_paths = []

        for src in self.includes:
            src_paths.append('%s' % src)

            try:
                compressed_src = subprocess.check_output(["node_modules/less/bin/lessc", "-x", src])
                output.append(compressed_src)
            # BUG FIX: narrowed a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt) to the errors check_output
            # actually raises: OSError when lessc is missing and
            # CalledProcessError when compilation fails.
            except (OSError, subprocess.CalledProcessError):
                logger.error('It looks like "lessc" isn\'t installed. Try running: "npm install"')
                raise

        context = make_context()
        context['paths'] = src_paths

        header = render_template('_css_header.css', **context)
        output.insert(0, header)

        return '\n'.join(output)
def flatten_app_config():
    """
    Returns a copy of app_config containing only
    configuration variables.
    """
    # Only all-caps [constant] vars get included
    return {
        name: value
        for name, value in app_config.__dict__.items()
        if name.upper() == name
    }
def make_context(asset_depth=0):
    """
    Create a base-context for rendering views.
    Includes app_config and JS/CSS includers.
    `asset_depth` indicates how far into the url hierarchy
    the assets are hosted. If 0, then they are at the root.
    If 1 then at /foo/, etc.
    """
    ctx = flatten_app_config()

    # COPY is optional: a missing/unreadable copy spreadsheet is tolerated.
    try:
        ctx['COPY'] = copytext.Copy(app_config.COPY_PATH)
    except copytext.CopyException:
        pass

    ctx['JS'] = JavascriptIncluder(asset_depth=asset_depth)
    ctx['CSS'] = CSSIncluder(asset_depth=asset_depth)

    return ctx
def urlencode_filter(s):
    """
    Filter to urlencode strings.
    """
    # BUG FIX: `type(s) == 'Markup'` compared a type object with a string
    # and was always False, so Markup values were never unescaped. Use
    # isinstance instead.
    if isinstance(s, Markup):
        s = s.unescape()

    # Evaluate COPY elements
    if type(s) is not unicode:
        s = unicode(s)

    s = s.encode('utf8')
    s = urllib.quote_plus(s)

    return Markup(s)
def smarty_filter(s):
    """
    Filter to smartypants strings.
    """
    # BUG FIX: `type(s) == 'Markup'` compared a type object with a string
    # and was always False, so Markup values were never unescaped. Use
    # isinstance instead.
    if isinstance(s, Markup):
        s = s.unescape()

    # Evaluate COPY elements
    if type(s) is not unicode:
        s = unicode(s)

    s = s.encode('utf-8')
    s = smartypants(s)

    try:
        return Markup(s)
    # BUG FIX: narrowed a bare `except:` so SystemExit/KeyboardInterrupt
    # are not swallowed; the best-effort fallback behavior is preserved.
    except Exception:
        logger.error('This string failed to encode: %s' % s)
        return Markup(s)
|
|
"""
mbed SDK
Copyright (c) 2011-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, absolute_import
from builtins import str
from os.path import splitext, basename, relpath, join
import shutil
from tools.utils import mkdir
from tools.export.gnuarmeclipse import GNUARMEclipse
from tools.export.gnuarmeclipse import UID
from tools.build_api import prepare_toolchain
from tools.targets import TARGET_MAP
from sys import flags, platform
# Global random identifier (UID) generator instance.
u = UID()
class Sw4STM32(GNUARMEclipse):
    """
    Sw4STM32 class

    Exports an mbed project as a System Workbench for STM32 (Eclipse CDT)
    managed project: generates .project, .cproject, .settings and launch
    files from Jinja templates.
    """
    NAME = 'Sw4STM32'
    TOOLCHAIN = 'GCC_ARM'
    # Map from mbed target name to the System Workbench board name and the
    # ST MCU identifier written into the generated project files.
    BOARDS = {
        'B96B_F446VE':
        {
            'name': 'B96B-F446VE',
            'mcuId': 'STM32F446VETx'
        },
        'DISCO_F051R8':
        {
            'name': 'STM32F0DISCOVERY',
            'mcuId': 'STM32F051R8Tx'
        },
        'DISCO_F303VC':
        {
            'name': 'STM32F3DISCOVERY',
            'mcuId': 'STM32F303VCTx'
        },
        'DISCO_F334C8':
        {
            'name': 'STM32F3348DISCOVERY',
            'mcuId': 'STM32F334C8Tx'
        },
        'DISCO_F401VC':
        {
            'name': 'STM32F401C-DISCO',
            'mcuId': 'STM32F401VCTx'
        },
        'DISCO_F407VG':
        {
            'name': 'STM32F4DISCOVERY',
            'mcuId': 'STM32F407VGTx'
        },
        'DISCO_F413ZH':
        {
            'name': 'DISCO_F413',
            'mcuId': 'STM32F413ZHTx'
        },
        'DISCO_F429ZI':
        {
            'name': 'STM32F429I-DISCO',
            'mcuId': 'STM32F429ZITx'
        },
        'DISCO_F469NI':
        {
            'name': 'DISCO-F469NI',
            'mcuId': 'STM32F469NIHx'
        },
        'DISCO_F746NG':
        {
            'name': 'STM32F746G-DISCO',
            'mcuId': 'STM32F746NGHx'
        },
        'DISCO_F769NI':
        {
            'name': 'DISCO-F769NI',
            'mcuId': 'STM32F769NIHx'
        },
        'DISCO_L053C8':
        {
            'name': 'STM32L0538DISCOVERY',
            'mcuId': 'STM32L053C8Tx'
        },
        'DISCO_L072CZ_LRWAN1':
        {
            'name': 'DISCO-L072CZ-LRWAN1',
            'mcuId': 'STM32L072CZTx'
        },
        'MTB_MURATA_ABZ':
        {
            'name': 'MTB-MURATA-ABZ',
            'mcuId': 'STM32L0x2xZ'
        },
        'DISCO_L475VG_IOT01A':
        {
            'name': 'STM32L475G-DISCO',
            'mcuId': 'STM32L475VGTx'
        },
        'DISCO_L476VG':
        {
            'name': 'STM32L476G-DISCO',
            'mcuId': 'STM32L476VGTx'
        },
        'NUCLEO_F030R8':
        {
            'name': 'NUCLEO-F030R8',
            'mcuId': 'STM32F030R8Tx'
        },
        'NUCLEO_F031K6':
        {
            'name': 'NUCLEO-F031K6',
            'mcuId': 'STM32F031K6Tx'
        },
        'NUCLEO_F042K6':
        {
            'name': 'NUCLEO-F042K6',
            'mcuId': 'STM32F042K6Tx'
        },
        'NUCLEO_F070RB':
        {
            'name': 'NUCLEO-F070RB',
            'mcuId': 'STM32F070RBTx'
        },
        'NUCLEO_F072RB':
        {
            'name': 'NUCLEO-F072RB',
            'mcuId': 'STM32F072RBTx'
        },
        'NUCLEO_F091RC':
        {
            'name': 'NUCLEO-F091RC',
            'mcuId': 'STM32F091RCTx'
        },
        'NUCLEO_F103RB':
        {
            'name': 'NUCLEO-F103RB',
            'mcuId': 'STM32F103RBTx'
        },
        'NUCLEO_F207ZG':
        {
            'name': 'NUCLEO-F207ZG',
            'mcuId': 'STM32F207ZGTx'
        },
        'NUCLEO_F302R8':
        {
            'name': 'NUCLEO-F302R8',
            'mcuId': 'STM32F302R8Tx'
        },
        'NUCLEO_F303K8':
        {
            'name': 'NUCLEO-F303K8',
            'mcuId': 'STM32F303K8Tx'
        },
        'NUCLEO_F303RE':
        {
            'name': 'NUCLEO-F303RE',
            'mcuId': 'STM32F303RETx'
        },
        'NUCLEO_F303ZE':
        {
            'name': 'NUCLEO-F303ZE',
            'mcuId': 'STM32F303ZETx'
        },
        'NUCLEO_F334R8':
        {
            'name': 'NUCLEO-F334R8',
            'mcuId': 'STM32F334R8Tx'
        },
        'NUCLEO_F401RE':
        {
            'name': 'NUCLEO-F401RE',
            'mcuId': 'STM32F401RETx'
        },
        'NUCLEO_F410RB':
        {
            'name': 'NUCLEO-F410RB',
            'mcuId': 'STM32F410RBTx'
        },
        'NUCLEO_F411RE':
        {
            'name': 'NUCLEO-F411RE',
            'mcuId': 'STM32F411RETx'
        },
        'NUCLEO_F413ZH':
        {
            'name': 'NUCLEO-F413ZH',
            'mcuId': 'STM32F413ZHTx'
        },
        'NUCLEO_F429ZI':
        {
            'name': 'NUCLEO-F429ZI',
            'mcuId': 'STM32F429ZITx'
        },
        'NUCLEO_F446RE':
        {
            'name': 'NUCLEO-F446RE',
            'mcuId': 'STM32F446RETx'
        },
        'NUCLEO_F446ZE':
        {
            'name': 'NUCLEO-F446ZE',
            'mcuId': 'STM32F446ZETx'
        },
        'NUCLEO_F746ZG':
        {
            'name': 'NUCLEO-F746ZG',
            'mcuId': 'STM32F746ZGTx'
        },
        'NUCLEO_F767ZI':
        {
            'name': 'NUCLEO-F767ZI',
            'mcuId': 'STM32F767ZITx'
        },
        'NUCLEO_L011K4':
        {
            'name': 'NUCLEO-L011K4',
            'mcuId': 'STM32L011K4Tx'
        },
        'NUCLEO_L031K6':
        {
            'name': 'NUCLEO-L031K6',
            'mcuId': 'STM32L031K6Tx'
        },
        'NUCLEO_L053R8':
        {
            'name': 'NUCLEO-L053R8',
            'mcuId': 'STM32L053R8Tx'
        },
        'NUCLEO_L073RZ':
        {
            'name': 'NUCLEO-L073RZ',
            'mcuId': 'STM32L073RZTx'
        },
        'MTB_RAK811':
        {
            'name': 'MTB-RAK-811',
            'mcuId': 'STM32L151CBUxA'
        },
        'NUCLEO_L152RE':
        {
            'name': 'NUCLEO-L152RE',
            'mcuId': 'STM32L152RETx'
        },
        'NUCLEO_L432KC':
        {
            'name': 'NUCLEO-L432KC',
            'mcuId': 'STM32L432KCUx'
        },
        'MTB_ADV_WISE_1510':
        {
            'name': 'MTB-ADV-WISE-1510',
            'mcuId': 'STM32L443xC'
        },
        'NUCLEO_L476RG':
        {
            'name': 'NUCLEO-L476RG',
            'mcuId': 'STM32L476RGTx'
        },
        'NUCLEO_L486RG':
        {
            'name': 'NUCLEO-L486RG',
            'mcuId': 'STM32L486RGTx'
        },
        'NUCLEO_L496ZG':
        {
            'name': 'NUCLEO-L496ZG',
            'mcuId': 'STM32L496ZGTx'
        },
        'NUCLEO_L496ZG_P':
        {
            'name': 'NUCLEO-L496ZG',
            'mcuId': 'STM32L496ZGTx'
        },
        'NUCLEO_L4R5ZI':
        {
            'name': 'NUCLEO-L4R5ZI',
            'mcuId': 'STM32L4R5ZITx'
        }
    }
    @classmethod
    def is_target_supported(cls, target_name):
        """
        Return True when the target resolves to a board in BOARDS and the
        GCC_ARM toolchain is among the target's supported toolchains.
        """
        target = TARGET_MAP[target_name]
        target_supported = bool(set(target.resolution_order_names)
                                .intersection(set(cls.BOARDS.keys())))
        toolchain_supported = cls.TOOLCHAIN in target.supported_toolchains
        return target_supported and toolchain_supported
    def __gen_dir(self, dir_name):
        """
        Method that creates directory
        """
        settings = join(self.export_dir, dir_name)
        mkdir(settings)
    def get_fpu_hardware(self, fpu_unit):
        """
        Convert fpu unit name into hardware name.

        Returns '' for unknown units.
        """
        hw = ''
        fpus = {
            'fpv4spd16': 'fpv4-sp-d16',
            'fpv5d16': 'fpv5-d16',
            'fpv5spd16': 'fpv5-sp-d16'
        }
        if fpu_unit in fpus:
            hw = fpus[fpu_unit]
        return hw
    def process_sw_options(self, opts, flags_in):
        """
        Process System Workbench specific options.
        System Workbench for STM32 has some compile options, which are not recognized by the GNUARMEclipse exporter.
        Those are handled in this method.
        """
        opts['c']['preprocess'] = False
        if '-E' in flags_in['c_flags']:
            opts['c']['preprocess'] = True
        opts['cpp']['preprocess'] = False
        if '-E' in flags_in['cxx_flags']:
            opts['cpp']['preprocess'] = True
        opts['c']['slowflashdata'] = False
        if '-mslow-flash-data' in flags_in['c_flags']:
            opts['c']['slowflashdata'] = True
        opts['cpp']['slowflashdata'] = False
        if '-mslow-flash-data' in flags_in['cxx_flags']:
            opts['cpp']['slowflashdata'] = True
        # Translate boolean optimization/warning toggles back into the
        # free-form "other" flag strings the IDE configuration expects.
        if opts['common']['optimization.messagelength']:
            opts['common']['optimization.other'] += ' -fmessage-length=0'
        if opts['common']['optimization.signedchar']:
            opts['common']['optimization.other'] += ' -fsigned-char'
        if opts['common']['optimization.nocommon']:
            opts['common']['optimization.other'] += ' -fno-common'
        if opts['common']['optimization.noinlinefunctions']:
            opts['common']['optimization.other'] += ' -fno-inline-functions'
        if opts['common']['optimization.freestanding']:
            opts['common']['optimization.other'] += ' -ffreestanding'
        if opts['common']['optimization.nobuiltin']:
            opts['common']['optimization.other'] += ' -fno-builtin'
        if opts['common']['optimization.spconstant']:
            opts['common']['optimization.other'] += ' -fsingle-precision-constant'
        if opts['common']['optimization.nomoveloopinvariants']:
            opts['common']['optimization.other'] += ' -fno-move-loop-invariants'
        if opts['common']['warnings.unused']:
            opts['common']['warnings.other'] += ' -Wunused'
        if opts['common']['warnings.uninitialized']:
            opts['common']['warnings.other'] += ' -Wuninitialized'
        if opts['common']['warnings.missingdeclaration']:
            opts['common']['warnings.other'] += ' -Wmissing-declarations'
        if opts['common']['warnings.pointerarith']:
            opts['common']['warnings.other'] += ' -Wpointer-arith'
        if opts['common']['warnings.padded']:
            opts['common']['warnings.other'] += ' -Wpadded'
        if opts['common']['warnings.shadow']:
            opts['common']['warnings.other'] += ' -Wshadow'
        if opts['common']['warnings.logicalop']:
            opts['common']['warnings.other'] += ' -Wlogical-op'
        if opts['common']['warnings.agreggatereturn']:
            opts['common']['warnings.other'] += ' -Waggregate-return'
        if opts['common']['warnings.floatequal']:
            opts['common']['warnings.other'] += ' -Wfloat-equal'
        opts['ld']['strip'] = False
        if '-s' in flags_in['ld_flags']:
            opts['ld']['strip'] = True
        opts['ld']['shared'] = False
        if '-shared' in flags_in['ld_flags']:
            opts['ld']['shared'] = True
        opts['ld']['soname'] = ''
        opts['ld']['implname'] = ''
        opts['ld']['defname'] = ''
        for item in flags_in['ld_flags']:
            if item.startswith('-Wl,-soname='):
                opts['ld']['soname'] = item[len('-Wl,-soname='):]
            if item.startswith('-Wl,--out-implib='):
                opts['ld']['implname'] = item[len('-Wl,--out-implib='):]
            if item.startswith('-Wl,--output-def='):
                opts['ld']['defname'] = item[len('-Wl,--output-def='):]
        opts['common']['arm.target.fpu.hardware'] = self.get_fpu_hardware(
            opts['common']['arm.target.fpu.unit'])
        opts['common']['debugging.codecov'] = False
        if '-fprofile-arcs' in flags_in['common_flags'] and '-ftest-coverage' in flags_in['common_flags']:
            opts['common']['debugging.codecov'] = True
        # Passing linker options to linker with '-Wl,'-prefix.
        for index in range(len(opts['ld']['flags'])):
            item = opts['ld']['flags'][index]
            if not item.startswith('-Wl,'):
                opts['ld']['flags'][index] = '-Wl,' + item
        # Strange System Workbench feature: If first parameter in Other flags is a
        # define (-D...), Other flags will be replaced by defines and other flags
        # are completely ignored. Moving -D parameters to defines.
        for compiler in ['c', 'cpp', 'as']:
            tmpList = opts[compiler]['other'].split(' ')
            otherList = []
            for item in tmpList:
                if item.startswith('-D'):
                    opts[compiler]['defines'].append(str(item[2:]))
                else:
                    otherList.append(item)
            opts[compiler]['other'] = ' '.join(otherList)
        # Assembler options
        # GNU as expects --defsym NAME=VALUE; bare -D names get '=1' appended.
        for as_def in opts['as']['defines']:
            if '=' in as_def:
                opts['as']['other'] += ' --defsym ' + as_def
            else:
                opts['as']['other'] += ' --defsym ' + as_def + '=1'
    def generate(self):
        """
        Generate the .project and .cproject files.
        """
        options = {}

        if not self.resources.linker_script:
            raise NotSupportedException("No linker script found.")
        print('\nCreate a System Workbench for STM32 managed project')
        print('Project name: {0}'.format(self.project_name))
        print('Target:       {0}'.format(self.toolchain.target.name))
        print('Toolchain:    {0}'.format(self.TOOLCHAIN) + '\n')
        self.resources.win_to_unix()
        # Strip the 'lib' prefix from static-library basenames for the IDE.
        libraries = []
        for lib in self.libraries:
            library, _ = splitext(basename(lib))
            libraries.append(library[3:])
        self.system_libraries = [
            'stdc++', 'supc++', 'm', 'c', 'gcc', 'nosys'
        ]
        profiles = self.get_all_profiles()
        # NOTE(review): replacing '"' with '"' is a no-op; this line likely
        # meant to escape quotes for XML (e.g. '&quot;') and may have been
        # mangled — verify against the upstream exporter before changing.
        self.as_defines = [s.replace('"', '"')
                           for s in self.toolchain.get_symbols(True)]
        self.c_defines = [s.replace('"', '"')
                          for s in self.toolchain.get_symbols()]
        self.cpp_defines = self.c_defines
        self.include_path = []
        for s in self.resources.inc_dirs:
            self.include_path.append("../" + self.filter_dot(s))
        print('Include folders: {0}'.format(len(self.include_path)))
        self.compute_exclusions()
        print('Exclude folders: {0}'.format(len(self.excluded_folders)))
        ld_script = self.filter_dot(self.resources.linker_script)
        print('Linker script:   {0}'.format(ld_script))
        lib_dirs = [self.filter_dot(s) for s in self.resources.lib_dirs]
        preproc_cmd = basename(self.toolchain.preproc[0]) + " " + " ".join(self.toolchain.preproc[1:])
        # Build one option dictionary per Eclipse build configuration.
        for id in ['debug', 'release']:
            opts = {}
            opts['common'] = {}
            opts['as'] = {}
            opts['c'] = {}
            opts['cpp'] = {}
            opts['ld'] = {}
            opts['id'] = id
            opts['name'] = opts['id'].capitalize()
            profile = profiles[id]
            # A small hack, do not bother with src_path again,
            # pass an empty string to avoid crashing.
            src_paths = ['']
            toolchain = prepare_toolchain(
                src_paths, "", self.toolchain.target.name, self.TOOLCHAIN, build_profile=[profile])
            # Hack to fill in build_dir
            toolchain.build_dir = self.toolchain.build_dir
            flags = self.toolchain_flags(toolchain)
            # Most GNU ARM Eclipse options have a parent,
            # either debug or release.
            if '-O0' in flags['common_flags'] or '-Og' in flags['common_flags']:
                opts['parent_id'] = 'debug'
            else:
                opts['parent_id'] = 'release'
            self.process_options(opts, flags)
            opts['c']['defines'] = self.c_defines
            opts['cpp']['defines'] = self.cpp_defines
            opts['as']['defines'] = self.as_defines
            self.process_sw_options(opts, flags)
            opts['ld']['library_paths'] = [
                self.filter_dot(s) for s in self.resources.lib_dirs]
            opts['ld']['user_libraries'] = libraries
            opts['ld']['system_libraries'] = self.system_libraries
            opts['ld']['script'] = "linker-script-" + id + ".ld"
            # Unique IDs used in multiple places.
            uid = {}
            uid['config'] = u.id
            uid['tool_c_compiler'] = u.id
            uid['tool_c_compiler_input'] = u.id
            uid['tool_cpp_compiler'] = u.id
            uid['tool_cpp_compiler_input'] = u.id
            opts['uid'] = uid
            options[id] = opts
        # Template context shared by every generated file below.
        ctx = {
            'name': self.project_name,
            'platform': platform,
            'include_paths': self.include_path,
            'config_header': self.config_header_ref.name,
            'exclude_paths': '|'.join(self.excluded_folders),
            'ld_script': ld_script,
            'library_paths': lib_dirs,
            'object_files': self.resources.objects,
            'libraries': libraries,
            'board_name': self.BOARDS[self.target.upper()]['name'],
            'mcu_name': self.BOARDS[self.target.upper()]['mcuId'],
            'cpp_cmd': preproc_cmd,
            'options': options,
            # id property of 'u' will generate new random identifier every time
            # when called.
            'u': u
        }
        self.__gen_dir('.settings')
        self.gen_file('sw4stm32/language_settings_commom.tmpl',
                      ctx, '.settings/language.settings.xml')
        self.gen_file('sw4stm32/project_common.tmpl', ctx, '.project')
        self.gen_file('sw4stm32/cproject_common.tmpl', ctx, '.cproject')
        self.gen_file('sw4stm32/makefile.targets.tmpl', ctx,
                      'makefile.targets', trim_blocks=True, lstrip_blocks=True)
        self.gen_file('sw4stm32/launch.tmpl', ctx, self.project_name +
                      ' ' + options['debug']['name'] + '.launch')
    @staticmethod
    def clean(_):
        """Remove the generated .settings directory."""
        shutil.rmtree(".settings")
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for confusion matrix at thresholds."""
import math
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import binary_confusion_matrices
from tensorflow_model_analysis.metrics import confusion_matrix_metrics
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import metric_util
_TF_MAJOR_VERSION = int(tf.version.VERSION.split('.')[0])
class ConfusionMatrixMetricsTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
  @parameterized.named_parameters(
      ('auc', confusion_matrix_metrics.AUC(), 0.26),
      ('auc_precision_recall', confusion_matrix_metrics.AUCPrecisionRecall(),
       0.36205),
      ('specificity_at_sensitivity',
       confusion_matrix_metrics.SpecificityAtSensitivity(0.5), 0.2),
      ('sensitivity_at_specificity',
       confusion_matrix_metrics.SensitivityAtSpecificity(0.5), 0.0),
      ('precision_at_recall', confusion_matrix_metrics.PrecisionAtRecall(0.5),
       0.5),
      ('recall_at_precision', confusion_matrix_metrics.RecallAtPrecision(0.5),
       1.0),
      ('true_positives', confusion_matrix_metrics.TruePositives(), 1.0),
      ('tp', confusion_matrix_metrics.TP(), 1.0),
      ('false_positives', confusion_matrix_metrics.FalsePositives(), 3.0),
      ('fp', confusion_matrix_metrics.FP(), 3.0),
      ('true_negatives', confusion_matrix_metrics.TrueNegatives(), 2.0),
      ('tn', confusion_matrix_metrics.TN(), 2.0),
      ('false_negatives', confusion_matrix_metrics.FalseNegatives(), 4.0),
      ('fn', confusion_matrix_metrics.FN(), 4.0),
      ('binary_accuracy', confusion_matrix_metrics.BinaryAccuracy(),
       (1.0 + 2.0) / (1.0 + 2.0 + 3.0 + 4.0)),
      ('precision', confusion_matrix_metrics.Precision(), 1.0 / (1.0 + 3.0)),
      ('ppv', confusion_matrix_metrics.PPV(), 1.0 / (1.0 + 3.0)),
      ('recall', confusion_matrix_metrics.Recall(), 1.0 / (1.0 + 4.0)),
      ('tpr', confusion_matrix_metrics.TPR(), 1.0 / (1.0 + 4.0)),
      ('specificity', confusion_matrix_metrics.Specificity(), 2.0 /
       (2.0 + 3.0)),
      ('tnr', confusion_matrix_metrics.TNR(), 2.0 / (2.0 + 3.0)),
      ('fall_out', confusion_matrix_metrics.FallOut(), 3.0 / (3.0 + 2.0)),
      ('fpr', confusion_matrix_metrics.FPR(), 3.0 / (3.0 + 2.0)),
      ('miss_rate', confusion_matrix_metrics.MissRate(), 4.0 / (4.0 + 1.0)),
      ('fnr', confusion_matrix_metrics.FNR(), 4.0 / (4.0 + 1.0)),
      ('negative_predictive_value',
       confusion_matrix_metrics.NegativePredictiveValue(), 2.0 / (2.0 + 4.0)),
      ('npv', confusion_matrix_metrics.NPV(), 2.0 / (2.0 + 4.0)),
      ('false_discovery_rate', confusion_matrix_metrics.FalseDiscoveryRate(),
       3.0 / (3.0 + 1.0)),
      ('false_omission_rate', confusion_matrix_metrics.FalseOmissionRate(),
       4.0 / (4.0 + 2.0)),
      ('prevalence', confusion_matrix_metrics.Prevalence(),
       (1.0 + 4.0) / (1.0 + 2.0 + 3.0 + 4.0)),
      ('prevalence_threshold', confusion_matrix_metrics.PrevalenceThreshold(),
       (math.sqrt((1.0 / (1.0 + 4.0)) * (1.0 - (2.0 / (2.0 + 3.0)))) +
        (2.0 / (2.0 + 3.0) - 1.0)) / ((1.0 / (1.0 + 4.0) +
                                       (2.0 / (2.0 + 3.0)) - 1.0))),
      ('threat_score', confusion_matrix_metrics.ThreatScore(), 1.0 /
       (1.0 + 4.0 + 3.0)),
      ('balanced_accuracy', confusion_matrix_metrics.BalancedAccuracy(),
       ((1.0 / (1.0 + 4.0)) + (2.0 / (2.0 + 3.0))) / 2),
      ('f1_score', confusion_matrix_metrics.F1Score(), 2 * 1.0 /
       (2 * 1.0 + 3.0 + 4.0)),
      ('matthews_correlation_coefficient',
       confusion_matrix_metrics.MatthewsCorrelationCoefficient(),
       (1.0 * 2.0 - 3.0 * 4.0) / math.sqrt(
           (1.0 + 3.0) * (1.0 + 4.0) * (2.0 + 3.0) * (2.0 + 4.0))),
      ('fowlkes_mallows_index', confusion_matrix_metrics.FowlkesMallowsIndex(),
       math.sqrt(1.0 / (1.0 + 3.0) * 1.0 / (1.0 + 4.0))),
      ('informedness', confusion_matrix_metrics.Informedness(),
       (1.0 / (1.0 + 4.0)) + (2.0 / (2.0 + 3.0)) - 1.0),
      ('markedness', confusion_matrix_metrics.Markedness(),
       (1.0 / (1.0 + 3.0)) + (2.0 / (2.0 + 4.0)) - 1.0),
      ('positive_likelihood_ratio',
       confusion_matrix_metrics.PositiveLikelihoodRatio(),
       (1.0 / (1.0 + 4.0)) / (3.0 / (3.0 + 2.0))),
      ('negative_likelihood_ratio',
       confusion_matrix_metrics.NegativeLikelihoodRatio(),
       (4.0 / (4.0 + 1.0)) / (2.0 / (2.0 + 3.0))),
      ('diagnostic_odds_ratio', confusion_matrix_metrics.DiagnosticOddsRatio(),
       ((1.0 / 3.0)) / (4.0 / 2.0)),
      ('predicted_positive_rate',
       confusion_matrix_metrics.PredictedPositiveRate(),
       (1.0 + 3.0) / (1.0 + 2.0 + 3.0 + 4.0)),
  )
  def testConfusionMatrixMetrics(self, metric, expected_value):
    """Checks one derived metric against a hand-computed expected value."""
    if (_TF_MAJOR_VERSION < 2 and metric.__class__.__name__
        in ('SpecificityAtSensitivity', 'SensitivityAtSpecificity',
            'PrecisionAtRecall', 'RecallAtPrecision')):
      self.skipTest('Not supported in TFv1.')
    computations = metric.computations(example_weighted=True)
    # The metric is computed in three chained steps: a weighted histogram,
    # confusion matrices derived from it, then the derived metric value.
    histogram = computations[0]
    matrices = computations[1]
    metrics = computations[2]
    # At the default 0.5 decision threshold the ten examples below yield:
    # tp = 1
    # tn = 2
    # fp = 3
    # fn = 4
    example1 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.6]),
        'example_weights': np.array([1.0]),
    }
    example2 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.3]),
        'example_weights': np.array([1.0]),
    }
    example3 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.2]),
        'example_weights': np.array([1.0]),
    }
    example4 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.6]),
        'example_weights': np.array([1.0]),
    }
    example5 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.7]),
        'example_weights': np.array([1.0]),
    }
    example6 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.8]),
        'example_weights': np.array([1.0]),
    }
    example7 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.1]),
        'example_weights': np.array([1.0]),
    }
    example8 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.2]),
        'example_weights': np.array([1.0]),
    }
    example9 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.3]),
        'example_weights': np.array([1.0]),
    }
    example10 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.4]),
        'example_weights': np.array([1.0]),
    }
    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([
              example1, example2, example3, example4, example5, example6,
              example7, example8, example9, example10
          ])
          | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
          | 'AddSlice' >> beam.Map(lambda x: ((), x))
          | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
          | 'ComputeMatrices' >> beam.Map(
              lambda x: (x[0], matrices.result(x[1])))  # pyformat: ignore
          | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))
      )  # pyformat: ignore
      # pylint: enable=no-value-for-parameter
      def check_result(got):
        # Exactly one (slice, metrics) pair for the single overall slice.
        try:
          self.assertLen(got, 1)
          got_slice_key, got_metrics = got[0]
          self.assertEqual(got_slice_key, ())
          self.assertLen(got_metrics, 1)
          key = metrics.keys[0]
          self.assertDictElementsAlmostEqual(
              got_metrics, {key: expected_value}, places=5)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result, label='result')
  @parameterized.named_parameters(
      ('auc', confusion_matrix_metrics.AUC(), 0.64286),
      ('auc_precision_recall', confusion_matrix_metrics.AUCPrecisionRecall(),
       0.37467),
      ('specificity_at_sensitivity',
       confusion_matrix_metrics.SpecificityAtSensitivity(0.5), 0.642857),
      ('sensitivity_at_specificity',
       confusion_matrix_metrics.SensitivityAtSpecificity(0.5), 1.0),
      ('precision_at_recall', confusion_matrix_metrics.PrecisionAtRecall(0.5),
       0.58333),
      ('recall_at_precision', confusion_matrix_metrics.RecallAtPrecision(0.5),
       1.0),
      ('true_positives', confusion_matrix_metrics.TruePositives(), 0.7),
      ('false_positives', confusion_matrix_metrics.FalsePositives(), 0.5),
      ('true_negatives', confusion_matrix_metrics.TrueNegatives(), 0.9),
      ('false_negatives', confusion_matrix_metrics.FalseNegatives(), 0.0),
      ('binary_accuracy', confusion_matrix_metrics.BinaryAccuracy(),
       (0.7 + 0.9) / (0.7 + 0.9 + 0.5 + 0.0)),
      ('precision', confusion_matrix_metrics.Precision(), 0.7 / (0.7 + 0.5)),
      ('recall', confusion_matrix_metrics.Recall(), 0.7 / (0.7 + 0.0)),
      ('specificity', confusion_matrix_metrics.Specificity(), 0.9 /
       (0.9 + 0.5)),
      ('fall_out', confusion_matrix_metrics.FallOut(), 0.5 / (0.5 + 0.9)),
      ('miss_rate', confusion_matrix_metrics.MissRate(), 0.0 / (0.0 + 0.7)),
      ('negative_predictive_value',
       confusion_matrix_metrics.NegativePredictiveValue(), 0.9 / (0.9 + 0.0)),
      ('false_discovery_rate', confusion_matrix_metrics.FalseDiscoveryRate(),
       0.5 / (0.5 + 0.7)),
      ('false_omission_rate', confusion_matrix_metrics.FalseOmissionRate(),
       0.0 / (0.0 + 0.9)),
      ('prevalence', confusion_matrix_metrics.Prevalence(),
       (0.7 + 0.0) / (0.7 + 0.9 + 0.5 + 0.0)),
      ('prevalence_threshold', confusion_matrix_metrics.PrevalenceThreshold(),
       (math.sqrt((0.7 / (0.7 + 0.0)) * (1.0 - (0.9 / (0.9 + 0.5)))) +
        (0.9 / (0.9 + 0.5) - 1.0)) / ((0.7 / (0.7 + 0.0) +
                                       (0.9 / (0.9 + 0.5)) - 1.0))),
      ('threat_score', confusion_matrix_metrics.ThreatScore(), 0.7 /
       (0.7 + 0.0 + 0.5)),
      ('balanced_accuracy', confusion_matrix_metrics.BalancedAccuracy(),
       ((0.7 / (0.7 + 0.0)) + (0.9 / (0.9 + 0.5))) / 2),
      ('f1_score', confusion_matrix_metrics.F1Score(), 2 * 0.7 /
       (2 * 0.7 + 0.5 + 0.0)),
      ('matthews_correlation_coefficient',
       confusion_matrix_metrics.MatthewsCorrelationCoefficient(),
       (0.7 * 0.9 - 0.5 * 0.0) / math.sqrt(
           (0.7 + 0.5) * (0.7 + 0.0) * (0.9 + 0.5) * (0.9 + 0.0))),
      ('fowlkes_mallows_index', confusion_matrix_metrics.FowlkesMallowsIndex(),
       math.sqrt(0.7 / (0.7 + 0.5) * 0.7 / (0.7 + 0.0))),
      ('informedness', confusion_matrix_metrics.Informedness(),
       (0.7 / (0.7 + 0.0)) + (0.9 / (0.9 + 0.5)) - 1.0),
      ('markedness', confusion_matrix_metrics.Markedness(),
       (0.7 / (0.7 + 0.5)) + (0.9 / (0.9 + 0.0)) - 1.0),
      ('positive_likelihood_ratio',
       confusion_matrix_metrics.PositiveLikelihoodRatio(),
       (0.7 / (0.7 + 0.0)) / (0.5 / (0.5 + 0.9))),
      ('negative_likelihood_ratio',
       confusion_matrix_metrics.NegativeLikelihoodRatio(),
       (0.0 / (0.0 + 0.7)) / (0.9 / (0.9 + 0.5))),
      ('predicted_positive_rate',
       confusion_matrix_metrics.PredictedPositiveRate(),
       (0.7 + 0.5) / (0.7 + 0.9 + 0.5 + 0.0)),
  )
  def testConfusionMatrixMetricsWithWeights(self, metric, expected_value):
    """Checks a derived metric with non-unit example weights."""
    if (_TF_MAJOR_VERSION < 2 and metric.__class__.__name__
        in ('SpecificityAtSensitivity', 'SensitivityAtSpecificity',
            'PrecisionAtRecall', 'RecallAtPrecision')):
      self.skipTest('Not supported in TFv1.')
    computations = metric.computations(example_weighted=True)
    histogram = computations[0]
    matrix = computations[1]
    derived_metric = computations[2]
    # With the example weights below, the weighted counts at the default
    # 0.5 decision threshold are:
    # tp = 0.7
    # tn = 0.9
    # fp = 0.5
    # fn = 0.0
    example1 = {
        'labels': np.array([0.0]),
        'predictions': np.array([1.0]),
        'example_weights': np.array([0.5]),
    }
    example2 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.7]),
        'example_weights': np.array([0.7]),
    }
    example3 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.5]),
        'example_weights': np.array([0.9]),
    }
    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([example1, example2, example3])
          | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
          | 'AddSlice' >> beam.Map(lambda x: ((), x))
          | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
          | 'ComputeConfusionMatrix' >> beam.Map(
              lambda x: (x[0], matrix.result(x[1])))  # pyformat: disable
          | 'ComputeMetric' >> beam.Map(  # pyformat: disable
              lambda x: (x[0], derived_metric.result(x[1]))))
      # pylint: enable=no-value-for-parameter
      def check_result(got):
        # Exactly one (slice, metrics) pair for the single overall slice.
        try:
          self.assertLen(got, 1)
          got_slice_key, got_metrics = got[0]
          self.assertEqual(got_slice_key, ())
          key = metric_types.MetricKey(name=metric.name, example_weighted=True)
          self.assertDictElementsAlmostEqual(
              got_metrics, {key: expected_value}, places=5)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result, label='result')
  @parameterized.named_parameters(
      ('auc', confusion_matrix_metrics.AUC(), 0.8571428),
      ('auc_precision_recall', confusion_matrix_metrics.AUCPrecisionRecall(),
       0.77369833),
      ('true_positives', confusion_matrix_metrics.TruePositives(), 1.4),
      ('false_positives', confusion_matrix_metrics.FalsePositives(), 0.6),
      ('true_negatives', confusion_matrix_metrics.TrueNegatives(), 1.0),
      ('false_negatives', confusion_matrix_metrics.FalseNegatives(), 0.0),
  )
  def testConfusionMatrixMetricsWithFractionalLabels(self, metric,
                                                     expected_value):
    """Checks metric values when labels are fractional values in [0, 1].

    The expected values in the parameters above are derived from the
    weighted positive/negative expansion shown in the table below.
    """
    computations = metric.computations(example_weighted=True)
    histogram = computations[0]
    matrix = computations[1]
    derived_metric = computations[2]
    # The following examples will be expanded to:
    #
    # prediction | label | weight
    #    0.0     |   -   |  1.0
    #    0.7     |   -   |  0.4
    #    0.7     |   +   |  0.6
    #    1.0     |   -   |  0.2
    #    1.0     |   +   |  0.8
    example1 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.0]),
        'example_weights': np.array([1.0]),
    }
    example2 = {
        'labels': np.array([0.6]),
        'predictions': np.array([0.7]),
        'example_weights': np.array([1.0]),
    }
    example3 = {
        'labels': np.array([0.8]),
        'predictions': np.array([1.0]),
        'example_weights': np.array([1.0]),
    }
    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([example1, example2, example3])
          | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
          | 'AddSlice' >> beam.Map(lambda x: ((), x))
          | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
          | 'ComputeConfusionMatrix' >> beam.Map(
              lambda x: (x[0], matrix.result(x[1])))  # pyformat: disable
          | 'ComputeMetric' >> beam.Map(  # pyformat: disable
              lambda x: (x[0], derived_metric.result(x[1]))))
      # pylint: enable=no-value-for-parameter
      def check_result(got):
        # Expect a single slice (the overall slice) with the expected value.
        try:
          self.assertLen(got, 1)
          got_slice_key, got_metrics = got[0]
          self.assertEqual(got_slice_key, ())
          key = metric_types.MetricKey(name=metric.name, example_weighted=True)
          self.assertDictElementsAlmostEqual(
              got_metrics, {key: expected_value}, places=5)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result, label='result')
  @parameterized.named_parameters(
      ('precision@2 (using sub_key)', confusion_matrix_metrics.Precision(), 2,
       1.6 / (1.6 + 3.2)),
      ('precision@2 (using param)', confusion_matrix_metrics.Precision(top_k=2),
       None, 1.6 / (1.6 + 3.2)),
      ('recall@2 (using sub_key)', confusion_matrix_metrics.Recall(), 2, 1.6 /
       (1.6 + 0.8)),
      ('recall@2 (using param)', confusion_matrix_metrics.Recall(top_k=2), None,
       1.6 / (1.6 + 0.8)),
      ('precision@3 (using sub_key)', confusion_matrix_metrics.Precision(), 3,
       1.9 / (1.9 + 5.3)),
      ('recall@3 (using sub_key)', confusion_matrix_metrics.Recall(), 3, 1.9 /
       (1.9 + 0.5)),
  )
  def testConfusionMatrixMetricsWithTopK(self, metric, top_k, expected_value):
    """Checks precision/recall@k with top_k given as a sub_key or a param."""
    computations = metric.computations(
        sub_keys=[metric_types.SubKey(top_k=top_k)], example_weighted=True)
    histogram = computations[0]
    matrix = computations[1]
    derived_metric = computations[2]
    # Expected weighted counts over the four examples below:
    # top_k = 2
    # TP = 0.5*0 + 0.7*1 + 0.9*1 + 0.3*0 = 1.6
    # FP = 0.5*2 + 0.7*1 + 0.9*1 + 0.3*2 = 3.2
    # FN = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*1 = 0.8
    #
    # top_k = 3
    # TP = 0.5*0 + 0.7*1 + 0.9*1 + 0.3*1 = 1.9
    # FP = 0.5*3 + 0.7*2 + 0.9*2 + 0.3*2 = 5.3
    # FN = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*0 = 0.5
    example1 = {
        'labels': np.array([2]),
        'predictions': np.array([0.1, 0.2, 0.1, 0.25, 0.35]),
        'example_weights': np.array([0.5]),
    }
    example2 = {
        'labels': np.array([1]),
        'predictions': np.array([0.2, 0.3, 0.05, 0.15, 0.3]),
        'example_weights': np.array([0.7]),
    }
    example3 = {
        'labels': np.array([3]),
        'predictions': np.array([0.01, 0.2, 0.09, 0.5, 0.2]),
        'example_weights': np.array([0.9]),
    }
    example4 = {
        'labels': np.array([1]),
        'predictions': np.array([0.3, 0.2, 0.05, 0.4, 0.05]),
        'example_weights': np.array([0.3]),
    }
    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([example1, example2, example3, example4])
          | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
          | 'AddSlice' >> beam.Map(lambda x: ((), x))
          | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
          | 'ComputeConfusionMatrix' >> beam.Map(
              lambda x: (x[0], matrix.result(x[1])))  # pyformat: disable
          | 'ComputeMetric' >> beam.Map(  # pyformat: disable
              lambda x: (x[0], derived_metric.result(x[1]))))
      # pylint: enable=no-value-for-parameter
      def check_result(got):
        try:
          self.assertLen(got, 1)
          got_slice_key, got_metrics = got[0]
          self.assertEqual(got_slice_key, ())
          # When top_k was passed as a metric param (top_k arg is None), the
          # key's sub_key is derived from the metric's own configuration.
          if top_k:
            sub_key = metric_types.SubKey(top_k=top_k)
          else:
            sub_key = metric_types.SubKey(top_k=metric.get_config()['top_k'])
          key = metric_types.MetricKey(
              name=metric.name, sub_key=sub_key, example_weighted=True)
          self.assertDictElementsAlmostEqual(
              got_metrics, {key: expected_value}, places=5)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result, label='result')
  @parameterized.named_parameters(
      ('precision (class_id=1 using sub_key)',
       confusion_matrix_metrics.Precision(thresholds=[0.1]), 1, 0.5 /
       (0.5 + 1.6)),
      ('precision (class_id=1 using param)',
       confusion_matrix_metrics.Precision(
           class_id=1, thresholds=[0.1]), None, 0.5 / (0.5 + 1.6)),
      ('recall (class_id=3 using sub_key)',
       confusion_matrix_metrics.Recall(thresholds=[0.1]), 3, 0.7 / (0.7 + 0.9)),
      ('recall (class_id=3 using param)',
       confusion_matrix_metrics.Recall(
           class_id=3, thresholds=[0.1]), None, 0.7 / (0.7 + 0.9)),
  )
  def testConfusionMatrixMetricsWithClassId(self, metric, class_id,
                                            expected_value):
    """Checks per-class metrics with class_id given as sub_key or param."""
    computations = metric.computations(
        sub_keys=[metric_types.SubKey(class_id=class_id)],
        example_weighted=True)
    histogram = computations[0]
    matrix = computations[1]
    derived_metric = computations[2]
    # Expected weighted counts over the four examples below:
    # class_id = 1, threshold = 0.1
    # TP = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*0 = 0.5
    # FP = 0.5*0 + 0.7*1 + 0.9*1 + 0.3*0 = 1.6
    # FN = 0.5*0 + 0.7*0 + 0.9*0 + 0.3*1 = 0.3
    #
    # class_id = 3, threshold = 0.1
    # TP = 0.5*0 + 0.7*1 + 0.9*0 + 0.3*0 = 0.7
    # FP = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*1 = 0.8
    # FN = 0.5*0 + 0.7*0 + 0.9*1 + 0.3*0 = 0.9
    example1 = {
        'labels': np.array([1]),
        'predictions': np.array([0.1, 0.2, 0.1, 0.25, 0.35]),
        'example_weights': np.array([0.5]),
    }
    example2 = {
        'labels': np.array([3]),
        'predictions': np.array([0.2, 0.3, 0.05, 0.15, 0.3]),
        'example_weights': np.array([0.7]),
    }
    example3 = {
        'labels': np.array([3]),
        'predictions': np.array([0.01, 0.2, 0.2, 0.09, 0.5]),
        'example_weights': np.array([0.9]),
    }
    example4 = {
        'labels': np.array([1]),
        'predictions': np.array([0.1, 0.05, 0.3, 0.4, 0.05]),
        'example_weights': np.array([0.3]),
    }
    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([example1, example2, example3, example4])
          | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
          | 'AddSlice' >> beam.Map(lambda x: ((), x))
          | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
          | 'ComputeConfusionMatrix' >> beam.Map(
              lambda x: (x[0], matrix.result(x[1])))  # pyformat: disable
          | 'ComputeMetric' >> beam.Map(  # pyformat: disable
              lambda x: (x[0], derived_metric.result(x[1]))))
      # pylint: enable=no-value-for-parameter
      def check_result(got):
        try:
          self.assertLen(got, 1)
          got_slice_key, got_metrics = got[0]
          self.assertEqual(got_slice_key, ())
          # When class_id was passed as a metric param (class_id arg is None),
          # the key's sub_key is derived from the metric's own configuration.
          if class_id:
            sub_key = metric_types.SubKey(class_id=class_id)
          else:
            sub_key = metric_types.SubKey(
                class_id=metric.get_config()['class_id'])
          key = metric_types.MetricKey(
              name=metric.name, sub_key=sub_key, example_weighted=True)
          self.assertDictElementsAlmostEqual(
              got_metrics, {key: expected_value}, places=5)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result, label='result')
  def testConfusionMatrixMetricsWithNan(self):
    """Specificity yields NaN when there are no negative examples."""
    computations = confusion_matrix_metrics.Specificity().computations(
        example_weighted=True)
    histogram = computations[0]
    matrices = computations[1]
    metrics = computations[2]
    # A single positive example: tn and fp are both 0, so specificity
    # (tn / (tn + fp)) is undefined and should come back as NaN.
    example1 = {
        'labels': np.array([1.0]),
        'predictions': np.array([1.0]),
        'example_weights': np.array([1.0]),
    }
    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([example1])
          | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
          | 'AddSlice' >> beam.Map(lambda x: ((), x))
          | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
          | 'ComputeMatrices' >> beam.Map(
              lambda x: (x[0], matrices.result(x[1])))  # pyformat: ignore
          | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))
      )  # pyformat: ignore
      # pylint: enable=no-value-for-parameter
      def check_result(got):
        try:
          self.assertLen(got, 1)
          got_slice_key, got_metrics = got[0]
          self.assertEqual(got_slice_key, ())
          self.assertLen(got_metrics, 1)
          key = metrics.keys[0]
          self.assertIn(key, got_metrics)
          self.assertTrue(math.isnan(got_metrics[key]))
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result, label='result')
def testRaisesErrorIfClassIDAndTopKBothUsed(self):
with self.assertRaisesRegex(
ValueError,
'Metric precision is configured with both class_id=2 and top_k=2 '
'settings. Only one may be specified at a time.'):
confusion_matrix_metrics.Precision(
class_id=2, top_k=2).computations(example_weighted=True)
@parameterized.named_parameters(
('class_id as param and class_id as sub_key',
confusion_matrix_metrics.Precision(class_id=2), 2, None),
('top_k as param and top_k as sub_key',
confusion_matrix_metrics.Precision(top_k=2), None, 2),
('class_id as param and top_k as sub_key',
confusion_matrix_metrics.Precision(class_id=2), None, 2),
)
def testRaisesErrorIfOverlappingSettings(self, metric, class_id, top_k):
with self.assertRaisesRegex(ValueError,
'.*is configured with overlapping settings.*'):
metric.computations(
sub_keys=[metric_types.SubKey(class_id=class_id, top_k=top_k)])
  def testConfusionMatrixAtThresholds(self):
    """ConfusionMatrixAtThresholds returns per-threshold tp/tn/fp/fn."""
    computations = confusion_matrix_metrics.ConfusionMatrixAtThresholds(
        thresholds=[0.3, 0.5, 0.8]).computations(example_weighted=True)
    histogram = computations[0]
    matrices = computations[1]
    metrics = computations[2]
    # Expected counts (consistent with predictions counted positive when
    # strictly above the threshold):
    #   t=0.3: tp=1 (0.9), tn=1 (0.0), fp=1 (0.5), fn=1 (0.3)
    #   t=0.5: tp=1,       tn=2,       fp=0,       fn=1
    #   t=0.8: tp=1,       tn=2,       fp=0,       fn=1
    example1 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.0]),
        'example_weights': np.array([1.0]),
    }
    example2 = {
        'labels': np.array([0.0]),
        'predictions': np.array([0.5]),
        'example_weights': np.array([1.0]),
    }
    example3 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.3]),
        'example_weights': np.array([1.0]),
    }
    example4 = {
        'labels': np.array([1.0]),
        'predictions': np.array([0.9]),
        'example_weights': np.array([1.0]),
    }
    with beam.Pipeline() as pipeline:
      # pylint: disable=no-value-for-parameter
      result = (
          pipeline
          | 'Create' >> beam.Create([example1, example2, example3, example4])
          | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
          | 'AddSlice' >> beam.Map(lambda x: ((), x))
          | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
          | 'ComputeMatrices' >> beam.Map(
              lambda x: (x[0], matrices.result(x[1])))  # pyformat: ignore
          | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))
      )  # pyformat: ignore
      # pylint: enable=no-value-for-parameter
      def check_result(got):
        try:
          self.assertLen(got, 1)
          got_slice_key, got_metrics = got[0]
          self.assertEqual(got_slice_key, ())
          self.assertLen(got_metrics, 1)
          key = metric_types.MetricKey(
              name='confusion_matrix_at_thresholds', example_weighted=True)
          self.assertIn(key, got_metrics)
          got_metric = got_metrics[key]
          self.assertEqual(
              binary_confusion_matrices.Matrices(
                  thresholds=[0.3, 0.5, 0.8],
                  tp=[1.0, 1.0, 1.0],
                  tn=[1.0, 2.0, 2.0],
                  fp=[1.0, 0.0, 0.0],
                  fn=[1.0, 1.0, 1.0]), got_metric)
        except AssertionError as err:
          raise util.BeamAssertException(err)
      util.assert_that(result, check_result, label='result')
if __name__ == '__main__':
  # The metrics under test require TF2 behavior even when run under TF1.
  tf.compat.v1.enable_v2_behavior()
  tf.test.main()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 summary ops from summary_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine.sequential import Sequential
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SummaryOpsCoreTest(test_util.TensorFlowTestCase):
  def testWrite(self):
    """write() in eager mode records an event with the given tag and step."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      with summary_ops.create_file_writer_v2(logdir).as_default():
        output = summary_ops.write('tag', 42, step=12)
        self.assertTrue(output.numpy())
    # events[0] is the file_version event; events[1] is the written summary.
    events = events_from_logdir(logdir)
    self.assertEqual(2, len(events))
    self.assertEqual(12, events[1].step)
    value = events[1].summary.value[0]
    self.assertEqual('tag', value.tag)
    self.assertEqual(42, to_numpy(value))
  def testWrite_fromFunction(self):
    """write() also records when invoked from inside a tf.function."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      writer = summary_ops.create_file_writer_v2(logdir)
      @def_function.function
      def f():
        with writer.as_default():
          return summary_ops.write('tag', 42, step=12)
      output = f()
      self.assertTrue(output.numpy())
    events = events_from_logdir(logdir)
    self.assertEqual(2, len(events))
    self.assertEqual(12, events[1].step)
    value = events[1].summary.value[0]
    self.assertEqual('tag', value.tag)
    self.assertEqual(42, to_numpy(value))
  def testWrite_metadata(self):
    """metadata= accepts a proto, serialized bytes, or a string tensor."""
    logdir = self.get_temp_dir()
    metadata = summary_pb2.SummaryMetadata()
    metadata.plugin_data.plugin_name = 'foo'
    with context.eager_mode():
      with summary_ops.create_file_writer_v2(logdir).as_default():
        summary_ops.write('obj', 0, 0, metadata=metadata)
        summary_ops.write('bytes', 0, 0, metadata=metadata.SerializeToString())
        m = constant_op.constant(metadata.SerializeToString())
        summary_ops.write('string_tensor', 0, 0, metadata=m)
    # All three forms should round-trip to the same metadata proto.
    events = events_from_logdir(logdir)
    self.assertEqual(4, len(events))
    self.assertEqual(metadata, events[1].summary.value[0].metadata)
    self.assertEqual(metadata, events[2].summary.value[0].metadata)
    self.assertEqual(metadata, events[3].summary.value[0].metadata)
def testWrite_name(self):
@def_function.function
def f():
output = summary_ops.write('tag', 42, step=12, name='anonymous')
self.assertTrue(output.name.startswith('anonymous'))
f()
  def testWrite_ndarray(self):
    """write() accepts a nested list (array-like) tensor value."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      with summary_ops.create_file_writer_v2(logdir).as_default():
        summary_ops.write('tag', [[1, 2], [3, 4]], step=12)
    events = events_from_logdir(logdir)
    value = events[1].summary.value[0]
    self.assertAllEqual([[1, 2], [3, 4]], to_numpy(value))
  def testWrite_tensor(self):
    """write() accepts an eager tensor value."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      t = constant_op.constant([[1, 2], [3, 4]])
      with summary_ops.create_file_writer_v2(logdir).as_default():
        summary_ops.write('tag', t, step=12)
      expected = t.numpy()
    events = events_from_logdir(logdir)
    value = events[1].summary.value[0]
    self.assertAllEqual(expected, to_numpy(value))
  def testWrite_tensor_fromFunction(self):
    """write() accepts a tensor argument passed into a tf.function."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      writer = summary_ops.create_file_writer_v2(logdir)
      @def_function.function
      def f(t):
        with writer.as_default():
          summary_ops.write('tag', t, step=12)
      t = constant_op.constant([[1, 2], [3, 4]])
      f(t)
      expected = t.numpy()
    events = events_from_logdir(logdir)
    value = events[1].summary.value[0]
    self.assertAllEqual(expected, to_numpy(value))
  def testWrite_stringTensor(self):
    """write() accepts a list of byte strings."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      with summary_ops.create_file_writer_v2(logdir).as_default():
        summary_ops.write('tag', [b'foo', b'bar'], step=12)
    events = events_from_logdir(logdir)
    value = events[1].summary.value[0]
    self.assertAllEqual([b'foo', b'bar'], to_numpy(value))
  @test_util.run_gpu_only
  def testWrite_gpuDeviceContext(self):
    """write() works under an explicit /GPU:0 device scope."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      with summary_ops.create_file_writer(logdir).as_default():
        with ops.device('/GPU:0'):
          value = constant_op.constant(42.0)
          step = constant_op.constant(12, dtype=dtypes.int64)
          summary_ops.write('tag', value, step=step).numpy()
    # The recorded event should carry the value with empty (default) metadata.
    empty_metadata = summary_pb2.SummaryMetadata()
    events = events_from_logdir(logdir)
    self.assertEqual(2, len(events))
    self.assertEqual(12, events[1].step)
    self.assertEqual(42, to_numpy(events[1].summary.value[0]))
    self.assertEqual(empty_metadata, events[1].summary.value[0].metadata)
@test_util.also_run_as_tf_function
def testWrite_noDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42, step=0))
@test_util.also_run_as_tf_function
def testWrite_noStep_okayIfAlsoNoDefaultWriter(self):
# Use assertAllEqual instead of assertFalse since it works in a defun.
self.assertAllEqual(False, summary_ops.write('tag', 42))
@test_util.also_run_as_tf_function
def testWrite_noStep(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer(logdir).as_default():
with self.assertRaisesRegex(ValueError, 'No step set'):
summary_ops.write('tag', 42)
  def testWrite_usingDefaultStep(self):
    """write() without step= uses the default set via set_step()."""
    logdir = self.get_temp_dir()
    try:
      with context.eager_mode():
        with summary_ops.create_file_writer(logdir).as_default():
          summary_ops.set_step(1)
          summary_ops.write('tag', 1.0)
          summary_ops.set_step(2)
          summary_ops.write('tag', 1.0)
          # A variable default step tracks subsequent updates.
          mystep = variables.Variable(10, dtype=dtypes.int64)
          summary_ops.set_step(mystep)
          summary_ops.write('tag', 1.0)
          mystep.assign_add(1)
          summary_ops.write('tag', 1.0)
      events = events_from_logdir(logdir)
      self.assertEqual(5, len(events))
      self.assertEqual(1, events[1].step)
      self.assertEqual(2, events[2].step)
      self.assertEqual(10, events[3].step)
      self.assertEqual(11, events[4].step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)
  def testWrite_usingDefaultStepConstant_fromFunction(self):
    """A non-variable default step is captured at tf.function trace time."""
    logdir = self.get_temp_dir()
    try:
      with context.eager_mode():
        writer = summary_ops.create_file_writer(logdir)
        @def_function.function
        def f():
          with writer.as_default():
            summary_ops.write('tag', 1.0)
        summary_ops.set_step(1)
        f()
        summary_ops.set_step(2)
        f()
      events = events_from_logdir(logdir)
      self.assertEqual(3, len(events))
      self.assertEqual(1, events[1].step)
      # The step value will still be 1 because the value was captured at the
      # time the function was first traced.
      self.assertEqual(1, events[2].step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)
  def testWrite_usingDefaultStepVariable_fromFunction(self):
    """A variable default step is read at call time, not at trace time."""
    logdir = self.get_temp_dir()
    try:
      with context.eager_mode():
        writer = summary_ops.create_file_writer(logdir)
        @def_function.function
        def f():
          with writer.as_default():
            summary_ops.write('tag', 1.0)
        mystep = variables.Variable(0, dtype=dtypes.int64)
        summary_ops.set_step(mystep)
        f()
        mystep.assign_add(1)
        f()
        mystep.assign(10)
        f()
      events = events_from_logdir(logdir)
      self.assertEqual(4, len(events))
      self.assertEqual(0, events[1].step)
      self.assertEqual(1, events[2].step)
      self.assertEqual(10, events[3].step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)
  def testWrite_usingDefaultStepConstant_fromLegacyGraph(self):
    """In graph mode a non-variable default step is frozen at graph build."""
    logdir = self.get_temp_dir()
    try:
      with context.graph_mode():
        writer = summary_ops.create_file_writer(logdir)
        summary_ops.set_step(1)
        with writer.as_default():
          write_op = summary_ops.write('tag', 1.0)
        # Changing the default after graph construction has no effect below.
        summary_ops.set_step(2)
        with self.cached_session() as sess:
          sess.run(writer.init())
          sess.run(write_op)
          sess.run(write_op)
          sess.run(writer.flush())
      events = events_from_logdir(logdir)
      self.assertEqual(3, len(events))
      self.assertEqual(1, events[1].step)
      # The step value will still be 1 because the value was captured at the
      # time the graph was constructed.
      self.assertEqual(1, events[2].step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)
  def testWrite_usingDefaultStepVariable_fromLegacyGraph(self):
    """In graph mode a variable default step is read at each sess.run()."""
    logdir = self.get_temp_dir()
    try:
      with context.graph_mode():
        writer = summary_ops.create_file_writer(logdir)
        mystep = variables.Variable(0, dtype=dtypes.int64)
        summary_ops.set_step(mystep)
        with writer.as_default():
          write_op = summary_ops.write('tag', 1.0)
        first_assign_op = mystep.assign_add(1)
        second_assign_op = mystep.assign(10)
        with self.cached_session() as sess:
          sess.run(writer.init())
          sess.run(mystep.initializer)
          sess.run(write_op)
          sess.run(first_assign_op)
          sess.run(write_op)
          sess.run(second_assign_op)
          sess.run(write_op)
          sess.run(writer.flush())
      events = events_from_logdir(logdir)
      self.assertEqual(4, len(events))
      self.assertEqual(0, events[1].step)
      self.assertEqual(1, events[2].step)
      self.assertEqual(10, events[3].step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)
  def testWrite_recordIf_constant(self):
    """record_if(bool) gates both recording and write()'s return value."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      with summary_ops.create_file_writer_v2(logdir).as_default():
        self.assertTrue(summary_ops.write('default', 1, step=0))
        with summary_ops.record_if(True):
          self.assertTrue(summary_ops.write('set_on', 1, step=0))
        with summary_ops.record_if(False):
          self.assertFalse(summary_ops.write('set_off', 1, step=0))
    # Only the 'default' and 'set_on' writes should have been recorded.
    events = events_from_logdir(logdir)
    self.assertEqual(3, len(events))
    self.assertEqual('default', events[1].summary.value[0].tag)
    self.assertEqual('set_on', events[2].summary.value[0].tag)
  def testWrite_recordIf_constant_fromFunction(self):
    """record_if(bool) also applies inside a tf.function."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      writer = summary_ops.create_file_writer_v2(logdir)
      @def_function.function
      def f():
        with writer.as_default():
          # Use assertAllEqual instead of assertTrue since it works in a defun.
          self.assertAllEqual(summary_ops.write('default', 1, step=0), True)
          with summary_ops.record_if(True):
            self.assertAllEqual(summary_ops.write('set_on', 1, step=0), True)
          with summary_ops.record_if(False):
            self.assertAllEqual(summary_ops.write('set_off', 1, step=0), False)
      f()
      events = events_from_logdir(logdir)
      self.assertEqual(3, len(events))
      self.assertEqual('default', events[1].summary.value[0].tag)
      self.assertEqual('set_on', events[2].summary.value[0].tag)
  def testWrite_recordIf_callable(self):
    """record_if(callable) re-evaluates the predicate on each write()."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      step = variables.Variable(-1, dtype=dtypes.int64)
      # Predicate increments the step and records only on even steps.
      def record_fn():
        step.assign_add(1)
        return int(step % 2) == 0
      with summary_ops.create_file_writer_v2(logdir).as_default():
        with summary_ops.record_if(record_fn):
          self.assertTrue(summary_ops.write('tag', 1, step=step))
          self.assertFalse(summary_ops.write('tag', 1, step=step))
          self.assertTrue(summary_ops.write('tag', 1, step=step))
          self.assertFalse(summary_ops.write('tag', 1, step=step))
          self.assertTrue(summary_ops.write('tag', 1, step=step))
    events = events_from_logdir(logdir)
    self.assertEqual(4, len(events))
    self.assertEqual(0, events[1].step)
    self.assertEqual(2, events[2].step)
    self.assertEqual(4, events[3].step)
  def testWrite_recordIf_callable_fromFunction(self):
    """A tf.function record_if predicate keeps state across calls."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      writer = summary_ops.create_file_writer_v2(logdir)
      step = variables.Variable(-1, dtype=dtypes.int64)
      # Predicate increments the step and records only on even steps.
      @def_function.function
      def record_fn():
        step.assign_add(1)
        return math_ops.equal(step % 2, 0)
      @def_function.function
      def f():
        with writer.as_default():
          with summary_ops.record_if(record_fn):
            return [
                summary_ops.write('tag', 1, step=step),
                summary_ops.write('tag', 1, step=step),
                summary_ops.write('tag', 1, step=step)]
      self.assertAllEqual(f(), [True, False, True])
      self.assertAllEqual(f(), [False, True, False])
    events = events_from_logdir(logdir)
    self.assertEqual(4, len(events))
    self.assertEqual(0, events[1].step)
    self.assertEqual(2, events[2].step)
    self.assertEqual(4, events[3].step)
  def testWrite_recordIf_tensorInput_fromFunction(self):
    """record_if() accepts a tensor condition computed from a function arg."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      writer = summary_ops.create_file_writer_v2(logdir)
      @def_function.function(input_signature=[
          tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64)])
      def f(step):
        with writer.as_default():
          with summary_ops.record_if(math_ops.equal(step % 2, 0)):
            return summary_ops.write('tag', 1, step=step)
      self.assertTrue(f(0))
      self.assertFalse(f(1))
      self.assertTrue(f(2))
      self.assertFalse(f(3))
      self.assertTrue(f(4))
    events = events_from_logdir(logdir)
    self.assertEqual(4, len(events))
    self.assertEqual(0, events[1].step)
    self.assertEqual(2, events[2].step)
    self.assertEqual(4, events[3].step)
@test_util.also_run_as_tf_function
def testGetSetStep(self):
try:
self.assertIsNone(summary_ops.get_step())
summary_ops.set_step(1)
# Use assertAllEqual instead of assertEqual since it works in a defun.
self.assertAllEqual(1, summary_ops.get_step())
summary_ops.set_step(constant_op.constant(2))
self.assertAllEqual(2, summary_ops.get_step())
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
  def testGetSetStep_variable(self):
    """set_step(variable) tracks updates and keeps the variable alive."""
    with context.eager_mode():
      try:
        mystep = variables.Variable(0)
        summary_ops.set_step(mystep)
        self.assertAllEqual(0, summary_ops.get_step().read_value())
        mystep.assign_add(1)
        self.assertAllEqual(1, summary_ops.get_step().read_value())
        # Check that set_step() properly maintains reference to variable.
        del mystep
        self.assertAllEqual(1, summary_ops.get_step().read_value())
        summary_ops.get_step().assign_add(1)
        self.assertAllEqual(2, summary_ops.get_step().read_value())
      finally:
        # Reset to default state for other tests.
        summary_ops.set_step(None)
  def testGetSetStep_variable_fromFunction(self):
    """A variable default step can be set and updated from tf.functions."""
    with context.eager_mode():
      try:
        @def_function.function
        def set_step(step):
          summary_ops.set_step(step)
          return summary_ops.get_step()
        @def_function.function
        def get_and_increment():
          summary_ops.get_step().assign_add(1)
          return summary_ops.get_step()
        mystep = variables.Variable(0)
        self.assertAllEqual(0, set_step(mystep))
        self.assertAllEqual(0, summary_ops.get_step().read_value())
        self.assertAllEqual(1, get_and_increment())
        self.assertAllEqual(2, get_and_increment())
        # Check that set_step() properly maintains reference to variable.
        del mystep
        self.assertAllEqual(3, get_and_increment())
      finally:
        # Reset to default state for other tests.
        summary_ops.set_step(None)
@test_util.also_run_as_tf_function
def testSummaryScope(self):
with summary_ops.summary_scope('foo') as (tag, scope):
self.assertEqual('foo', tag)
self.assertEqual('foo/', scope)
with summary_ops.summary_scope('bar') as (tag, scope):
self.assertEqual('foo/bar', tag)
self.assertEqual('foo/bar/', scope)
with summary_ops.summary_scope('with/slash') as (tag, scope):
self.assertEqual('foo/with/slash', tag)
self.assertEqual('foo/with/slash/', scope)
with ops.name_scope(None):
with summary_ops.summary_scope('unnested') as (tag, scope):
self.assertEqual('unnested', tag)
self.assertEqual('unnested/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_defaultName(self):
with summary_ops.summary_scope(None) as (tag, scope):
self.assertEqual('summary', tag)
self.assertEqual('summary/', scope)
with summary_ops.summary_scope(None, 'backup') as (tag, scope):
self.assertEqual('backup', tag)
self.assertEqual('backup/', scope)
@test_util.also_run_as_tf_function
def testSummaryScope_handlesCharactersIllegalForScope(self):
with summary_ops.summary_scope('f?o?o') as (tag, scope):
self.assertEqual('f?o?o', tag)
self.assertEqual('foo/', scope)
# If all characters aren't legal for a scope name, use default name.
with summary_ops.summary_scope('???', 'backup') as (tag, scope):
self.assertEqual('???', tag)
self.assertEqual('backup/', scope)
  @test_util.also_run_as_tf_function
  def testSummaryScope_nameNotUniquifiedForTag(self):
    """Tags keep the requested name even when the scope gets uniquified."""
    constant_op.constant(0, name='foo')
    # 'foo' is already taken as an op name; the tag is still plain 'foo'.
    with summary_ops.summary_scope('foo') as (tag, _):
      self.assertEqual('foo', tag)
    with summary_ops.summary_scope('foo') as (tag, _):
      self.assertEqual('foo', tag)
    with ops.name_scope('with'):
      constant_op.constant(0, name='slash')
    with summary_ops.summary_scope('with/slash') as (tag, _):
      self.assertEqual('with/slash', tag)
class SummaryWriterTest(test_util.TensorFlowTestCase):
  def testCreate_withInitAndClose(self):
    """init() on an open writer is a no-op; close() flushes queued events."""
    logdir = self.get_temp_dir()
    with context.eager_mode():
      # Large max_queue/flush_millis so nothing is flushed automatically.
      writer = summary_ops.create_file_writer_v2(
          logdir, max_queue=1000, flush_millis=1000000)
      get_total = lambda: len(events_from_logdir(logdir))
      self.assertEqual(1, get_total())  # file_version Event
      # Calling init() again while writer is open has no effect
      writer.init()
      self.assertEqual(1, get_total())
      with writer.as_default():
        summary_ops.write('tag', 1, step=0)
        self.assertEqual(1, get_total())
        # Calling .close() should do an implicit flush
        writer.close()
        self.assertEqual(2, get_total())
  def testCreate_fromFunction(self):
    """Writer creation inside a tf.function creates exactly one event file."""
    logdir = self.get_temp_dir()
    @def_function.function
    def f():
      # Returned SummaryWriter must be stored in a non-local variable so it
      # lives throughout the function execution.
      if not hasattr(f, 'writer'):
        f.writer = summary_ops.create_file_writer_v2(logdir)
    with context.eager_mode():
      f()
    event_files = gfile.Glob(os.path.join(logdir, '*'))
    self.assertEqual(1, len(event_files))
def testCreate_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
with context.graph_mode():
logdir_tensor = constant_op.constant(logdir)
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
summary_ops.create_file_writer_v2(logdir_tensor)
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_graphTensorArgument_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
summary_ops.create_file_writer_v2(constant_op.constant(logdir))
with context.eager_mode():
with self.assertRaisesRegex(
ValueError, 'Invalid graph Tensor argument.*logdir'):
f()
self.assertEmpty(gfile.Glob(os.path.join(logdir, '*')))
def testCreate_fromFunction_unpersistedResource_raisesError(self):
logdir = self.get_temp_dir()
@def_function.function
def f():
with summary_ops.create_file_writer_v2(logdir).as_default():
pass # Calling .as_default() is enough to indicate use.
with context.eager_mode():
# TODO(nickfelt): change this to a better error
with self.assertRaisesRegex(
errors.NotFoundError, 'Resource.*does not exist'):
f()
# Even though we didn't use it, an event file will have been created.
self.assertEqual(1, len(gfile.Glob(os.path.join(logdir, '*'))))
def testCreate_immediateSetAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
try:
with context.eager_mode():
summary_ops.create_file_writer_v2(logdir).set_as_default()
summary_ops.flush()
finally:
# Ensure we clean up no matter how the test executes.
context.context().summary_writer_resource = None
def testCreate_immediateAsDefault_retainsReference(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.flush()
def testNoSharing(self):
# Two writers with the same logdir should not share state.
logdir = self.get_temp_dir()
with context.eager_mode():
writer1 = summary_ops.create_file_writer_v2(logdir)
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
writer2 = summary_ops.create_file_writer_v2(logdir)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
with writer1.as_default():
summary_ops.write('tag', 1, step=1)
with writer2.as_default():
summary_ops.write('tag', 1, step=2)
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testNoSharing_fromFunction(self):
logdir = self.get_temp_dir()
@def_function.function
def f1():
if not hasattr(f1, 'writer'):
f1.writer = summary_ops.create_file_writer_v2(logdir)
with f1.writer.as_default():
summary_ops.write('tag', 1, step=1)
@def_function.function
def f2():
if not hasattr(f2, 'writer'):
f2.writer = summary_ops.create_file_writer_v2(logdir)
with f2.writer.as_default():
summary_ops.write('tag', 1, step=2)
with context.eager_mode():
f1()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(event_files))
file1 = event_files[0]
f2()
event_files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(2, len(event_files))
event_files.remove(file1)
file2 = event_files[0]
# Extra writes to ensure interleaved usage works.
f1()
f2()
events = iter(events_from_file(file1))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(1, next(events).step)
self.assertEqual(1, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
events = iter(events_from_file(file2))
self.assertEqual('brain.Event:2', next(events).file_version)
self.assertEqual(2, next(events).step)
self.assertEqual(2, next(events).step)
self.assertRaises(StopIteration, lambda: next(events))
def testMaxQueue(self):
logdir = self.get_temp_dir()
with context.eager_mode():
with summary_ops.create_file_writer_v2(
logdir, max_queue=1, flush_millis=999999).as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
# Should flush after second summary since max_queue = 1
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
def testWriterFlush(self):
logdir = self.get_temp_dir()
get_total = lambda: len(events_from_logdir(logdir))
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=1000, flush_millis=1000000)
self.assertEqual(1, get_total()) # file_version Event
with writer.as_default():
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
writer.flush()
self.assertEqual(2, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(2, get_total())
# Exiting the "as_default()" should do an implicit flush
self.assertEqual(3, get_total())
def testFlushFunction(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(
logdir, max_queue=999999, flush_millis=999999)
with writer.as_default():
get_total = lambda: len(events_from_logdir(logdir))
# Note: First tf.Event is always file_version.
self.assertEqual(1, get_total())
summary_ops.write('tag', 1, step=0)
summary_ops.write('tag', 1, step=0)
self.assertEqual(1, get_total())
summary_ops.flush()
self.assertEqual(3, get_total())
# Test "writer" parameter
summary_ops.write('tag', 1, step=0)
self.assertEqual(3, get_total())
summary_ops.flush(writer=writer)
self.assertEqual(4, get_total())
summary_ops.write('tag', 1, step=0)
self.assertEqual(4, get_total())
summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access
self.assertEqual(5, get_total())
@test_util.assert_no_new_tensors
def testNoMemoryLeak_graphMode(self):
logdir = self.get_temp_dir()
with context.graph_mode(), ops.Graph().as_default():
summary_ops.create_file_writer_v2(logdir)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testNoMemoryLeak_eagerMode(self):
logdir = self.get_temp_dir()
with summary_ops.create_file_writer_v2(logdir).as_default():
summary_ops.write('tag', 1, step=0)
def testClose_preventsLaterUse(self):
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
writer.close()
writer.close() # redundant close() is a no-op
writer.flush() # redundant flush() is a no-op
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.init()
with self.assertRaisesRegex(RuntimeError, 'already closed'):
with writer.as_default():
self.fail('should not get here')
with self.assertRaisesRegex(RuntimeError, 'already closed'):
writer.set_as_default()
def testClose_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
writer.close()
self.assertNotIn(eventfile, get_open_filenames())
def testDereference_closesOpenFile(self):
try:
import psutil # pylint: disable=g-import-not-at-top
except ImportError:
raise unittest.SkipTest('test requires psutil')
proc = psutil.Process()
get_open_filenames = lambda: set(info[0] for info in proc.open_files())
logdir = self.get_temp_dir()
with context.eager_mode():
writer = summary_ops.create_file_writer_v2(logdir)
files = gfile.Glob(os.path.join(logdir, '*'))
self.assertEqual(1, len(files))
eventfile = files[0]
self.assertIn(eventfile, get_open_filenames())
del writer
self.assertNotIn(eventfile, get_open_filenames())
class SummaryOpsTest(test_util.TensorFlowTestCase):
  """Tests for higher-level summary ops: run_metadata, keras_model, tracing.

  The run_* / keras_model helpers write one summary to a fresh logdir and
  return the second recorded event (the first is always `file_version`).
  """

  def tearDown(self):
    # Tests may enable tracing; always restore the default (off).
    summary_ops.trace_off()

  def run_metadata(self, *args, **kwargs):
    # Helper: write a run_metadata summary and return its recorded event.
    assert context.executing_eagerly()
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir)
    with writer.as_default():
      summary_ops.run_metadata(*args, **kwargs)
    writer.close()
    events = events_from_logdir(logdir)
    return events[1]

  def run_metadata_graphs(self, *args, **kwargs):
    # Helper: like run_metadata but writes only the graph portion.
    assert context.executing_eagerly()
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir)
    with writer.as_default():
      summary_ops.run_metadata_graphs(*args, **kwargs)
    writer.close()
    events = events_from_logdir(logdir)
    return events[1]

  def create_run_metadata(self):
    # Builds a minimal RunMetadata with one device step stat and one
    # pre-optimization graph node, used as fixture data below.
    step_stats = step_stats_pb2.StepStats(dev_stats=[
        step_stats_pb2.DeviceStepStats(
            device='cpu:0',
            node_stats=[step_stats_pb2.NodeExecStats(node_name='hello')])
    ])
    return config_pb2.RunMetadata(
        function_graphs=[
            config_pb2.RunMetadata.FunctionGraphs(
                pre_optimization_graph=graph_pb2.GraphDef(
                    node=[node_def_pb2.NodeDef(name='foo')]))
        ],
        step_stats=step_stats)

  def keras_model(self, *args, **kwargs):
    # Helper: write a keras_model summary and return its recorded event.
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir)
    with writer.as_default():
      summary_ops.keras_model(*args, **kwargs)
    writer.close()
    events = events_from_logdir(logdir)
    # The first event contains no summary values. The written content goes to
    # the second event.
    return events[1]

  def run_trace(self, f, step=1):
    # Helper: trace f() and export the trace, returning the recorded event.
    assert context.executing_eagerly()
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir)
    summary_ops.trace_on(graph=True, profiler=False)
    with writer.as_default():
      f()
      summary_ops.trace_export(name='foo', step=step)
    writer.close()
    events = events_from_logdir(logdir)
    return events[1]

  @test_util.run_v2_only
  def testRunMetadata_usesNameAsTag(self):
    meta = config_pb2.RunMetadata()
    with ops.name_scope('foo'):
      event = self.run_metadata(name='my_name', data=meta, step=1)
      first_val = event.summary.value[0]
    self.assertEqual('foo/my_name', first_val.tag)

  @test_util.run_v2_only
  def testRunMetadata_summaryMetadata(self):
    expected_summary_metadata = """
      plugin_data {
        plugin_name: "graph_run_metadata"
        content: "1"
      }
    """
    meta = config_pb2.RunMetadata()
    event = self.run_metadata(name='my_name', data=meta, step=1)
    actual_summary_metadata = event.summary.value[0].metadata
    self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)

  @test_util.run_v2_only
  def testRunMetadata_wholeRunMetadata(self):
    expected_run_metadata = """
      step_stats {
        dev_stats {
          device: "cpu:0"
          node_stats {
            node_name: "hello"
          }
        }
      }
      function_graphs {
        pre_optimization_graph {
          node {
            name: "foo"
          }
        }
      }
    """
    meta = self.create_run_metadata()
    event = self.run_metadata(name='my_name', data=meta, step=1)
    first_val = event.summary.value[0]
    actual_run_metadata = config_pb2.RunMetadata.FromString(
        first_val.tensor.string_val[0])
    self.assertProtoEquals(expected_run_metadata, actual_run_metadata)

  @test_util.run_v2_only
  def testRunMetadata_usesDefaultStep(self):
    meta = config_pb2.RunMetadata()
    try:
      summary_ops.set_step(42)
      event = self.run_metadata(name='my_name', data=meta)
      self.assertEqual(42, event.step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)

  @test_util.run_v2_only
  def testRunMetadataGraph_usesNameAsTag(self):
    meta = config_pb2.RunMetadata()
    with ops.name_scope('foo'):
      event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
      first_val = event.summary.value[0]
    self.assertEqual('foo/my_name', first_val.tag)

  @test_util.run_v2_only
  def testRunMetadataGraph_summaryMetadata(self):
    expected_summary_metadata = """
      plugin_data {
        plugin_name: "graph_run_metadata_graph"
        content: "1"
      }
    """
    meta = config_pb2.RunMetadata()
    event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
    actual_summary_metadata = event.summary.value[0].metadata
    self.assertProtoEquals(expected_summary_metadata, actual_summary_metadata)

  @test_util.run_v2_only
  def testRunMetadataGraph_runMetadataFragment(self):
    # Only the graph portion must survive; step_stats must be dropped.
    expected_run_metadata = """
      function_graphs {
        pre_optimization_graph {
          node {
            name: "foo"
          }
        }
      }
    """
    meta = self.create_run_metadata()
    event = self.run_metadata_graphs(name='my_name', data=meta, step=1)
    first_val = event.summary.value[0]
    actual_run_metadata = config_pb2.RunMetadata.FromString(
        first_val.tensor.string_val[0])
    self.assertProtoEquals(expected_run_metadata, actual_run_metadata)

  @test_util.run_v2_only
  def testRunMetadataGraph_usesDefaultStep(self):
    meta = config_pb2.RunMetadata()
    try:
      summary_ops.set_step(42)
      event = self.run_metadata_graphs(name='my_name', data=meta)
      self.assertEqual(42, event.step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)

  @test_util.run_v2_only
  def testKerasModel(self):
    model = Sequential(
        [Dense(10, input_shape=(100,)),
         Activation('relu', name='my_relu')])
    event = self.keras_model(name='my_name', data=model, step=1)
    first_val = event.summary.value[0]
    self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())

  @test_util.run_v2_only
  def testKerasModel_usesDefaultStep(self):
    model = Sequential(
        [Dense(10, input_shape=(100,)),
         Activation('relu', name='my_relu')])
    try:
      summary_ops.set_step(42)
      event = self.keras_model(name='my_name', data=model)
      self.assertEqual(42, event.step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)

  @test_util.run_v2_only
  def testKerasModel_subclass(self):
    # Subclassed models cannot be serialized to JSON; the op must log a
    # warning and return False instead of raising.

    class SimpleSubclass(Model):

      def __init__(self):
        super(SimpleSubclass, self).__init__(name='subclass')
        self.dense = Dense(10, input_shape=(100,))
        self.activation = Activation('relu', name='my_relu')

      def call(self, inputs):
        x = self.dense(inputs)
        return self.activation(x)

    model = SimpleSubclass()
    with test.mock.patch.object(logging, 'warn') as mock_log:
      self.assertFalse(
          summary_ops.keras_model(name='my_name', data=model, step=1))
      self.assertRegexpMatches(
          str(mock_log.call_args), 'Model failed to serialize as JSON.')

  @test_util.run_v2_only
  def testKerasModel_otherExceptions(self):
    model = Sequential()
    with test.mock.patch.object(model, 'to_json') as mock_to_json:
      with test.mock.patch.object(logging, 'warn') as mock_log:
        mock_to_json.side_effect = Exception('oops')
        self.assertFalse(
            summary_ops.keras_model(name='my_name', data=model, step=1))
        self.assertRegexpMatches(
            str(mock_log.call_args),
            'Model failed to serialize as JSON. Ignoring... oops')

  @test_util.run_v2_only
  def testTrace(self):

    @def_function.function
    def f():
      x = constant_op.constant(2)
      y = constant_op.constant(3)
      return x**y

    event = self.run_trace(f)
    first_val = event.summary.value[0]
    actual_run_metadata = config_pb2.RunMetadata.FromString(
        first_val.tensor.string_val[0])
    # Content of function_graphs is large and, for instance, device can change.
    self.assertTrue(hasattr(actual_run_metadata, 'function_graphs'))

  @test_util.run_v2_only
  def testTrace_cannotEnableTraceInFunction(self):

    @def_function.function
    def f():
      summary_ops.trace_on(graph=True, profiler=False)
      x = constant_op.constant(2)
      y = constant_op.constant(3)
      return x**y

    with test.mock.patch.object(logging, 'warn') as mock_log:
      f()
      self.assertRegexpMatches(
          str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')

  @test_util.run_v2_only
  def testTrace_cannotEnableTraceInGraphMode(self):
    with test.mock.patch.object(logging, 'warn') as mock_log:
      with context.graph_mode():
        summary_ops.trace_on(graph=True, profiler=False)
      self.assertRegexpMatches(
          str(mock_log.call_args), 'Must enable trace in eager mode.')

  @test_util.run_v2_only
  def testTrace_cannotExportTraceWithoutTrace(self):
    with six.assertRaisesRegex(self, ValueError,
                               'Must enable trace before export.'):
      summary_ops.trace_export(name='foo', step=1)

  @test_util.run_v2_only
  def testTrace_cannotExportTraceInFunction(self):
    summary_ops.trace_on(graph=True, profiler=False)

    @def_function.function
    def f():
      x = constant_op.constant(2)
      y = constant_op.constant(3)
      summary_ops.trace_export(name='foo', step=1)
      return x**y

    with test.mock.patch.object(logging, 'warn') as mock_log:
      f()
      self.assertRegexpMatches(
          str(mock_log.call_args),
          'Cannot export trace inside a tf.function.')

  @test_util.run_v2_only
  def testTrace_cannotExportTraceInGraphMode(self):
    with test.mock.patch.object(logging, 'warn') as mock_log:
      with context.graph_mode():
        summary_ops.trace_export(name='foo', step=1)
      self.assertRegexpMatches(
          str(mock_log.call_args),
          'Can only export trace while executing eagerly.')

  @test_util.run_v2_only
  def testTrace_usesDefaultStep(self):

    @def_function.function
    def f():
      x = constant_op.constant(2)
      y = constant_op.constant(3)
      return x**y

    try:
      summary_ops.set_step(42)
      event = self.run_trace(f, step=None)
      self.assertEqual(42, event.step)
    finally:
      # Reset to default state for other tests.
      summary_ops.set_step(None)
def events_from_file(filepath):
  """Returns all events in a single event file.

  Args:
    filepath: Path to the event file.

  Returns:
    A list of all tf.Event protos in the event file.
  """
  events = []
  for serialized in tf_record.tf_record_iterator(filepath):
    event = event_pb2.Event()
    event.ParseFromString(serialized)
    events.append(event)
  return events
def events_from_logdir(logdir):
  """Returns all events in the single eventfile in logdir.

  Args:
    logdir: The directory in which the single event file is sought.

  Returns:
    A list of all tf.Event protos from the single event file.

  Raises:
    AssertionError: If logdir does not contain exactly one file.
  """
  assert gfile.Exists(logdir)
  entries = gfile.ListDirectory(logdir)
  assert len(entries) == 1, 'Found not exactly one file in logdir: %s' % entries
  return events_from_file(os.path.join(logdir, entries[0]))
def to_numpy(summary_value):
  # Convert the TensorProto held by a summary value into a NumPy ndarray.
  return tensor_util.MakeNdarray(summary_value.tensor)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  test.main()
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for running the test under heapchecker and analyzing the output."""
import datetime
import logging
import os
import re
import common
import path_utils
import suppressions
class HeapcheckWrapper(object):
  """Runs a test binary under the tcmalloc heap checker and filters leaks.

  The wrapper executes the test via heapcheck_std.sh, parses leak reports
  from the log file, applies suppressions, and prints unsuppressed reports
  together with ready-to-paste suppression stanzas.

  NOTE: this module is Python 2 code (print statements, file(), py2 map()).
  """
  TMP_FILE = 'heapcheck.log'
  SANITY_TEST_SUPPRESSION = "Heapcheck sanity test"
  # Matches e.g. "Leak of 8 bytes in 1 objects allocated from:"
  LEAK_REPORT_RE = re.compile(
      'Leak of ([0-9]*) bytes in ([0-9]*) objects allocated from:')
  # Matches one stack frame line: "@ 0xdeadbeef FunctionName".
  STACK_LINE_RE = re.compile('\s*@\s*(?:0x)?[0-9a-fA-F]+\s*([^\n]*)')
  BORING_CALLERS = common.BoringCallers(mangled=False, use_re_wildcards=True)

  def __init__(self, supp_files):
    # Mode and timeout are fixed; suppressions accumulate from all files.
    self._mode = 'strict'
    self._timeout = 1800
    self._nocleanup_on_exit = False
    self._suppressions = []
    for fname in supp_files:
      self._suppressions.extend(suppressions.ReadSuppressionsFromFile(fname))
    # Start from a clean log so stale reports are never re-analyzed.
    if os.path.exists(self.TMP_FILE):
      os.remove(self.TMP_FILE)

  def PutEnvAndLog(self, env_name, env_value):
    """Sets the env var |env_name| to |env_value| and writes to logging.info.
    """
    os.putenv(env_name, env_value)
    logging.info('export %s=%s', env_name, env_value)

  def Execute(self):
    """Executes the app to be tested."""
    logging.info('starting execution...')
    proc = ['sh', path_utils.ScriptDir() + '/heapcheck_std.sh']
    proc += self._args
    # Force allocators into modes the heap checker can track.
    self.PutEnvAndLog('G_SLICE', 'always-malloc')
    self.PutEnvAndLog('NSS_DISABLE_ARENA_FREE_LIST', '1')
    self.PutEnvAndLog('NSS_DISABLE_UNLOAD', '1')
    self.PutEnvAndLog('GTEST_DEATH_TEST_USE_FORK', '1')
    self.PutEnvAndLog('HEAPCHECK', self._mode)
    self.PutEnvAndLog('HEAP_CHECK_ERROR_EXIT_CODE', '0')
    self.PutEnvAndLog('HEAP_CHECK_MAX_LEAKS', '-1')
    self.PutEnvAndLog('KEEP_SHADOW_STACKS', '1')
    self.PutEnvAndLog('PPROF_PATH',
                      path_utils.ScriptDir() +
                      '/../../third_party/tcmalloc/chromium/src/pprof')
    self.PutEnvAndLog('LD_LIBRARY_PATH',
                      '/usr/lib/debug/:/usr/lib32/debug/')
    # CHROME_DEVEL_SANDBOX causes problems with heapcheck
    self.PutEnvAndLog('CHROME_DEVEL_SANDBOX', '');
    return common.RunSubprocess(proc, self._timeout)

  def Analyze(self, log_lines, check_sanity=False):
    """Analyzes the app's output and applies suppressions to the reports.

    Analyze() searches the logs for leak reports and tries to apply
    suppressions to them. Unsuppressed reports and other log messages are
    dumped as is.

    If |check_sanity| is True, the list of suppressed reports is searched for a
    report starting with SANITY_TEST_SUPPRESSION. If there isn't one, Analyze
    returns 2 regardless of the unsuppressed reports.

    Args:
      log_lines: An iterator over the app's log lines.
      check_sanity: A flag that determines whether we should check the tool's
            sanity.

    Returns:
      2, if the sanity check fails,
      1, if unsuppressed reports remain in the output and the sanity check
      passes,
      0, if all the errors are suppressed and the sanity check passes.
    """
    return_code = 0
    # leak signature: [number of bytes, number of objects]
    cur_leak_signature = None
    cur_stack = []
    cur_report = []
    reported_hashes = {}
    # Statistics grouped by suppression description:
    # [hit count, bytes, objects].
    used_suppressions = {}
    for line in log_lines:
      line = line.rstrip()  # remove the trailing \n
      match = self.STACK_LINE_RE.match(line)
      if match:
        # Inside a leak report: collect the stack frame and raw line.
        cur_stack.append(match.groups()[0])
        cur_report.append(line)
        continue
      else:
        # Non-stack line: the report (if any) just ended; process it.
        if cur_stack:
          # Try to find the suppression that applies to the current leak stack.
          description = ''
          for supp in self._suppressions:
            if supp.Match(cur_stack):
              cur_stack = []
              description = supp.description
              break
          if cur_stack:
            if not cur_leak_signature:
              print 'Missing leak signature for the following stack: '
              for frame in cur_stack:
                print ' ' + frame
              print 'Aborting...'
              return 3
            # Drop boring callers from the stack to get less redundant info
            # and fewer unique reports.
            found_boring = False
            for i in range(1, len(cur_stack)):
              for j in self.BORING_CALLERS:
                if re.match(j, cur_stack[i]):
                  cur_stack = cur_stack[:i]
                  cur_report = cur_report[:i]
                  found_boring = True
                  break
              if found_boring:
                break
            # 64-bit hash of the stack identifies duplicate reports.
            error_hash = hash("".join(cur_stack)) & 0xffffffffffffffff
            if error_hash not in reported_hashes:
              reported_hashes[error_hash] = 1
              # Print the report and set the return code to 1.
              print ('Leak of %d bytes in %d objects allocated from:'
                     % tuple(cur_leak_signature))
              print '\n'.join(cur_report)
              return_code = 1
              # Generate the suppression iff the stack contains more than one
              # frame (otherwise it's likely to be broken)
              if len(cur_stack) > 1 or found_boring:
                print '\nSuppression (error hash=#%016X#):\n{' % (error_hash)
                print ' <insert_a_suppression_name_here>'
                print ' Heapcheck:Leak'
                for frame in cur_stack:
                  print ' fun:' + frame
                print '}\n\n'
              else:
                print ('This stack may be broken due to omitted frame pointers.'
                       ' It is not recommended to suppress it.\n')
          else:
            # Update the suppressions histogram.
            if description in used_suppressions:
              hits, bytes, objects = used_suppressions[description]
              hits += 1
              bytes += cur_leak_signature[0]
              objects += cur_leak_signature[1]
              used_suppressions[description] = [hits, bytes, objects]
            else:
              used_suppressions[description] = [1] + cur_leak_signature
        # Reset per-report state, then see if this line opens a new report.
        cur_stack = []
        cur_report = []
        cur_leak_signature = None
        match = self.LEAK_REPORT_RE.match(line)
        if match:
          cur_leak_signature = map(int, match.groups())
        else:
          print line
    # Print the list of suppressions used.
    is_sane = False
    if used_suppressions:
      print
      print '-----------------------------------------------------'
      print 'Suppressions used:'
      print ' count bytes objects name'
      histo = {}
      for description in used_suppressions:
        if description.startswith(HeapcheckWrapper.SANITY_TEST_SUPPRESSION):
          is_sane = True
        hits, bytes, objects = used_suppressions[description]
        line = '%8d %8d %8d %s' % (hits, bytes, objects, description)
        if hits in histo:
          histo[hits].append(line)
        else:
          histo[hits] = [line]
      keys = histo.keys()
      keys.sort()
      for count in keys:
        for line in histo[count]:
          print line
      print '-----------------------------------------------------'
    if check_sanity and not is_sane:
      logging.error("Sanity check failed")
      return 2
    else:
      return return_code

  def RunTestsAndAnalyze(self, check_sanity):
    # Run the test first, then analyze; an analysis failure takes priority
    # over the test binary's own exit code.
    exec_retcode = self.Execute()
    log_file = file(self.TMP_FILE, 'r')
    analyze_retcode = self.Analyze(log_file, check_sanity)
    log_file.close()
    if analyze_retcode:
      logging.error("Analyze failed.")
      return analyze_retcode
    if exec_retcode:
      logging.error("Test execution failed.")
      return exec_retcode
    else:
      logging.info("Test execution completed successfully.")
    return 0

  def Main(self, args, check_sanity=False):
    self._args = args
    start = datetime.datetime.now()
    retcode = -1
    retcode = self.RunTestsAndAnalyze(check_sanity)
    end = datetime.datetime.now()
    # Python 2 integer division intentionally used for h/m/s breakdown.
    seconds = (end - start).seconds
    hours = seconds / 3600
    seconds %= 3600
    minutes = seconds / 60
    seconds %= 60
    logging.info('elapsed time: %02d:%02d:%02d', hours, minutes, seconds)
    logging.info('For more information on the Heapcheck bot see '
                 'http://dev.chromium.org/developers/how-tos/'
                 'using-the-heap-leak-checker')
    return retcode
def RunTool(args, supp_files, module):
  """Run the heapcheck wrapper for one test module.

  The tool's self-sanity check is enabled only for modules listed in
  sanity_modules; args[0] (the script name) is stripped before passing
  the remaining arguments through.
  """
  sanity_modules = ["base"]
  wrapper = HeapcheckWrapper(supp_files)
  return wrapper.Main(args[1:], module in sanity_modules)
|
|
import sys, select, time, socket, traceback
class SEND:
  """Waiting state: the fiber wants to send on a socket before `expire`."""

  def __init__(self, sock, timeout):
    self.fileno = sock.fileno()
    self.expire = time.time() + timeout

  def __str__(self):
    stamp = time.strftime('%H:%M:%S', time.localtime(self.expire))
    return 'SEND(%i,%s)' % (self.fileno, stamp)
class RECV:
  """Waiting state: the fiber wants to receive on a socket before `expire`."""

  def __init__(self, sock, timeout):
    self.fileno = sock.fileno()
    self.expire = time.time() + timeout

  def __str__(self):
    stamp = time.strftime('%H:%M:%S', time.localtime(self.expire))
    return 'RECV(%i,%s)' % (self.fileno, stamp)
class WAIT:
  """Waiting state: the fiber sleeps until `expire` (forever when None)."""

  def __init__(self, timeout=None):
    # Preserve historical truthiness: a timeout of 0 also means "no expiry".
    if timeout:
      self.expire = time.time() + timeout
    else:
      self.expire = None

  def __str__(self):
    stamp = self.expire and time.strftime('%H:%M:%S', time.localtime(self.expire))
    return 'WAIT(%s)' % stamp
class Fiber:
  """Wraps a generator-based coroutine and tracks its current waiting state.

  The generator yields SEND/RECV/WAIT instances to declare what it is
  waiting for; step() resumes it and records the newly yielded state.
  (Python 2 code: generator.next(), `except E, v`, print statements.)
  """

  def __init__( self, generator ):
    self.__generator = generator
    self.state = WAIT()

  def step( self, throw=None ):
    # Resume the generator. When `throw` is given it is raised inside the
    # generator as an AssertionError (used to signal connection timeouts).
    self.state = None
    try:
      if throw:
        assert hasattr( self.__generator, 'throw' ), throw
        self.__generator.throw( AssertionError, throw )
      state = self.__generator.next()
      assert isinstance( state, (SEND, RECV, WAIT) ), 'invalid waiting state %r' % state
      self.state = state
    except KeyboardInterrupt:
      raise
    except StopIteration:
      # Generator finished normally; drop it so state stays None and the
      # scheduler removes this fiber.
      del self.__generator
      pass
    except AssertionError, msg:
      print 'Error:', msg
    except:
      traceback.print_exc()

  def __repr__( self ):
    # Current source line of the generator plus its waiting state.
    return '%i: %s' % ( self.__generator.gi_frame.f_lineno, self.state )
class GatherFiber( Fiber ):
  """Fiber that buffers all of its output and dumps it at destruction.

  While the fiber runs, sys.stdout/sys.stderr are redirected to `self`,
  and each output line is prefixed with the elapsed time since creation.
  """

  def __init__( self, generator ):
    Fiber.__init__( self, generator )
    self.__chunks = [ '[ 0.00 ] %s\n' % time.ctime() ]
    self.__start = time.time()
    self.__newline = True

  def step( self, throw=None ):
    # Capture stdout/stderr so prints inside the generator are collected
    # into __chunks instead of interleaving with other fibers' output.
    stdout = sys.stdout
    stderr = sys.stderr
    try:
      sys.stdout = sys.stderr = self
      Fiber.step( self, throw )
    finally:
      sys.stdout = stdout
      sys.stderr = stderr

  def write( self, string ):
    # File-like hook: prefix the elapsed time at the start of each new line.
    if self.__newline:
      self.__chunks.append( '%6.2f ' % ( time.time() - self.__start ) )
    self.__chunks.append( string )
    self.__newline = string.endswith( '\n' )

  def __del__( self ):
    # Flush the buffered transcript when the fiber is garbage collected.
    sys.stdout.writelines( self.__chunks )
    if not self.__newline:
      sys.stdout.write( '\n' )
class DebugFiber( Fiber ):
  """Fiber that writes output immediately, tagged with a per-fiber id."""
  # Class-level counter handing out the next fiber id.
  id = 0

  def __init__( self, generator ):
    Fiber.__init__( self, generator )
    self.__id = DebugFiber.id
    sys.stdout.write( '[ %04X ] %s\n' % ( self.__id, time.ctime() ) )
    self.__newline = True
    self.__stdout = sys.stdout
    # Wrap the shared counter; 65535 keeps ids within four hex digits.
    DebugFiber.id = ( self.id + 1 ) % 65535

  def step( self, throw=None ):
    # Redirect stdout/stderr to self for the duration of the step so all
    # output is tagged with this fiber's id.
    stdout = sys.stdout
    stderr = sys.stderr
    try:
      sys.stdout = sys.stderr = self
      Fiber.step( self, throw )
      if self.state:
        print 'Waiting at', self
    finally:
      sys.stdout = stdout
      sys.stderr = stderr

  def write( self, string ):
    # Prefix each new output line with the fiber id.
    if self.__newline:
      self.__stdout.write( ' %04X ' % self.__id )
    self.__stdout.write( string )
    self.__newline = string.endswith( '\n' )
def spawn( generator, port, debug ):
  """Run a select()-based server loop, spawning one fiber per connection.

  `generator` is called with (socket, address) for every accepted
  connection and must yield SEND/RECV/WAIT states.  `debug` selects
  DebugFiber (immediate, id-tagged output) over GatherFiber (buffered).
  Returns True on a clean Ctrl-C shutdown, False on setup failure/crash.
  """
  try:
    listener = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    listener.setblocking( 0 )
    listener.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, listener.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR ) | 1 )
    listener.bind( ( '', port ) )
    listener.listen( 5 )
  except Exception, e:
    print 'error: failed to create socket:', e
    return False
  if debug:
    myFiber = DebugFiber
  else:
    myFiber = GatherFiber
  print ' .... Server started'
  try:
    fibers = []
    while True:
      tryrecv = { listener.fileno(): None }
      trysend = {}
      expire = None
      now = time.time()
      # Walk fibers backwards so finished ones can be deleted in place.
      i = len( fibers )
      while i:
        i -= 1
        state = fibers[ i ].state
        # NOTE(review): for a WAIT with expire None this relies on the
        # Python 2 ordering `now > None` being False -- py2-only behavior.
        if state and now > state.expire:
          if isinstance( state, WAIT ):
            fibers[ i ].step()
          else:
            # SEND/RECV past its deadline: raise a timeout in the fiber.
            fibers[ i ].step( throw='connection timed out' )
          state = fibers[ i ].state
        if not state:
          del fibers[ i ]
          continue
        if isinstance( state, RECV ):
          tryrecv[ state.fileno ] = fibers[ i ]
        elif isinstance( state, SEND ):
          trysend[ state.fileno ] = fibers[ i ]
        elif state.expire is None:
          continue
        # Track the earliest deadline to bound the select() timeout.
        if state.expire < expire or expire is None:
          expire = state.expire
      if expire is None:
        print '[ IDLE ]', time.ctime()
        sys.stdout.flush()
        canrecv, cansend, dummy = select.select( tryrecv, trysend, [] )
        print '[ BUSY ]', time.ctime()
        sys.stdout.flush()
      else:
        canrecv, cansend, dummy = select.select( tryrecv, trysend, [], max( expire - now, 0 ) )
      for fileno in canrecv:
        # NOTE(review): `is` compares int identity here; works in CPython
        # for small ints but `==` would be the robust comparison.
        if fileno is listener.fileno():
          fibers.append( myFiber( generator( *listener.accept() ) ) )
        else:
          tryrecv[ fileno ].step()
      for fileno in cansend:
        trysend[ fileno ].step()
  except KeyboardInterrupt:
    print ' .... Server terminated'
    return True
  except:
    print ' .... Server crashed'
    traceback.print_exc( file=sys.stdout )
    return False
|
|
# -*- coding: utf-8 -*-
# Reimplement gpoline.icn and digest.cin in Python.
# This implements the basic functionality of converting GPO locator codes
# to HTML matching THOMAS/LIS.
#
import re
import logging
logging.basicConfig(format='%(levelname)s %(pathname)s %(lineno)s:%(message)s', level=logging.DEBUG)
#logging.basicConfig(format='%(levelname)s %(filename)s %(lineno)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
from itertools import zip_longest
# Globals used in this module.
# char(0) char(10) char(13)
# char(27) char(28) char(127)
# NOTE(review): several escapes below look like two-character sequences
# rather than the single control characters named above -- \xNN consumes
# exactly two hex digits, so b'\x000' is NUL + '0' and b'\x00A' is
# NUL + 'A' (not \x0A, char 10). Likewise b'\x01B'/b'\x01C'. Confirm
# against real input data before changing them; b'\x07F\d+' and
# b'\x07S\d+' (BEL + letter + digits) do match the locator-code shape
# used elsewhere in this module, so those two are probably intentional.
REMOVE_CHARS = [b'\x000', b'\x00A', b'\x00D',
                b'\x01B', b'\x01C', b'\xac',
                b'\x07F\d+', #seems to appear at the head of a page/section
                b'\x07S\d+', # subformat codes?
                #b'\xad',
                #b'\xa8'
                ]
# translate mapping tab to space
# NOTE(review): b'\x009' is NUL + '9'; the comment says "tab to space",
# which suggests b'\x09' was intended -- verify before changing.
MAPPING = {b'\x009': b' ', # tab to space
           b'\xff09':b'–' , # b'\xff09': u'\u2013',
           b'\x19': b' ' , # hex 19 End of Medium? change to space i.e. Page\x19S2128 -> Page S2128
           b'\x5f': b'--' , # replace underbar with double hyphen??? TODO: check
           b'\x18': b'<br />' , # \x18 CANCEL-> <br/> ?
           b'\x1a': b'<br />' , # \x1a Substitute -> <br/> ?
           #b'\xff': b'ÿ' , # y with .. dots over it. used to indicate accents
           }
def grouper(iterable, n, fillvalue=None):
    '''Collect data into fixed-length chunks, skipping empty byte strings.

    Modified recipe from http://docs.python.org/3/library/itertools.html#recipes
    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    '''
    # A single shared filtered iterator, referenced n times, makes
    # zip_longest pull n consecutive items per output tuple.
    filtered = filter(lambda item: item != b'', iter(iterable))
    return zip_longest(*[filtered] * n, fillvalue=fillvalue)
def remove_chars(line, remove_chars, to_string=b''):
    '''Remove every occurrence of the given regex patterns from a line.

    Args:
        line: bytes to clean up.
        remove_chars: iterable of regex patterns (bytes) to remove.
        to_string: replacement bytes; empty string by default.

    Returns:
        The line with all pattern occurrences replaced.
    '''
    for pattern in remove_chars:
        # A single sub() both detects and replaces; the previous
        # search-then-sub version scanned the line twice per pattern.
        replaced = re.sub(pattern, to_string, line)
        if replaced != line:
            # Use a str (not bytes) format string so the record is not
            # rendered as a bytes repr by the logging machinery.
            logging.getLogger(__name__).debug(
                "Replaced [%s] with [%s] in input", pattern, to_string)
        line = replaced
    return line
def translate_chars(line, mapping):
    '''Replace every key of `mapping` found in `line` with its mapped value.

    Keys are treated as regex patterns, since replacement is delegated to
    remove_chars().
    '''
    for pattern, replacement in mapping.items():
        line = remove_chars(line, [pattern], replacement)
    return line
def find_page(data):
    '''Extract a page id of the form {D####} from an \\x07I90 locator line.

    Returns a (page, match) tuple; both are None when the line does not
    begin with the I90 locator.
    '''
    match = re.match(b'\x07I90.+?\{(D\d+)\}', data)
    page = match.group(1) if match else None
    logger.debug("find_page->(%s)", page)
    return page, match
def find_locators(line):
    '''given a line return a regex match iterator for locator codes in the line.
    match.start gives position start, match.end gives position end
    match.group('locator') gives the locator code without the Bell
    '''
    # re.finditer always returns an iterator object, which is always truthy,
    # so the previous `if m: code = m` guard was dead code — return directly.
    return re.finditer(b'\x07(?P<locator>I67H|I66F|T\d?|g\d{0,3}|[a-su-zA-SU-Z]\d{0,2})', line)
def find_escape(line, current_grid=b'G1'):
    ''' Escape sequences usually replace a preceding char, like an accented
    e in resume or in foreign accented chars.

    Returns a regex match iterator; group 'replace' is the (possibly empty)
    char before the \xff marker, group 'esc' is the escape code after it.
    *current_grid* is accepted for API compatibility but not used here.
    '''
    # re.finditer is always truthy, so the previous `if m:` guard was dead
    # code — return the iterator directly.
    return re.finditer(b'(?P<replace>.?)\xff(?P<esc>AE\d|AF\d|0\d|\dA|E\d)', line)
def translate_locator(locator, grid=b'G2',
                      locator_table=None,
                      font_table=None):
    ''' A locator code has a start and end conversion (html) and a grid code.
    The Grid code controls which Font locator code conversion to use.
    so if the active locator code has a grid G2 we use the font code for grid G2.
    The default grid is G2.

    Lookup order: locator_table first, then the font table for *grid*;
    falls back to a no-op conversion carrying the current grid.
    '''
    resolved = locator_table.get(locator)
    if not resolved:
        # Not a plain locator — try the font conversions for the active grid.
        grid_fonts = font_table.get(grid)
        if grid_fonts:
            resolved = grid_fonts.get(locator)
    return resolved if resolved else {'start': '', 'end': '', 'grid': grid}
import sys
def output(input_line, prefix=None, postfix=None, outf=sys.stdout):
    ''' Print output to filehandle outf or sys.stdout if no filehandle
    passed in. Attempt to convert bytes from latin1 to utf-8.
    prefix is printed prior to input_line, postfix is print after input_line.

    Thin public wrapper: normalises a falsy postfix to '' and delegates
    the actual writing to _output().
    '''
    _output(input_line, prefix, postfix if postfix else '', outf)
def _output(input_line, prefix=None, postfix=None, outf=sys.stdout):
#logger.debug("[%s] %s [%s]", prefix, input_line, postfix)
if isinstance(input_line, bytes):
line = input_line.decode('latin1').encode('utf-8')
else:
line = input_line
if line and line != b'':
if prefix:
logger.debug("%s", prefix)
if isinstance(prefix, bytes) :
outf.write (prefix.decode("utf-8"))
else:
outf.write (line)
if isinstance(line, bytes) :
outf.write (line.decode("utf-8"))
else:
outf.write (line)
if postfix:
logger.debug("%s", postfix)
if isinstance(postfix, bytes) :
outf.write (postfix.decode("utf-8"))
else:
outf.write (postfix)
# Escape-code translation table used by process_escapes().
#
# Two shapes of entry:
#   * single-part:  \xff<esc>            -> {'desc', 'html'}
#   * two-part:     <replace>\xff<esc>   -> {<replace byte>: {'desc','html'}}
#
# BUG FIXES relative to the original:
#   * All 'html' values are now ASCII-safe UTF-8 escape sequences — bytes
#     literals containing raw non-ASCII characters are a SyntaxError in
#     Python 3 (and _output() decodes these as UTF-8 anyway).
#   * The inner keys of b'AE0' were str ('S', 's') while process_escapes()
#     looks them up with bytes regex groups, so they could never match;
#     they are now bytes like the b'AE1' entries.
ESCAPE_SEQUENCES = {#esc  # action
    b'1A': {'desc': 'Thin space', 'html': b'\xe2\x80\x89'},  # U+2009 — TODO confirm glyph (display-lossy source)
    b'09': {'desc': 'N dash', 'html': b'\xe2\x80\x93'},  # U+2013 en dash — TODO: check
    # 08 not clear what to do All Mark
    # NOTE(review): the esc regex only matches 'AF' followed by a digit
    # (AF\d), so this two-byte key can never be looked up — confirm intent.
    b'AF': {'desc': 'copywright', 'html': b'\xc2\xa9'},  # U+00A9
    b'0A': {'desc': 'Multiplication', 'html': b'\xc3\x97'},  # U+00D7
    # esc  # replace  # action
    b'AE0': {
        # NOTE(review): original html here was the copyright sign — looked
        # like a copy-paste slip; U+0160 matches the lowercase entry. Confirm.
        b'S': {'desc': 'breve', 'html': b'\xc5\xa0'},  # Š
        b's': {'desc': 'breve', 'html': b'\xc5\xa1'},  # š
    },
    b'AE1': {
        b'A': {'desc': 'acute', 'html': b'\xc3\x81'},  # Á
        b'E': {'desc': 'acute', 'html': b'\xc3\x89'},  # É
        b'I': {'desc': 'acute', 'html': b'\xc3\x8d'},  # Í
        b'O': {'desc': 'acute', 'html': b'\xc3\x93'},  # Ó
        b'U': {'desc': 'acute', 'html': b'\xc3\x9a'},  # Ú
        b'Y': {'desc': 'acute', 'html': b'\xc3\x9d'},  # Ý
        b'c': {'desc': 'acute', 'html': b'c\xc2\xb4'},  # TODO check
        b's': {'desc': 'acute', 'html': b's\xc2\xb4'},  # TODO check
        b'a': {'desc': 'acute', 'html': b'\xc3\xa1'},  # á
        b'e': {'desc': 'acute', 'html': b'\xc3\xa9'},  # é
        b'i': {'desc': 'acute', 'html': b'\xc3\xad'},  # í
        b'o': {'desc': 'acute', 'html': b'\xc3\xb3'},  # ó
        b'u': {'desc': 'acute', 'html': b'\xc3\xba'},  # ú
        b'y': {'desc': 'acute', 'html': b'\xc3\xbd'},  # ý
    }
}
def process_escapes(found, orig_line, current_start, current_line, current_grid , escape_sequences=ESCAPE_SEQUENCES ):
    ''' if current_grid > 4 then do something else
    do our conversions see documentation..found on cornell law site
    <replace char>\xff<esc>
    escape_sequences.get(esc).get(<replace_char>).get('html')
    or
    \xff<esc>
    escape_sequences.get(esc).get('html')

    :param found: one regex match from find_escape() (groups 'replace'/'esc')
    :param orig_line: the full original bytes line being converted
    :param current_start: offset in orig_line where the unconsumed text begins
    :param current_line: output accumulated so far (returned unchanged when
        the grid number is above 4)
    :param current_grid: active grid code, e.g. b'G2'
    :returns: (output_segment, new_current_start)
    '''
    logger.debug("process esc:%s", current_line)
    # NOTE: local name `output` shadows the module-level output() function
    # within this body.
    output = current_line
    logger.debug("Current_grid[1:]:%s ", current_grid[1:])
    # Only grids 1-4 carry translatable escapes; higher grids pass through.
    if int(current_grid[1:]) <= 4:
        replace = found.group('replace')
        esc = found.group('esc')
        if esc:
            '''2 types of esc sequences:
            One where we match first the
            esc char and then the replace char to get the action part of the
            dictionary.
            Two: just match the esc char and then we get the action part of the
            dictionary.
            '''
            temp = escape_sequences.get(esc, {'desc': 'default', 'html': b'' } )
            # if the temp action has a dictionary matching the replace char,
            # make that dictionary the action
            action = temp.get(replace)
            keep_replacement = False
            if not action:
                # otherwise the action is the dictionary returned by the escape
                # sequence above (or the default, and empty space) and we keep
                # the replace char if existing in the output.
                action = temp
                keep_replacement= True
            logger.debug("process esc: esc:%s, replace:%s action:%s", esc, replace, action)
            replace_with_html = action.get('html')
            if keep_replacement:
                # Emit text up to the match, the original replace char, then
                # the translation (none2empty guards None segments).
                output = none2empty(orig_line[current_start:found.start()]) + none2empty(replace) + none2empty(replace_with_html)
            else :
                # Two-part sequence: the replace char is consumed by the
                # translation itself.
                output = orig_line[current_start:found.start()] + replace_with_html
            if action.get('desc') == 'default':
                logger.warning("No translation from %s, defaulting to empty space..", esc)
            # Resume after the escape sequence for the next call.
            current_start = found.end()
    logger.debug("output:%s", output)
    return output, current_start
def none2empty(input):
    """Return *input* unchanged when truthy, otherwise empty bytes."""
    return input if input else b''
def process_escapes_in_line(line, current_grid, escape_sequences=None):
    """Translate every \xff escape sequence in *line* for the active grid.

    Walks the matches from find_escape(), accumulating translated segments
    via process_escapes(); any untouched tail of the line is appended at the
    end. Returns the (possibly unchanged) line.
    """
    if not escape_sequences:
        escape_sequences = ESCAPE_SEQUENCES
    current_start = 0
    current_line = b''
    for afound in find_escape(line, current_grid):
        logger.debug("line:%s", line)
        a_current_line , current_start = process_escapes(afound, line,
            current_start, current_line, current_grid ,
            escape_sequences=escape_sequences)
        current_line = current_line + a_current_line
        logger.debug("after esc :%s", current_line)
    # current_start only advances when at least one escape was translated;
    # in that case append the untranslated remainder of the original line.
    if current_start > 0:
        current_line = current_line + line[current_start:]
        line = current_line
    return line
def process_lines(line, current_state, outputf=sys.stdout,
locator_table=None,
font_table=None, postfix=None):
'''For every line process it for locator codes,
Set the current_state to the action's grid,value unless it is a
Font locator (T\d+). We use the grid code of the current locator action
to determine which Font action to use.
action = { 'start': "<h3><em>",'end': "</em></h3>",'grid':"G2",},
current_state = tuple( action,b'G2')
There should only be one locator per line at the begining.
'''
state_stack= []
state_stack.append(current_state)
line_start = 0
current_grid = current_state[1]
for found in find_locators(line):
logger.debug("Found locator:%s", found.group('locator'))
action = translate_locator(found.group('locator'), grid=current_grid,
locator_table=locator_table, font_table=font_table)
if action:
logger.debug("Found Action:%s" , action)
current_action = current_state[0]
line, line_start = process_actions(found, line, line_start, current_action, action, outputf=outputf)
# Not a font locator code:
if found.group('locator')[0] != 'T':
# set the current grid equal to the locator codes grid code.
current_grid = action.get('grid')
current_state = ( action, current_grid )
state_stack.append(current_state)
if line:
line = process_escapes_in_line(line, current_grid)
output_line = line[line_start:]
output (output_line, postfix=postfix,outf=outputf)
else:
output_line = None
return state_stack, output_line
def process_actions(found, line, line_start, current_state, actions, outputf=None):
    ''' Process a given locator code according to the actions object
    an action is the current locator state from FONT_TABLE or LOCATOR_TABLE

    Emits, in order: the text preceding the locator, the closing html of the
    previous state (if any), and the opening html of the new action.
    Returns (line, line_start) where line_start now points just past the
    locator match (or None when a start-preprocess hook emptied the line).
    '''
    locator = found.group('locator')
    # Optional hook: an action may pre-transform (or swallow) the line.
    if actions.get('start-preprocess'):
        line = actions.get('start-preprocess')(line)
        if not line:
            line_start = None
    if line:
        pattern_start = found.start()
        pattern_end = found.end()
        # Text before the locator belongs to the previous state.
        output( line[line_start:pattern_start],outf=outputf)
        line_start = pattern_end
        if current_state and current_state.get('end'):
            logger.debug("\tcurrent_state.end:%s" , current_state.get('end'))
            output (current_state.get('end'),outf=outputf)
        logger.debug("\tlocator:%s action start:%s", locator, actions.get('start'))
        output ( actions.get('start'),outf=outputf)
        # NOTE(review): line_start was just advanced past pattern_start, so
        # this slice is empty (or reversed) — looks vestigial; confirm it can
        # be removed without changing output.
        output (line[line_start:pattern_start],outf=outputf)
    return line, line_start
|
|
import pytest
from plenum.common.messages.internal_messages import NewViewCheckpointsApplied
from plenum.common.messages.node_messages import OldViewPrePrepareRequest, OldViewPrePrepareReply
from plenum.common.startable import Mode, Status
from plenum.server.consensus.consensus_shared_data import ConsensusSharedData
from plenum.server.consensus.ordering_service_msg_validator import OrderingServiceMsgValidator
from plenum.server.replica_helper import generateName
from plenum.server.replica_validator_enums import PROCESS, DISCARD, STASH_CATCH_UP, STASH_WATERMARKS, \
STASH_VIEW_3PC, OLD_VIEW, OUTSIDE_WATERMARKS, ALREADY_ORDERED, CATCHING_UP, FUTURE_VIEW, \
WAITING_FOR_NEW_VIEW, NON_MASTER, INCORRECT_INSTANCE, STASH_WAITING_FIRST_BATCH_IN_VIEW, WAITING_FIRST_BATCH_IN_VIEW
from plenum.test.bls.helper import generate_state_root
from plenum.test.greek import genNodeNames
from plenum.test.helper import create_pre_prepare_no_bls, create_prepare, create_commit_no_bls_sig
@pytest.fixture(scope='function', params=[0, 2])
def view_no(request):
    """View number under test: 0 (initial view) and 2 (after view changes)."""
    return request.param


@pytest.fixture(scope='function', params=[Mode.starting,
                                          Mode.discovering,
                                          Mode.discovered,
                                          Mode.syncing,
                                          Mode.synced,
                                          Mode.participating])
def mode(request):
    """Every node catch-up mode, from starting through participating."""
    return request.param


@pytest.fixture(scope='function', params=[True, False])
def waiting_for_new_view(request):
    """Whether the replica is currently waiting for a NEW_VIEW message."""
    return request.param
@pytest.fixture(scope='function')
def validator(view_no):
    """OrderingServiceMsgValidator over shared data for a 4-node pool.

    The replica is the master instance (inst_id 0), fully caught up
    (participating/started), with view_no taken from the parametrized
    fixture above.
    """
    validators = genNodeNames(4)
    inst_id = 0
    cd = ConsensusSharedData(generateName(validators[0], inst_id), validators, inst_id, True)
    cd.pp_seq_no = 1
    cd.view_no = view_no
    cd.node_mode = Mode.participating
    cd.node_status = Status.started
    # Mirror the last ordered pp_seq_no so the "first batch in view" checks
    # consider the replica up to date by default.
    cd.prev_view_prepare_cert = cd.last_ordered_3pc[1]
    return OrderingServiceMsgValidator(data=cd)
def pre_prepare(view_no, pp_seq_no, inst_id=0):
    """Build a PrePrepare (no BLS) for the given 3PC key."""
    return create_pre_prepare_no_bls(generate_state_root(),
                                     view_no=view_no,
                                     pp_seq_no=pp_seq_no,
                                     inst_id=inst_id)


def prepare(view_no, pp_seq_no, inst_id=0):
    """Build a Prepare for the given 3PC key."""
    return create_prepare(req_key=(view_no, pp_seq_no),
                          state_root=generate_state_root(),
                          inst_id=inst_id)


def commit(view_no, pp_seq_no, inst_id=0):
    """Build a Commit (no BLS signature) for the given 3PC key."""
    return create_commit_no_bls_sig(req_key=(view_no, pp_seq_no),
                                    inst_id=inst_id)


def new_view(view_no):
    """Build an empty NewViewCheckpointsApplied for *view_no*."""
    return NewViewCheckpointsApplied(view_no, [], [], [])


def old_view_pp_req():
    """Build an OldViewPrePrepareRequest referencing two old-view batches."""
    return OldViewPrePrepareRequest(0,
                                    [(1, 0, 1, "d1"), (1, 0, 2, "d1")])


def old_view_pp_rep():
    """Build an OldViewPrePrepareReply carrying two PrePrepares.

    NOTE(review): pp2 repeats pp_seq_no=1 (same key as pp1) — possibly
    intended to be 2; confirm whether duplicate keys are deliberate.
    """
    pp1 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=0,
                                    pp_seq_no=1,
                                    inst_id=0)
    pp2 = create_pre_prepare_no_bls(generate_state_root(),
                                    view_no=0,
                                    pp_seq_no=1,
                                    inst_id=0)
    return OldViewPrePrepareReply(0,
                                  [pp1, pp2])
def test_process_correct_pre_prepare(validator, view_no):
    # Well-formed messages for the current view are processed.
    assert validator.validate_pre_prepare(
        pre_prepare(view_no=view_no, pp_seq_no=1)) == (PROCESS, None)


def test_process_correct_prepare(validator, view_no):
    assert validator.validate_prepare(
        prepare(view_no=view_no, pp_seq_no=1)) == (PROCESS, None)


def test_process_correct_commit(validator, view_no):
    assert validator.validate_commit(
        commit(view_no=view_no, pp_seq_no=1)) == (PROCESS, None)


def test_process_correct_new_view(validator, view_no):
    assert validator.validate_new_view(new_view(view_no=view_no)) == (PROCESS, None)


def test_process_correct_old_view_pp_req(validator, view_no):
    assert validator.validate_old_view_prep_prepare_req(old_view_pp_req()) == (PROCESS, None)


def test_process_correct_old_view_pp_rep(validator, view_no):
    assert validator.validate_old_view_prep_prepare_rep(old_view_pp_rep()) == (PROCESS, None)


def test_discard_old_view(validator, view_no, mode, waiting_for_new_view):
    # Messages one or two views behind are discarded regardless of catch-up
    # mode or a pending view change.
    validator._data.node_mode = mode
    validator._data.waiting_for_new_view = waiting_for_new_view
    validator._data.view_no = view_no + 2
    assert validator.validate_pre_prepare(pre_prepare(view_no, 1)) == (DISCARD, OLD_VIEW)
    assert validator.validate_prepare(prepare(view_no, 1)) == (DISCARD, OLD_VIEW)
    assert validator.validate_commit(commit(view_no, 1)) == (DISCARD, OLD_VIEW)
    assert validator.validate_new_view(new_view(view_no)) == (DISCARD, OLD_VIEW)
    validator._data.view_no = view_no + 1
    assert validator.validate_pre_prepare(pre_prepare(view_no, 1)) == (DISCARD, OLD_VIEW)
    assert validator.validate_prepare(prepare(view_no, 1)) == (DISCARD, OLD_VIEW)
    assert validator.validate_commit(commit(view_no, 1)) == (DISCARD, OLD_VIEW)
    assert validator.validate_new_view(new_view(view_no)) == (DISCARD, OLD_VIEW)
@pytest.mark.parametrize('pp_seq_no, result', [
    (1, (DISCARD, ALREADY_ORDERED)),
    (50, (DISCARD, ALREADY_ORDERED)),
    (99, (DISCARD, ALREADY_ORDERED)),
    (100, (DISCARD, ALREADY_ORDERED)),
    (101, (PROCESS, None)),
    (300, (PROCESS, None)),
    (399, (PROCESS, None)),
])
def test_discard_below_watermark_3pc(validator, view_no, pp_seq_no, result):
    # Batches at or below the low watermark count as already ordered.
    validator._data.last_ordered_3pc = (0, 1)
    validator._data.low_watermark = 100
    validator._data.high_watermark = 400
    assert validator.validate_pre_prepare(pre_prepare(view_no, pp_seq_no)) == result
    assert validator.validate_prepare(prepare(view_no, pp_seq_no)) == result
    assert validator.validate_commit(commit(view_no, pp_seq_no)) == result


def test_discard_below_watermark_3pc_no_stash(validator, view_no, mode, waiting_for_new_view):
    # Below-watermark messages are discarded (never stashed) in every mode.
    validator._data.node_mode = mode
    validator._data.waiting_for_new_view = waiting_for_new_view
    validator._data.low_watermark = 100
    validator._data.high_watermark = 400
    assert validator.validate_pre_prepare(pre_prepare(view_no, 99)) == (DISCARD, ALREADY_ORDERED)
    assert validator.validate_prepare(prepare(view_no, 99)) == (DISCARD, ALREADY_ORDERED)
    assert validator.validate_commit(commit(view_no, 99)) == (DISCARD, ALREADY_ORDERED)


def test_discard_incorrect_inst_id(validator, view_no):
    # Messages addressed to another replica instance are discarded.
    inst_id = validator._data.inst_id + 1
    assert validator.validate_pre_prepare(pre_prepare(view_no, 1, inst_id)) == (DISCARD, INCORRECT_INSTANCE)
    assert validator.validate_prepare(prepare(view_no, 1, inst_id)) == (DISCARD, INCORRECT_INSTANCE)
    assert validator.validate_commit(commit(view_no, 1, inst_id)) == (DISCARD, INCORRECT_INSTANCE)
@pytest.mark.parametrize('pp_seq_no', [1, 9, 10, 11])
def test_process_ordered_pre_prepare(validator, view_no, pp_seq_no):
    # Re-sent pre-prepares up to and just past the prepare cert are processed.
    validator._data.last_ordered_3pc = (view_no, 10)
    validator._data.prev_view_prepare_cert = 10
    assert validator.validate_pre_prepare(pre_prepare(view_no, pp_seq_no)) == (PROCESS, None)


@pytest.mark.parametrize('mode, result', [
    (Mode.starting, (STASH_CATCH_UP, CATCHING_UP)),
    (Mode.discovering, (STASH_CATCH_UP, CATCHING_UP)),
    (Mode.discovered, (STASH_CATCH_UP, CATCHING_UP)),
    (Mode.syncing, (STASH_CATCH_UP, CATCHING_UP)),
    (Mode.synced, (STASH_CATCH_UP, CATCHING_UP)),
    (Mode.participating, (PROCESS, None)),
])
def test_stash_while_catchup(validator, view_no, mode, result):
    # Everything is stashed until the node reaches participating mode.
    validator._data.node_mode = mode
    assert validator.validate_pre_prepare(pre_prepare(view_no, 1)) == result
    assert validator.validate_prepare(prepare(view_no, 1)) == result
    assert validator.validate_commit(commit(view_no, 1)) == result
    assert validator.validate_new_view(new_view(view_no)) == result
    assert validator.validate_old_view_prep_prepare_req(old_view_pp_req()) == result
    assert validator.validate_old_view_prep_prepare_rep(old_view_pp_rep()) == result


def test_stash_future_view(validator, view_no):
    # Messages from future views are stashed until the view change completes.
    assert validator.validate_pre_prepare(pre_prepare(view_no + 1, 1)) == (STASH_VIEW_3PC, FUTURE_VIEW)
    assert validator.validate_prepare(prepare(view_no + 1, 1)) == (STASH_VIEW_3PC, FUTURE_VIEW)
    assert validator.validate_commit(commit(view_no + 1, 1)) == (STASH_VIEW_3PC, FUTURE_VIEW)
    assert validator.validate_new_view(new_view(view_no + 1)) == (STASH_VIEW_3PC, FUTURE_VIEW)
    assert validator.validate_pre_prepare(pre_prepare(view_no + 2, 1)) == (STASH_VIEW_3PC, FUTURE_VIEW)
    assert validator.validate_prepare(prepare(view_no + 2, 1)) == (STASH_VIEW_3PC, FUTURE_VIEW)
    assert validator.validate_commit(commit(view_no + 2, 1)) == (STASH_VIEW_3PC, FUTURE_VIEW)
    assert validator.validate_new_view(new_view(view_no + 2)) == (STASH_VIEW_3PC, FUTURE_VIEW)


def test_stash_waiting_for_new_view_3pc(validator, view_no):
    # 3PC traffic is stashed while NEW_VIEW is pending.
    validator._data.waiting_for_new_view = True
    assert validator.validate_pre_prepare(pre_prepare(view_no, 1)) == (STASH_VIEW_3PC, WAITING_FOR_NEW_VIEW)
    assert validator.validate_prepare(prepare(view_no, 1)) == (STASH_VIEW_3PC, WAITING_FOR_NEW_VIEW)
    assert validator.validate_commit(commit(view_no, 1)) == (STASH_VIEW_3PC, WAITING_FOR_NEW_VIEW)


def test_stash_waiting_for_new_view_old_view_pp_rep(validator, view_no):
    validator._data.waiting_for_new_view = True
    assert validator.validate_old_view_prep_prepare_rep(old_view_pp_rep()) == (
        STASH_VIEW_3PC, WAITING_FOR_NEW_VIEW)


def test_process_waiting_for_new_view_old_view_pp_req(validator, view_no):
    # Requests for old-view pre-prepares are served even during a view change.
    validator._data.waiting_for_new_view = True
    assert validator.validate_old_view_prep_prepare_req(old_view_pp_req()) == (PROCESS, None)
@pytest.mark.parametrize('pp_seq_no, result', [
    (101, (PROCESS, None)),
    (300, (PROCESS, None)),
    (399, (PROCESS, None)),
    (400, (PROCESS, None)),
    (401, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
    (402, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
    (100000, (STASH_WATERMARKS, OUTSIDE_WATERMARKS)),
])
def test_stash_above_watermark_3pc(validator, view_no, pp_seq_no, result):
    # Batches above the high watermark are stashed, not discarded.
    validator._data.last_ordered_3pc = (0, 1)
    validator._data.low_watermark = 100
    validator._data.high_watermark = 400
    assert validator.validate_pre_prepare(pre_prepare(view_no, pp_seq_no)) == result
    assert validator.validate_prepare(prepare(view_no, pp_seq_no)) == result
    assert validator.validate_commit(commit(view_no, pp_seq_no)) == result


def test_process_waiting_for_new_view_new_view(validator, view_no):
    # The NEW_VIEW itself must be processed while waiting for it.
    validator._data.waiting_for_new_view = True
    assert validator.validate_new_view(new_view(view_no)) == (PROCESS, None)


@pytest.mark.parametrize('pp_seq_no', [1, 9, 10, 11])
def test_process_ordered_prepare_commit(validator, view_no, pp_seq_no):
    validator._data.last_ordered_3pc = (view_no, 10)
    validator._data.prev_view_prepare_cert = 10
    assert validator.validate_prepare(prepare(view_no, pp_seq_no)) == (PROCESS, None)
    assert validator.validate_commit(commit(view_no, pp_seq_no)) == (PROCESS, None)


def test_discard_non_master_old_view_pp_req(validator):
    # Old-view pre-prepare exchange is a master-replica-only protocol.
    old_view_pp_req_msg = old_view_pp_req()
    validator._data.is_master = False
    validator._data.inst_id = 1
    old_view_pp_req_msg.instId = validator._data.inst_id
    assert validator.validate_old_view_prep_prepare_req(old_view_pp_req_msg) == (DISCARD, NON_MASTER)


def test_discard_non_master_old_view_pp_rep(validator):
    old_view_pp_rep_msg = old_view_pp_rep()
    validator._data.is_master = False
    validator._data.inst_id = 1
    old_view_pp_rep_msg.instId = validator._data.inst_id
    assert validator.validate_old_view_prep_prepare_rep(old_view_pp_rep_msg) == (DISCARD, NON_MASTER)


def test_discard_old_view_pp_req_with_incorrect_inst_id(validator):
    old_view_pp_req_msg = old_view_pp_req()
    old_view_pp_req_msg.instId = validator._data.inst_id + 1
    assert validator.validate_old_view_prep_prepare_req(old_view_pp_req_msg) == (DISCARD, INCORRECT_INSTANCE)


def test_discard_old_view_pp_rep_with_incorrect_inst_id(validator):
    old_view_pp_rep_msg = old_view_pp_rep()
    old_view_pp_rep_msg.instId = validator._data.inst_id + 1
    assert validator.validate_old_view_prep_prepare_rep(old_view_pp_rep_msg) == (DISCARD, INCORRECT_INSTANCE)


def test_process_non_master_new_view(validator, view_no):
    # NEW_VIEW is processed on backup replicas too.
    validator._data.is_master = False
    validator._data.inst_id = 1
    assert validator.validate_new_view(new_view(view_no)) == (PROCESS, None)
@pytest.mark.parametrize('pp_seq_no, result', [
    (10, (PROCESS, None)),
    (11, (PROCESS, None)),
    (12, (STASH_WAITING_FIRST_BATCH_IN_VIEW, WAITING_FIRST_BATCH_IN_VIEW)),
    (13, (STASH_WAITING_FIRST_BATCH_IN_VIEW, WAITING_FIRST_BATCH_IN_VIEW)),
    (100, (STASH_WAITING_FIRST_BATCH_IN_VIEW, WAITING_FIRST_BATCH_IN_VIEW)),
])
def test_stash_from_new_view_until_first_batch_is_ordered_non_zero_view(validator, pp_seq_no, result):
    # After a view change, only the first post-cert batch (cert + 1) may be
    # processed until it is ordered; later batches are stashed.
    validator._data.view_no = 1
    validator._data.prev_view_prepare_cert = 10
    validator._data.last_ordered_3pc = (1, 10)
    assert validator.validate_pre_prepare(pre_prepare(1, pp_seq_no)) == result
    assert validator.validate_prepare(prepare(1, pp_seq_no)) == result
    assert validator.validate_commit(commit(1, pp_seq_no)) == result


@pytest.mark.parametrize('pp_seq_no, result', [
    (10, (PROCESS, None)),
    (11, (DISCARD, ALREADY_ORDERED)),
    (12, (PROCESS, None)),
    (13, (PROCESS, None)),
    (100, (PROCESS, None)),
])
def test_process_from_new_view_if_first_batch_is_ordered_non_zero_view(validator, pp_seq_no, result):
    # Once the first batch in the new view (11) is ordered, later batches
    # flow normally and 11 itself is already ordered.
    validator._data.view_no = 1
    validator._data.prev_view_prepare_cert = 10
    validator._data.last_ordered_3pc = (1, 11)
    assert validator.validate_pre_prepare(pre_prepare(1, pp_seq_no)) == result
    assert validator.validate_prepare(prepare(1, pp_seq_no)) == result
    assert validator.validate_commit(commit(1, pp_seq_no)) == result


@pytest.mark.parametrize('pp_seq_no, result', [
    (10, (PROCESS, None)),
    (11, (PROCESS, None)),
    (12, (PROCESS, None)),
    (13, (PROCESS, None)),
    (100, (PROCESS, None)),
])
def test_process_from_new_view_if_first_batch_not_ordered_zero_view(validator, pp_seq_no, result):
    # In view 0 (no prior view change) the first-batch gating does not apply.
    validator._data.view_no = 0
    validator._data.prev_view_prepare_cert = 10
    validator._data.last_ordered_3pc = (0, 10)
    assert validator.validate_pre_prepare(pre_prepare(0, pp_seq_no)) == result
    assert validator.validate_prepare(prepare(0, pp_seq_no)) == result
    assert validator.validate_commit(commit(0, pp_seq_no)) == result
|
|
from time_tracking.forms import ClockForm
from time_tracking.templatetags import clockformats
from expenses.templatetags import moneyformats
from time_tracking.middleware import CurrentUserMiddleware
from time_tracking.models import Clock, Project, Activity, ClockOptions, ActivityOptions, TimeTrackingGroup
from django import forms
from django.conf import settings
from django.contrib import admin
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _, ugettext
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.core.exceptions import PermissionDenied
class ActivityAdmin(admin.ModelAdmin):
    """Admin list of activities with their type and time factor."""
    list_display = ('__unicode__', 'activity_type', 'time_factor')
class ClockOptionsAdmin(admin.ModelAdmin):
    """Admin list of per-user clock options (balances and weekday hours)."""
    list_display = ('username', 'display_balance', 'display_closing', 'hours_per_week', 'unpaid_break', 'weekday_1', 'weekday_2', 'weekday_3', 'weekday_4', 'weekday_5', 'weekday_6', 'weekday_7')
class ActivityOptionsAdmin(admin.ModelAdmin):
    """Admin list of per-user activity options with a money-formatted rate."""
    list_display = ('activity', 'username', 'rate_formatted')

    def rate_formatted(self, obj):
        # Render the raw rate through the shared money template filter.
        return moneyformats.money(obj.rate)
    rate_formatted.short_description = _('rate')
    rate_formatted.admin_order_field = 'rate'
class ClockInForm(forms.Form):
    """Form offering an optional project choice for the clock-in action."""

    def __init__(self, *args, **kwargs):
        super(ClockInForm, self).__init__(*args, **kwargs)
        # Field is added on __init__ due to current-user-related queryset
        self.fields['project'] = forms.ModelChoiceField(label=_('Project'),
            queryset=Project.get_queryset_for_current_user(), required=False)
class ClockAdmin(admin.ModelAdmin):
    """Admin for Clock entries.

    Restricts the change list to the requesting user (unless they hold
    ``time_tracking.can_set_user``), adds clock-in/clock-out views, and —
    when the optional ``billing`` app is installed — a bulk billing action.
    """
    date_hierarchy = 'start'
    list_display = ('status_icon', 'weekday', 'start_date', 'start_time', 'end_time', 'hours_rounded', 'hours_credited_rounded', 'activity', 'rate_formatted', 'cost_formatted', 'project', 'comment')
    list_display_links = ('status_icon', 'weekday', 'start_date',)
    form = ClockForm
    ordering = ['-start']
    # The billing action is only offered when the billing app is installed.
    if 'billing' in settings.INSTALLED_APPS:
        actions = ['bill_selected']

    def queryset(self, request):
        """
        Filter the objects displayed in the change_list to only
        display those for the currently signed in user.
        """
        qs = super(ClockAdmin, self).queryset(request)
        if not request.user.has_perm('time_tracking.can_set_user'):
            qs = qs.filter(user=request.user)
        return qs

    def bill_selected(self, request, queryset):
        """Admin action: create a ClockBill and attach all unbilled entries."""
        # Imported lazily because the billing app is optional.
        from billing.models import ClockBill
        bill = ClockBill()
        bill.save()
        if queryset.filter(bill=None).count() != queryset.count():
            self.message_user(request, _('Some entries were already billed and were not added to this bill.'))
        for obj in queryset.filter(bill=None):
            obj.bill = bill
            # Freeze rate/time factor on the entry so later option changes
            # do not retroactively alter the bill.
            if not obj.billed_rate:
                obj.billed_rate = obj.get_rate() or 0
            if not obj.billed_time_factor:
                obj.billed_time_factor = obj.activity.time_factor
            obj.save()
        return HttpResponseRedirect(bill.get_admin_url())
    bill_selected.short_description = _('Create bill with selected %(verbose_name_plural)s')

    def add_view(self, request, form_url='', extra_context=None):
        """Hide the user field from users who may not set it."""
        # NOTE(review): mutating self.exclude per request is not thread-safe
        # (ModelAdmin instances are shared) — confirm the deployment model.
        if not request.user.has_perm('time_tracking.can_set_user'):
            self.exclude = ('user',)
        else:
            self.exclude = ()
        return super(ClockAdmin, self).add_view(request, form_url, extra_context)

    def change_view(self, request, object_id, form_url='', extra_context=None):
        """Hide the user field from users who may not set it."""
        if not request.user.has_perm('time_tracking.can_set_user'):
            self.exclude = ('user',)
        else:
            self.exclude = ()
        return super(ClockAdmin, self).change_view(request, object_id, form_url, extra_context)

    def changelist_view(self, request, extra_context=None):
        """Change list with permission-dependent columns, a clock-in form
        and a time summary for the filtered queryset."""
        if not request.user.has_perm('time_tracking.can_set_user'):
            # Ordinary users only ever see their own entries, so the user
            # column and filter are dropped.
            if 'user' in self.list_display:
                list_display = list(self.list_display)
                list_display.remove('user')
                self.list_display = tuple(list_display)
            self.list_filter = ['start', 'project', 'activity']
        else:
            if 'user' not in self.list_display:
                self.list_display += ('user',)
            self.list_filter = ['start', 'project', 'activity', 'user']
        from django.contrib.admin.views.main import ChangeList
        # Build a ChangeList by hand to reach the filtered queryset for the
        # summary below.
        # NOTE(review): this constructor signature and the cl.query_set
        # attribute are tied to an older Django release; confirm before
        # upgrading Django.
        cl = ChangeList(request, self.model, self.list_display, self.list_display_links,
                        self.list_filter, self.date_hierarchy, self.search_fields,
                        self.list_select_related, self.list_per_page, self.list_max_show_all, self.list_editable, self)
        clocked_in_time = Clock.clocked_in_time(request.user)
        if clocked_in_time and clocked_in_time.project:
            # TODO this is not working
            initial = {'project': clocked_in_time.project}
        else:
            initial = {'project': Project.get_latest_for_current_user()}
        extra_context = {
            'time_info': Clock.summarize(request.user, cl.query_set),
            'clock_in_form': ClockInForm(initial=initial),
        }
        return super(ClockAdmin, self).changelist_view(request, extra_context)

    def get_urls(self):
        """Prepend the clock-in/clock-out URLs to the default admin URLs."""
        from django.conf.urls.defaults import patterns, url
        urls = super(ClockAdmin, self).get_urls()
        url_patterns = patterns('',
            url(r'^in/$', self.admin_site.admin_view(self.clock_in), name="time_tracking_clock_in"),
            url(r'^out/$', self.admin_site.admin_view(self.clock_out), name="time_tracking_clock_out"),
        )
        url_patterns.extend(urls)
        return url_patterns

    def clock_in(self, request):
        """Clock the user in, clocking out of a different project first."""
        if not self.has_add_permission(request):
            raise PermissionDenied
        else:
            clocked_in_time = Clock.clocked_in_time(request.user)
            project = None
            if request.method == 'POST':
                form = ClockInForm(request.POST)
                if not form.is_valid():
                    # NOTE(review): this raises Exception wrapping the
                    # ValidationError *class*, not an instance — looks
                    # unintended; confirm desired failure behaviour.
                    raise Exception(forms.ValidationError)
                if form.cleaned_data:
                    project = form.cleaned_data['project']
                else:
                    raise forms.ValidationError('Invalid project')
            can_clock_in = not clocked_in_time
            if clocked_in_time:
                if not project or clocked_in_time.project == project:
                    messages.add_message(request, messages.WARNING, _("Please clock out first. Clocked in: %s") % clocked_in_time.__unicode__())
                else:
                    # Switching projects: close the open entry, then clock in
                    # below.
                    clocked_in_time.clock_out()
                    messages.add_message(request, messages.SUCCESS, _("Clocked out: %s") % clocked_in_time.__unicode__())
                    can_clock_in = True
            if can_clock_in:
                try:
                    clock_in_time = Clock.clock_in(request.user, project)
                    if project:
                        messages.add_message(request, messages.SUCCESS, _("Clocked into %(project)s: %(clock)s") %
                            {'clock': clock_in_time.__unicode__(), 'project': project.__unicode__()})
                    else:
                        messages.add_message(request, messages.SUCCESS, _("Clocked in: %(clock)s") %
                            {'clock': clock_in_time.__unicode__()})
                except ValueError:
                    messages.add_message(request, messages.WARNING, _("In order to be able to clock in, you'll have to create a first entry."))
        return HttpResponseRedirect('../')

    def clock_out(self, request):
        """Close the user's open clock entry, if any."""
        if not self.has_change_permission(request):
            raise PermissionDenied
        else:
            clocked_in_time = Clock.clocked_in_time(request.user)
            if (clocked_in_time != None):
                clocked_in_time.clock_out()
                messages.add_message(request, messages.SUCCESS, _("Clocked out: %s") % clocked_in_time.__unicode__())
            else:
                messages.add_message(request, messages.WARNING, _("Please clock in first."))
        return HttpResponseRedirect('../')

    def cost_formatted(self, obj):
        # Money-formatted total cost of the entry.
        return moneyformats.money(obj.get_cost())
    cost_formatted.short_description = _('cost')

    def rate_formatted(self, obj):
        # Money-formatted effective rate of the entry.
        return moneyformats.money(obj.get_rate())
    rate_formatted.short_description = _('rate')
class ProjectAdmin(admin.ModelAdmin):
    """Project list with budget/spend/balance/coverage summary columns."""
    list_display = ('name', 'group_names', 'status', 'budget_formatted', 'hours_sum_formatted', 'cost_sum_formatted', 'balance_formatted', 'coverage_formatted')

    def group_names(self, obj):
        # Comma-separated names of all groups attached to the project.
        return ', '.join([group.__unicode__() for group in obj.groups.all()])
    group_names.short_description = _('groups')

    def budget_formatted(self, obj):
        return moneyformats.money(obj.budget)
    budget_formatted.short_description = _('budget')
    budget_formatted.admin_order_field = 'budget'

    def hours_sum_formatted(self, obj):
        # Total clocked hours, rendered without a unit suffix.
        return clockformats.hours(obj.sum_hours(), units=False)
    hours_sum_formatted.short_description = _('hours spent')

    def cost_sum_formatted(self, obj):
        return moneyformats.money(obj.sum_cost())
    cost_sum_formatted.short_description = _('budget spent')

    def balance_formatted(self, obj):
        return moneyformats.money(obj.balance())
    balance_formatted.short_description = _('balance')

    def coverage_formatted(self, obj):
        return moneyformats.percent(obj.coverage())
    coverage_formatted.short_description = _('coverage')
class TimeTrackingGroupAdmin(admin.ModelAdmin):
    """Group list with member names; the total column is a placeholder."""
    list_display = ('name', 'user_names', 'clock_sum')

    def user_names(self, obj):
        # Comma-separated names of all users in the group.
        return ', '.join([user.__unicode__() for user in obj.user_set.all()])
    user_names.short_description = _('users')

    def clock_sum(self, obj):
        # Placeholder — the aggregate below was never enabled.
        return ''
        #return money(Expense.objects.filter(expense_group=obj).aggregate(Sum('amount'))['amount__sum'])
    clock_sum.short_description = _('total')
# Register all time-tracking models with the default admin site.
admin.site.register(Clock, ClockAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(ClockOptions, ClockOptionsAdmin)
admin.site.register(ActivityOptions, ActivityOptionsAdmin)
admin.site.register(Activity, ActivityAdmin)
admin.site.register(TimeTrackingGroup, TimeTrackingGroupAdmin)
|
|
# -*-python-*-
import cgi
import os
import shutil
import tempfile
import subprocess
import json
import re
import codecs
from threading import Timer
import config
from cherrypy.lib.static import serve_file
from cherrypy.lib.cptools import allow
from cherrypy import HTTPRedirect
from mako.template import Template
from mako.lookup import TemplateLookup
# Mako template lookup rooted at the local html/ directory.
lookup = TemplateLookup(directories=['html'])
# ============================================================
# Application Entry
# ============================================================
class Main(object):
    """CherryPy application root for the Whiley web compiler/playground."""

    # gives access to images/
    def images(self, filename, *args, **kwargs):
        allow(["HEAD", "GET"])
        abspath = os.path.abspath("images/" + filename)
        return serve_file(abspath, "image/png")
    images.exposed = True

    def js(self, filename, *args, **kwargs):
        # Static javascript files.
        allow(["HEAD", "GET"])
        abspath = os.path.abspath("js/" + filename)
        return serve_file(abspath, "application/javascript")
    js.exposed = True

    def css(self, filename, *args, **kwargs):
        # Static stylesheets.
        allow(["HEAD", "GET"])
        abspath = os.path.abspath("css/" + filename)
        return serve_file(abspath, "text/css")
    css.exposed = True

    def compile(self, code, verify, *args, **kwargs):
        """Compile *code* in a throwaway working directory; report as JSON.

        Result JSON is {"result": "success"} on success, "errors" plus a
        list for compile errors, or "error" plus a string for tool failure.
        """
        allow(["HEAD", "POST"])
        # First, create working directory
        dir = createWorkingDirectory()
        dir = config.DATA_DIR + "/" + dir
        # Second, compile the code
        # NOTE: resolves to the module-level compile() helper, which shadows
        # the builtin of the same name.
        result = compile(code,verify,dir)
        # Third, delete working directory
        shutil.rmtree(dir)
        # Fourth, return result as JSON
        # A str result is a hard (tool) error; a non-empty list is errors.
        if type(result) == str:
            response = {"result": "error", "error": result}
        elif len(result) != 0:
            response = {"result": "errors", "errors": result}
        else:
            response = {"result": "success"}
        return json.dumps(response)
    compile.exposed = True

    def save(self, code, *args, **kwargs):
        """Persist *code* under a fresh working-directory id; return the id."""
        allow(["HEAD", "POST"])
        # First, create working directory
        dir = createWorkingDirectory()
        # Second, save the file
        save(config.DATA_DIR + "/" + dir + "/tmp.whiley", code, "utf-8")
        # Fourth, return result as JSON
        return json.dumps({
            "id": dir
        })
    save.exposed = True

    def run(self, code, *args, **kwargs):
        """Compile *code* and, on success, execute it; return JSON results."""
        allow(["HEAD", "POST"])
        # First, create working directory
        dir = createWorkingDirectory()
        dir = config.DATA_DIR + "/" + dir
        # Second, compile the code and then run it
        result = compile(code,"false",dir)
        if type(result) == str:
            response = {"result": "error", "error": result}
        elif len(result) != 0:
            response = {"result": "errors", "errors": result}
        else:
            response = {"result": "success"}
            # Run the code if the compilation succeeded.
            # NOTE(review): placement inside the success branch follows the
            # comment above — confirm against the original indentation.
            output = run(dir)
            response["output"] = output
        # Third, delete working directory
        shutil.rmtree(dir)
        # Fourth, return result as JSON
        return json.dumps(response)
    run.exposed = True

    # application root
    def index(self, id="HelloWorld", *args, **kwargs):
        """Render the editor page, preloading the snippet for *id* if any."""
        allow(["HEAD", "GET"])
        error = ""
        redirect = "NO"
        try:
            # Sanitize the ID.
            safe_id = re.sub("[^a-zA-Z0-9-_]+", "", id)
            # Load the file
            code = load(config.DATA_DIR + "/" + safe_id + "/tmp.whiley","utf-8")
            # Escape the code
            # NOTE(review): cgi.escape was removed in Python 3.8; html.escape
            # is the modern replacement — confirm the target interpreter.
            code = cgi.escape(code)
        except Exception:
            # Any failure (bad id, missing file) falls back to an empty page.
            code = ""
            error = "Invalid ID: %s" % id
            redirect = "YES"
        template = lookup.get_template("index.html")
        return template.render(ROOT_URL=config.VIRTUAL_URL,CODE=code,ERROR=error,REDIRECT=redirect)
    index.exposed = True

    # exposed
    # Everything else should redirect to the main page.
    def default(self, *args, **kwargs):
        raise HTTPRedirect("/")
    default.exposed = True
# ============================================================
# Compiler Interface
# ============================================================
# Load a given JSON file from the filesystem
def load(filename, encoding):
    """Read and return the entire contents of *filename*.

    :param filename: path of the file to read.
    :param encoding: text encoding used to decode the file.
    :returns: the decoded file contents as a unicode string.
    """
    # Context manager guarantees the handle is closed even if read() raises
    # (the original closed it only on the success path).
    with codecs.open(filename, "r", encoding) as f:
        return f.read()
# Save a given file to the filesystem
def save(filename, data, encoding):
    """Write *data* to *filename* using the given text encoding.

    :param filename: path of the file to (over)write.
    :param data: the text to write.
    :param encoding: text encoding used to encode the file.
    """
    # Context manager guarantees the handle is closed (and buffered data
    # flushed) even if write() raises.
    with codecs.open(filename, "w", encoding) as f:
        f.write(data)
# Compile a snippet of Whiley code. This is done by saving the file
# to disk in a temporary location, compiling it using the Whiley2Java
# Compiler and then returning the compilation output.
def compile(code, verify, dir):
    """Compile a Whiley snippet and return the compiler diagnostics.

    The snippet is saved as tmp.whiley inside *dir* and compiled with the
    Whiley2Java compiler in "-brief" mode, under a 30 second timeout.

    :param code: the Whiley source text to compile.
    :param verify: the string "true" to enable the verifier; anything
                   else disables it.
    :param dir: working directory for the source and class files.
    :returns: a list of error records (empty on success), or an error
              string if the compiler could not be launched at all.
    """
    filename = dir + "/tmp.whiley"
    # set required arguments
    args = [
        config.JAVA_CMD,
        "-jar",
        config.WYJC_JAR,
        "-bootpath", config.WYRT_JAR,   # set bootpath
        "-whileydir", dir,              # location of Whiley source file(s)
        "-classdir", dir,               # location to place class file(s)
        "-brief"                        # brief output (easier to parse)
    ]
    # Configure optional arguments
    if verify == "true":
        args.append("-verify")
    # save the file
    save(filename, code, "utf-8")
    args.append(filename)
    # run the compiler
    try:
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=False)
        # Kill the compiler if it exceeds the 30 second budget.
        kill_proc = lambda p: p.kill()
        timer = Timer(30, kill_proc, [proc])
        timer.start()
        # Run process
        (out, err) = proc.communicate()
        timer.cancel()
        # A negative return code means the process was killed by the
        # timeout timer above.
        if proc.returncode < 0:
            return [{
                "filename": "",
                "line": "",
                "start": "",
                "end": "",
                "text": "Compiling / Verifying your program took too long!"
            }]
        # stderr is merged into stdout (stderr=STDOUT above), so err is
        # always None here; use identity comparison for None (bug fix:
        # was `err == None`).
        if err is None:
            return splitErrors(out)
        else:
            return splitErrors(err)
    except Exception as ex:
        # Could not launch / communicate with the compiler at all.
        return "Compile Error: " + str(ex)
def run(dir):
    """Execute the compiled snippet (class "tmp") found inside *dir*.

    The JVM runs under the Java security manager with a 20 second
    wall-clock budget.  Returns the program's combined stdout/stderr,
    or a timeout / error message string.
    """
    try:
        # run the JVM
        proc = subprocess.Popen([
            config.JAVA_CMD,
            "-Djava.security.manager",
            "-Djava.security.policy=whiley.policy",
            "-cp",config.WYJC_JAR + ":" + dir,
            "tmp"
        ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
        # Configure Timeout: kill the JVM after 20 seconds.
        kill_proc = lambda p: p.kill()
        timer = Timer(20, kill_proc, [proc])
        timer.start()
        # Run process
        (out, err) = proc.communicate()
        timer.cancel()
        # Check what happened.  A negative return code means the process
        # was killed by the timer above; non-negative means it exited on
        # its own, so out holds the program's merged stdout/stderr.
        if proc.returncode >= 0:
            return out
        else:
            return "Timeout: Your program ran for too long!"
    except Exception as ex:
        # error, so return that
        return "Run Error: " + str(ex)
# Split errors output from WyC into a list of JSON records, each of
# which includes the filename, the line number, the column start and
# end, as well a the text of the error itself.
def splitErrors(errors):
    """Parse brief compiler output into a list of error records.

    Blank lines are ignored; every other line is handed to splitError().
    """
    return [splitError(line) for line in errors.split("\n") if line != ""]
def splitError(error):
    """Convert one line of brief compiler output into an error record.

    Expected shape is ``filename:line:start:end:text``.  Lines that do
    not match yield a record with empty position fields and the raw
    line as the message text.
    """
    fields = error.split(":", 4)
    if len(fields) < 5:
        # Unparseable line -- report it verbatim with empty positions.
        return {
            "filename": "",
            "line": "",
            "start": "",
            "end": "",
            "text": error
        }
    # NOTE(review): split(":", 4) yields at most 5 fields, so this
    # context branch can never trigger; confirm the intended format
    # before changing the maxsplit, since message text may contain ':'.
    context = splitContext(fields[5]) if len(fields) == 6 else []
    return {
        "filename": fields[0],
        "line": int(fields[1]),
        "start": int(fields[2]),
        "end": int(fields[3]),
        "text": fields[4],
        "context": context
    }
def splitContext(context):
    """Parse a comma-separated list of ``file:line:start:end`` items.

    Returns one record per item with the numeric fields converted to int.
    """
    def _parse(item):
        fields = item.split(":", 3)
        return {
            "filename": fields[0],
            "line": int(fields[1]),
            "start": int(fields[2]),
            "end": int(fields[3])
        }
    return [_parse(entry) for entry in context.split(",")]
# Get the working directory for this request.
def createWorkingDirectory():
    """Create a fresh per-request scratch directory under DATA_DIR.

    :returns: the basename of the new directory; callers rejoin it with
              config.DATA_DIR when they need the full path.
    """
    path = tempfile.mkdtemp(prefix="", dir=config.DATA_DIR)
    # basename() replaces the old os.path.split() whose head was unused.
    return os.path.basename(path)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
from lxml import etree
from cinder import utils
# XML namespace URIs used when building and validating API responses.
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1'
XMLNS_VOLUME_V2 = ('http://docs.openstack.org/api/openstack-volume/2.0/'
                   'content')
def validate_schema(xml, schema_name):
    """Validate *xml* against the named RelaxNG schema.

    Raises (via lxml's assertValid) if the document does not validate.

    :param xml: an etree element, or a string that is parsed first.
    :param schema_name: schema file name without the .rng suffix.
    """
    if isinstance(xml, str):
        xml = etree.fromstring(xml)
    base_path = 'cinder/api/schemas/v1.1/'
    if schema_name in ('atom', 'atom-link'):
        # Atom schemas live at the schema root, not under v1.1.
        base_path = 'cinder/api/schemas/'
    schema_path = os.path.join(utils.cinderdir(),
                               '%s%s.rng' % (base_path, schema_name))
    schema_doc = etree.parse(schema_path)
    relaxng = etree.RelaxNG(schema_doc)
    # assertValid raises etree.DocumentInvalid on failure.
    relaxng.assertValid(xml)
class Selector(object):
    """Selects datum to operate on from an object."""

    def __init__(self, *chain):
        """Initialize the selector.

        Each argument is a subsequent index into the object.
        """
        self.chain = chain

    def __repr__(self):
        """Return a representation of the selector."""
        return "Selector" + repr(self.chain)

    def __call__(self, obj, do_raise=False):
        """Select a datum to operate on.

        Walks the selector chain over *obj*: callable steps are applied
        to the current value, anything else is used as an index.

        :param obj: The object from which to select the object.
        :param do_raise: If False (the default), return None if the
                         indexed datum does not exist.  Otherwise,
                         raise a KeyError.
        """
        current = obj
        for step in self.chain:
            if callable(step):
                current = step(current)
                continue
            try:
                current = current[step]
            except (KeyError, IndexError):
                if do_raise:
                    # Normalize to KeyError for consistency.
                    raise KeyError(step)
                # Missing datum and do_raise is off: give up quietly.
                return None
        return current
def get_items(obj):
    """Get items in obj."""
    return [(key, value) for key, value in obj.items()]
class EmptyStringSelector(Selector):
    """Returns the empty string if Selector would return None."""

    def __call__(self, obj, do_raise=False):
        """Returns empty string if the selected value does not exist."""
        # Force do_raise so a missing datum surfaces as KeyError, which
        # is then translated into the empty string.
        parent_call = super(EmptyStringSelector, self).__call__
        try:
            return parent_call(obj, True)
        except KeyError:
            return ""
class ConstantSelector(object):
    """Returns a constant."""

    def __init__(self, value):
        """Initialize the selector.

        :param value: The value to return.
        """
        self.value = value

    def __repr__(self):
        """Return a representation of the selector."""
        return repr(self.value)

    def __call__(self, _obj, _do_raise=False):
        """Select a datum to operate on.

        Ignores both arguments and returns the stored constant; the
        signature is kept compatible with Selector.__call__().
        """
        return self.value
class TemplateElement(object):
    """Represent an element in the template."""
    def __init__(self, tag, attrib=None, selector=None, subselector=None,
                 **extra):
        """Initialize an element.
        Initializes an element in the template. Keyword arguments
        specify attributes to be set on the element; values must be
        callables. See TemplateElement.set() for more information.
        :param tag: The name of the tag to create.
        :param attrib: An optional dictionary of element attributes.
        :param selector: An optional callable taking an object and
                         optional boolean do_raise indicator and
                         returning the object bound to the element.
        :param subselector: An optional callable taking an object and
                            optional boolean do_raise indicator and
                            returning the object bound to the element.
                            This is used to further refine the datum
                            object returned by selector in the event
                            that it is a list of objects.
        """
        # Convert selector into a Selector
        if selector is None:
            selector = Selector()
        elif not callable(selector):
            selector = Selector(selector)
        # Convert subselector into a Selector
        if subselector is not None and not callable(subselector):
            subselector = Selector(subselector)
        self.tag = tag
        self.selector = selector
        self.subselector = subselector
        self.attrib = {}
        self._text = None
        self._children = []
        self._childmap = {}
        # Run the incoming attributes through set() so that they
        # become selectorized.  NOTE: this mutates a caller-supplied
        # attrib dict via update().
        if not attrib:
            attrib = {}
        attrib.update(extra)
        for k, v in attrib.items():
            self.set(k, v)
    def __repr__(self):
        """Return a representation of the template element."""
        return ('<%s.%s %r at %#x>' %
                (self.__class__.__module__, self.__class__.__name__,
                 self.tag, id(self)))
    def __len__(self):
        """Return the number of child elements."""
        return len(self._children)
    def __contains__(self, key):
        """Determine whether a child node named by key exists."""
        return key in self._childmap
    def __getitem__(self, idx):
        """Retrieve a child node by index or name."""
        # NOTE: basestring is Python 2 only; this module targets Python 2.
        if isinstance(idx, basestring):
            # Allow access by node name
            return self._childmap[idx]
        else:
            return self._children[idx]
    def append(self, elem):
        """Append a child to the element."""
        # Unwrap templates...
        elem = elem.unwrap()
        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)
        self._children.append(elem)
        self._childmap[elem.tag] = elem
    def extend(self, elems):
        """Append children to the element."""
        # Pre-evaluate the elements so that either all are added or
        # none are (duplicate detection happens before mutation).
        elemmap = {}
        elemlist = []
        for elem in elems:
            # Unwrap templates...
            elem = elem.unwrap()
            # Avoid duplications
            if elem.tag in self._childmap or elem.tag in elemmap:
                raise KeyError(elem.tag)
            elemmap[elem.tag] = elem
            elemlist.append(elem)
        # Update the children
        self._children.extend(elemlist)
        self._childmap.update(elemmap)
    def insert(self, idx, elem):
        """Insert a child element at the given index."""
        # Unwrap templates...
        elem = elem.unwrap()
        # Avoid duplications
        if elem.tag in self._childmap:
            raise KeyError(elem.tag)
        self._children.insert(idx, elem)
        self._childmap[elem.tag] = elem
    def remove(self, elem):
        """Remove a child element."""
        # Unwrap templates...
        elem = elem.unwrap()
        # Check if element exists
        if elem.tag not in self._childmap or self._childmap[elem.tag] != elem:
            raise ValueError(_('element is not a child'))
        self._children.remove(elem)
        del self._childmap[elem.tag]
    def get(self, key):
        """Get an attribute.
        Returns a callable which performs datum selection.
        :param key: The name of the attribute to get.
        """
        return self.attrib[key]
    def set(self, key, value=None):
        """Set an attribute.
        :param key: The name of the attribute to set.
        :param value: A callable taking an object and optional boolean
                      do_raise indicator and returning the datum bound
                      to the attribute. If None, a Selector() will be
                      constructed from the key. If a string, a
                      Selector() will be constructed from the string.
        """
        # Convert value to a selector
        if value is None:
            value = Selector(key)
        elif not callable(value):
            value = Selector(value)
        self.attrib[key] = value
    def keys(self):
        """Return the attribute names."""
        return self.attrib.keys()
    def items(self):
        """Return the attribute names and values."""
        return self.attrib.items()
    def unwrap(self):
        """Unwraps a template to return a template element."""
        # We are a template element
        return self
    def wrap(self):
        """Wraps a template element to return a template."""
        # Wrap in a basic Template
        return Template(self)
    def apply(self, elem, obj):
        """Apply text and attributes to an etree.Element.
        Applies the text and attribute instructions in the template
        element to an etree.Element instance.
        :param elem: An etree.Element instance.
        :param obj: The base object associated with this template
                    element.
        """
        # Start with the text...  (unicode() is Python 2 only.)
        if self.text is not None:
            elem.text = unicode(self.text(obj))
        # Now set up all the attributes...
        for key, value in self.attrib.items():
            try:
                elem.set(key, unicode(value(obj, True)))
            except KeyError:
                # Attribute has no value, so don't include it
                pass
    def getAttrib(self, obj):
        """Get attribute."""
        tmpattrib = {}
        #Now set up all the attributes...
        for key, value in self.attrib.items():
            try:
                tmpattrib[key] = value(obj)
            except KeyError:
                # Attribute has no value, so don't include it
                pass
        return tmpattrib
    def _render(self, parent, datum, patches, nsmap):
        """Internal rendering.
        Renders the template node into an etree.Element object.
        Returns the etree.Element object.
        :param parent: The parent etree.Element instance.
        :param datum: The datum associated with this template element.
        :param patches: A list of other template elements that must
                        also be applied.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance.
        """
        # Allocate a node
        if callable(self.tag):
            tagname = self.tag(datum)
        else:
            tagname = self.tag
        # If the datum is None
        if datum is not None:
            tmpattrib = self.getAttrib(datum)
        else:
            tmpattrib = {}
        # A colon-separated tagname describes a nested path of elements;
        # walk any matching prefix already present under parent so the
        # new subtree is merged rather than duplicated.
        tagnameList = tagname.split(':')
        insertIndex = 0
        #If parent is not none and has same tagname
        if parent is not None:
            for i in range(0, len(tagnameList)):
                tmpInsertPos = parent.find(tagnameList[i])
                if tmpInsertPos is None:
                    break
                # NOTE: cmp() is Python 2 only; compares the existing
                # element's attributes against the freshly computed ones.
                elif not cmp(parent.attrib, tmpattrib) == 0:
                    break
                parent = tmpInsertPos
                insertIndex = i + 1
        if insertIndex >= len(tagnameList):
            insertIndex = insertIndex - 1
        #Create root elem
        elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap)
        rootelem = elem
        subelem = elem
        #Create subelem
        for i in range((insertIndex + 1), len(tagnameList)):
            subelem = etree.SubElement(elem, tagnameList[i])
            elem = subelem
        # If we have a parent, append the node to the parent
        if parent is not None:
            #If we can merge this element, then insert
            if insertIndex > 0:
                parent.insert(len(list(parent)), rootelem)
            else:
                parent.append(rootelem)
        # If the datum is None, do nothing else
        if datum is None:
            return rootelem
        # Apply this template element to the element
        self.apply(subelem, datum)
        # Additionally, apply the patches
        for patch in patches:
            patch.apply(subelem, datum)
        # We have fully rendered the element; return it
        return rootelem
    def render(self, parent, obj, patches=[], nsmap=None):
        """Render an object.
        Renders an object against this template node. Returns a list
        of two-item tuples, where the first item is an etree.Element
        instance and the second item is the datum associated with that
        instance.
        :param parent: The parent for the etree.Element instances.
        :param obj: The object to render this template element
                    against.
        :param patches: A list of other template elements to apply
                        when rendering this template element.
        :param nsmap: An optional namespace dictionary to attach to
                      the etree.Element instances.
        """
        # First, get the datum we're rendering
        data = None if obj is None else self.selector(obj)
        # Check if we should render at all
        if not self.will_render(data):
            return []
        elif data is None:
            return [(self._render(parent, None, patches, nsmap), None)]
        # Make the data into a list if it isn't already
        if not isinstance(data, list):
            data = [data]
        elif parent is None:
            raise ValueError(_('root element selecting a list'))
        # Render all the elements
        elems = []
        for datum in data:
            if self.subselector is not None:
                datum = self.subselector(datum)
            elems.append((self._render(parent, datum, patches, nsmap), datum))
        # Return all the elements rendered, as well as the
        # corresponding datum for the next step down the tree
        return elems
    def will_render(self, datum):
        """Hook method.
        An overridable hook method to determine whether this template
        element will be rendered at all. By default, returns False
        (inhibiting rendering) if the datum is None.
        :param datum: The datum associated with this template element.
        """
        # Don't render if datum is None
        return datum is not None
    def _text_get(self):
        """Template element text.
        Either None or a callable taking an object and optional
        boolean do_raise indicator and returning the datum bound to
        the text of the template element.
        """
        return self._text
    def _text_set(self, value):
        # Convert value to a selector
        if value is not None and not callable(value):
            value = Selector(value)
        self._text = value
    def _text_del(self):
        self._text = None
    # 'text' is a selectorized property: assigning a non-callable wraps
    # it in a Selector (see _text_set).
    text = property(_text_get, _text_set, _text_del)
    def tree(self):
        """Return string representation of the template tree.
        Returns a representation of the template rooted at this
        element as a string, suitable for inclusion in debug logs.
        """
        # Build the inner contents of the tag...
        contents = [self.tag, '!selector=%r' % self.selector]
        # Add the text...
        if self.text is not None:
            contents.append('!text=%r' % self.text)
        # Add all the other attributes
        for key, value in self.attrib.items():
            contents.append('%s=%r' % (key, value))
        # If there are no children, return it as a closed tag
        if len(self) == 0:
            return '<%s/>' % ' '.join([str(i) for i in contents])
        # OK, recurse to our children
        children = [c.tree() for c in self]
        # Return the result
        return ('<%s>%s</%s>' %
                (' '.join(contents), ''.join(children), self.tag))
def SubTemplateElement(parent, tag, attrib=None, selector=None,
                       subselector=None, **extra):
    """Create a template element as a child of another.

    Corresponds to the etree.SubElement interface.  Parameters are as
    for TemplateElement, with the addition of the parent.
    """
    # Fold the keyword attributes into the attribute dictionary
    # (mutating a caller-supplied dict, exactly as before).
    attrib = attrib or {}
    attrib.update(extra)
    child = TemplateElement(tag, attrib=attrib, selector=selector,
                            subselector=subselector)
    # A None parent simply yields a detached element.
    if parent is not None:
        parent.append(child)
    return child
class Template(object):
    """Represent a template."""
    def __init__(self, root, nsmap=None):
        """Initialize a template.
        :param root: The root element of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """
        self.root = root.unwrap() if root is not None else None
        self.nsmap = nsmap or {}
        # Defaults passed through to etree.tostring() in serialize().
        self.serialize_options = dict(encoding='UTF-8', xml_declaration=True)
    def _serialize(self, parent, obj, siblings, nsmap=None):
        """Internal serialization.
        Recursive routine to build a tree of etree.Element instances
        from an object based on the template. Returns the first
        etree.Element instance rendered, or None.
        :param parent: The parent etree.Element instance. Can be
                       None.
        :param obj: The object to render.
        :param siblings: The TemplateElement instances against which
                         to render the object.
        :param nsmap: An optional namespace dictionary to be
                      associated with the etree.Element instance
                      rendered.
        """
        # First step, render the element
        # (siblings[1:] are applied to it as patches).
        elems = siblings[0].render(parent, obj, siblings[1:], nsmap)
        # Now, traverse all child elements
        seen = set()
        for idx, sibling in enumerate(siblings):
            for child in sibling:
                # Have we handled this child already?
                if child.tag in seen:
                    continue
                seen.add(child.tag)
                # Determine the child's siblings
                nieces = [child]
                for sib in siblings[idx + 1:]:
                    if child.tag in sib:
                        nieces.append(sib[child.tag])
                # Now call this function for all data elements recursively
                for elem, datum in elems:
                    self._serialize(elem, datum, nieces)
        # Return the first element; at the top level, this will be the
        # root element
        if elems:
            return elems[0][0]
    def serialize(self, obj, *args, **kwargs):
        """Serialize an object.
        Serializes an object against the template. Returns a string
        with the serialized XML. Positional and keyword arguments are
        passed to etree.tostring().
        :param obj: The object to serialize.
        """
        elem = self.make_tree(obj)
        if elem is None:
            return ''
        # Explicit keyword arguments win over serialize_options defaults.
        for k, v in self.serialize_options.items():
            kwargs.setdefault(k, v)
        # Serialize it into XML
        return etree.tostring(elem, *args, **kwargs)
    def make_tree(self, obj):
        """Create a tree.
        Serializes an object against the template. Returns an Element
        node with appropriate children.
        :param obj: The object to serialize.
        """
        # If the template is empty, return the empty string
        if self.root is None:
            return None
        # Get the siblings and nsmap of the root element
        siblings = self._siblings()
        nsmap = self._nsmap()
        # Form the element tree
        return self._serialize(None, obj, siblings, nsmap)
    def _siblings(self):
        """Hook method for computing root siblings.
        An overridable hook method to return the siblings of the root
        element. By default, this is the root element itself.
        """
        return [self.root]
    def _nsmap(self):
        """Hook method for computing the namespace dictionary.
        An overridable hook method to return the namespace dictionary.
        """
        return self.nsmap.copy()
    def unwrap(self):
        """Unwraps a template to return a template element."""
        # Return the root element
        return self.root
    def wrap(self):
        """Wraps a template element to return a template."""
        # We are a template
        return self
    def apply(self, master):
        """Hook method for determining slave applicability.
        An overridable hook method used to determine if this template
        is applicable as a slave to a given master template.
        :param master: The master template to test.
        """
        return True
    def tree(self):
        """Return string representation of the template tree.
        Returns a representation of the template as a string, suitable
        for inclusion in debug logs.
        """
        return "%r: %s" % (self, self.root.tree())
class MasterTemplate(Template):
    """Represent a master template.
    Master templates are versioned derivatives of templates that
    additionally allow slave templates to be attached. Slave
    templates allow modification of the serialized result without
    directly changing the master.
    """
    def __init__(self, root, version, nsmap=None):
        """Initialize a master template.
        :param root: The root element of the template.
        :param version: The version number of the template.
        :param nsmap: An optional namespace dictionary to be
                      associated with the root element of the
                      template.
        """
        super(MasterTemplate, self).__init__(root, nsmap)
        self.version = version
        self.slaves = []
    def __repr__(self):
        """Return string representation of the template."""
        return ("<%s.%s object version %s at %#x>" %
                (self.__class__.__module__, self.__class__.__name__,
                 self.version, id(self)))
    def _siblings(self):
        """Hook method for computing root siblings.
        An overridable hook method to return the siblings of the root
        element. This is the root element plus the root elements of
        all the slave templates.
        """
        return [self.root] + [slave.root for slave in self.slaves]
    def _nsmap(self):
        """Hook method for computing the namespace dictionary.
        An overridable hook method to return the namespace dictionary.
        The namespace dictionary is computed by taking the master
        template's namespace dictionary and updating it from all the
        slave templates.
        """
        nsmap = self.nsmap.copy()
        for slave in self.slaves:
            nsmap.update(slave._nsmap())
        return nsmap
    def attach(self, *slaves):
        """Attach one or more slave templates.
        Attaches one or more slave templates to the master template.
        Slave templates must have a root element with the same tag as
        the master template. The slave template's apply() method will
        be called to determine if the slave should be applied to this
        master; if it returns False, that slave will be skipped.
        (This allows filtering of slaves based on the version of the
        master template.)
        """
        # Validate all slaves before mutating self.slaves, so a bad
        # slave in the middle of the list leaves the master unchanged.
        slave_list = []
        for slave in slaves:
            slave = slave.wrap()
            # Make sure we have a tree match
            if slave.root.tag != self.root.tag:
                msg = (_("Template tree mismatch; adding slave %(slavetag)s "
                         "to master %(mastertag)s") %
                       {'slavetag': slave.root.tag,
                        'mastertag': self.root.tag})
                raise ValueError(msg)
            # Make sure slave applies to this template
            if not slave.apply(self):
                continue
            slave_list.append(slave)
        # Add the slaves
        self.slaves.extend(slave_list)
    def copy(self):
        """Return a copy of this master template."""
        # Return a copy of the MasterTemplate; note the slave list is
        # copied shallowly (slaves themselves are shared).
        tmp = self.__class__(self.root, self.version, self.nsmap)
        tmp.slaves = self.slaves[:]
        return tmp
class SlaveTemplate(Template):
    """Represent a slave template.

    Slave templates are versioned derivatives of templates.  Each
    slave declares the range of master template versions it may be
    attached to.
    """

    def __init__(self, root, min_vers, max_vers=None, nsmap=None):
        """Initialize a slave template.

        :param root: The root element of the template.
        :param min_vers: Minimum master template version this slave
                         supports.
        :param max_vers: Optional maximum master template version.
        :param nsmap: Optional namespace dictionary for the root
                      element of the template.
        """
        super(SlaveTemplate, self).__init__(root, nsmap)
        self.min_vers = min_vers
        self.max_vers = max_vers

    def __repr__(self):
        """Return string representation of the template."""
        return ("<%s.%s object versions %s-%s at %#x>" %
                (self.__class__.__module__, self.__class__.__name__,
                 self.min_vers, self.max_vers, id(self)))

    def apply(self, master):
        """Report whether this slave applies to *master*.

        The master's version number must fall within the inclusive
        range [min_vers, max_vers].

        :param master: The master template to test.
        """
        if master.version < self.min_vers:
            # Master is too old for this slave.
            return False
        # No upper bound means any newer master is acceptable.
        return self.max_vers is None or master.version <= self.max_vers
class TemplateBuilder(object):
    """Template builder.
    This class exists to allow templates to be lazily built without
    having to build them each time they are needed. It must be
    subclassed, and the subclass must implement the construct()
    method, which must return a Template (or subclass) instance. The
    constructor will always return the template returned by
    construct(), or, if it has a copy() method, a copy of that
    template.
    """
    # Class-level cache: construct() runs at most once per subclass.
    _tmpl = None
    def __new__(cls, copy=True):
        """Construct and return a template.
        :param copy: If True (the default), a copy of the template
                     will be constructed and returned, if possible.
        """
        # Do we need to construct the template?
        if cls._tmpl is None:
            tmp = super(TemplateBuilder, cls).__new__(cls)
            # Construct the template and cache it on the class itself,
            # so later instantiations reuse it.
            cls._tmpl = tmp.construct()
        # If the template has a copy attribute, return the result of
        # calling it
        if copy and hasattr(cls._tmpl, 'copy'):
            return cls._tmpl.copy()
        # Return the template
        return cls._tmpl
    def construct(self):
        """Construct a template.
        Called to construct a template instance, which it must return.
        Only called once.
        """
        raise NotImplementedError(_("subclasses must implement construct()!"))
def make_links(parent, selector=None):
    """Attach an Atom <links> element to the parent."""
    link = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
                              selector=selector)
    # Standard Atom link attributes, each selectorized from its own key.
    for attr in ('rel', 'type', 'href'):
        link.set(attr)
    # Just for completeness...
    return link
def make_flat_dict(name, selector=None, subselector=None, ns=None):
    """Utility for simple XML templates.
    Simple templates are templates that traditionally used
    XMLDictSerializer with no metadata.
    Returns a template element where the top-level element has the
    given tag name, and where sub-elements have tag names derived
    from the object's keys and text derived from the object's values.
    This only works for flat dictionary objects, not dictionaries
    containing nested lists or dictionaries.
    """
    # Set up the names we need...
    if ns is None:
        elemname = name
        tagname = Selector(0)
    else:
        elemname = '{%s}%s' % (ns, name)
        tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0])
    if selector is None:
        selector = name
    # Build the root element
    root = TemplateElement(elemname, selector=selector,
                           subselector=subselector)
    # Build an element to represent all the keys and values.
    # get_items() yields (key, value) pairs: index 0 names the tag,
    # index 1 supplies the element text.
    elem = SubTemplateElement(root, tagname, selector=get_items)
    elem.text = 1
    # Return the template
    return root
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import pytest
from flexmock import flexmock
from dockerfile_parse import DockerfileParser
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner
from atomic_reactor.plugins.pre_add_dockerfile import AddDockerfilePlugin
from atomic_reactor.plugins.pre_add_labels_in_df import AddLabelsPlugin
from atomic_reactor.util import ImageName
from tests.constants import MOCK_SOURCE
from tests.fixtures import docker_tasker
class Y(object):
    """Bare attribute holder; an instance stands in for a builder 'source'."""
    pass
class X(object):
    # Fake builder injected as workflow.builder by the tests below.
    # NOTE(review): the tests assign the class itself (not an instance),
    # so attributes set on it (df_path, df_dir) are shared mutable state
    # across tests -- confirm this is intentional.
    image_id = "xxx"
    source = Y()
    source.dockerfile_path = None
    source.path = None
    base_image = ImageName(repo="qwe", tag="asd")
def test_adddockerfile_plugin(tmpdir, docker_tasker):
    """With an explicit nvr, the plugin ADDs the Dockerfile under /root/buildinfo."""
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)
    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddDockerfilePlugin.key,
            'args': {'nvr': 'rhel-server-docker-7.1-20'}
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None
    # The ADD line is inserted before CMD, named after the given nvr.
    expected_output = """
FROM fedora
RUN yum install -y python-django
ADD Dockerfile-rhel-server-docker-7.1-20 /root/buildinfo/Dockerfile-rhel-server-docker-7.1-20
CMD blabla"""
    assert df.content == expected_output
def test_adddockerfile_todest(tmpdir, docker_tasker):
    """The 'destdir' argument controls where the Dockerfile is ADDed."""
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)
    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddDockerfilePlugin.key,
            'args': {'nvr': 'jboss-eap-6-docker-6.4-77',
                     'destdir': '/usr/share/doc/'}
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None
    # Destination follows the custom destdir instead of /root/buildinfo.
    expected_output = """
FROM fedora
RUN yum install -y python-django
ADD Dockerfile-jboss-eap-6-docker-6.4-77 /usr/share/doc/Dockerfile-jboss-eap-6-docker-6.4-77
CMD blabla"""
    assert df.content == expected_output
def test_adddockerfile_nvr_from_labels(tmpdir, docker_tasker):
    """Without an nvr argument, it is derived from Name/Version/Release labels."""
    dockerfile = DockerfileParser(str(tmpdir))
    dockerfile.content = """
FROM fedora
RUN yum install -y python-django
LABEL Name="jboss-eap-6-docker" "Version"="6.4" "Release"=77
CMD blabla"""
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = dockerfile.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)
    plugin_conf = [{'name': AddDockerfilePlugin.key}]
    runner = PreBuildPluginsRunner(docker_tasker, workflow, plugin_conf)
    runner.run()
    assert AddDockerfilePlugin.key is not None
    assert "ADD Dockerfile-jboss-eap-6-docker-6.4-77 /root/buildinfo/Dockerfile-jboss-eap-6-docker-6.4-77" in dockerfile.content
def test_adddockerfile_nvr_from_labels2(tmpdir, docker_tasker):
    """NVR labels added by AddLabelsPlugin earlier in the chain are honored."""
    df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    df = DockerfileParser(str(tmpdir))
    df.content = df_content
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    # Stub out the registry inspection so AddLabelsPlugin sees no base labels.
    flexmock(workflow, base_image_inspect={"Config": {"Labels": {}}})
    workflow.builder = X
    workflow.builder.df_path = df.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)
    runner = PreBuildPluginsRunner(
        docker_tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': {'Name': 'jboss-eap-6-docker',
                                'Version': '6.4',
                                'Release': '77'}}
         },
         {
            'name': AddDockerfilePlugin.key
        }]
    )
    runner.run()
    assert AddDockerfilePlugin.key is not None
    assert "ADD Dockerfile-jboss-eap-6-docker-6.4-77 /root/buildinfo/Dockerfile-jboss-eap-6-docker-6.4-77" in df.content
def test_adddockerfile_fails(tmpdir, docker_tasker):
    """Without an 'nvr' argument or NVR labels the plugin run must raise ValueError."""
    dockerfile_text = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    parser = DockerfileParser(str(tmpdir))
    parser.content = dockerfile_text
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = parser.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)
    runner = PreBuildPluginsRunner(docker_tasker, workflow,
                                   [{'name': AddDockerfilePlugin.key}])
    with pytest.raises(ValueError):
        runner.run()
def test_adddockerfile_final(tmpdir, docker_tasker):
    """use_final_dockerfile=True should ADD the plain 'Dockerfile' instead of a copy."""
    dockerfile_text = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
    parser = DockerfileParser(str(tmpdir))
    parser.content = dockerfile_text
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    workflow.builder = X
    workflow.builder.df_path = parser.dockerfile_path
    workflow.builder.df_dir = str(tmpdir)
    plugin_conf = [{
        'name': AddDockerfilePlugin.key,
        'args': {'nvr': 'rhel-server-docker-7.1-20', "use_final_dockerfile": True},
    }]
    PreBuildPluginsRunner(docker_tasker, workflow, plugin_conf).run()
    assert AddDockerfilePlugin.key is not None
    expected_output = """
FROM fedora
RUN yum install -y python-django
ADD Dockerfile /root/buildinfo/Dockerfile-rhel-server-docker-7.1-20
CMD blabla"""
    assert parser.content == expected_output
|
|
import time
import math
import sys
import pygame
import jog2d
def simu(ns, shared_regs, change_regs):
print "simu started"
twopi = 2.0 * math.pi
pygame.init()
xmax = 1000
ymax = 600
size = (xmax, ymax)
screen = pygame.display.set_mode(size)
tpict = []
tpictply = []
pict1 = jog2d.Picture(800, 50, 950, 200)
tpict.append(pict1)
tpictply.append(pict1.ply)
tobs = []
tobsply = []
maze = False
if maze:
obs1 = jog2d.Obstacle(240, 20, 260, 450)
tobs.append(obs1)
tobsply.append(obs1.ply)
obs2 = jog2d.Obstacle(490, 150, 510, 580)
tobs.append(obs2)
tobsply.append(obs2.ply)
obs3 = jog2d.Obstacle(740, 20, 760, 450)
tobs.append(obs3)
tobsply.append(obs3.ply)
obs_l = jog2d.Obstacle(0, 0, 10, ymax)
tobs.append(obs_l)
tobsply.append(obs_l.ply)
obs_r = jog2d.Obstacle(xmax - 10, 0, xmax, ymax)
tobs.append(obs_r)
tobsply.append(obs_r.ply)
obs_t = jog2d.Obstacle(11, 0, xmax - 11, 10)
tobs.append(obs_t)
tobsply.append(obs_t.ply)
obs_b = jog2d.Obstacle(11, ymax - 10, xmax - 11, ymax)
tobs.append(obs_b)
tobsply.append(obs_b.ply)
jog = jog2d.Jog2d()
leds = [0, 0, 0, 0] # leds off
# init location
xjog = 500
yjog = 250
xjogi = int(round(xjog))
yjogi = int(round(yjog))
jog.translate(xjogi, yjogi)
# init heading
hdjog0 = jog.head
hdjog = 110.0
angrot = (hdjog - hdjog0)
jog.rotate(angrot)
# init sonars
sonar_hits = jog.obstacle(tobsply)
sonar_set = []
sonar_time = []
for ison in range(5):
sonar_set.append(False)
sonar_time.append(time.time())
addrs = 0x71 + ison
for regs in [0, 2, 3]:
st_reg = "i2c_0x%2.2x_%2.2d" % (addrs, regs)
shared_regs[st_reg] = 0
# init compass
addrs = 0x60
for regs in [1, 2, 3]:
st_reg = "i2c_0x%2.2x_%2.2d" % (addrs, regs)
shared_regs[st_reg] = 0
# init motion and odometry
motors_period_reg = 100
odo_left_actual = 0.0
odo_right_actual = 0.0
motor_left_direction = 1.0
motor_right_direction = 1.0
wheel_diameter = (2.32 * 2.54e-2)
wheel_radius = wheel_diameter / 2.0
n_ticks = 576
wheel_dist = 0.2
motor_left_ang_speed = 0.0
motor_right_ang_speed = 0.0
motor_left_lin_speed = 0.0
motor_right_lin_speed = 0.0
motor_left_dead_zone = 17.0 # 11
motor_right_dead_zone = 19.0 # 13
revsec = 4 # revol/sec at maximum PWM
motors_alpha = revsec * twopi / motors_period_reg
ang_speed = 0.0
shared_regs['fpga_0x06'] = motors_period_reg
t = 0.0
d_time = 0.05
while ns.alive:
start_time = time.time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
ns.alive = False
print "trying to close pygame window ... no action"
sys.exit()
# print "vJOG alive ",t
#get the changes in JOG configuration
# and perform corresponding
update_motors = False
for keyval in change_regs:
key = keyval[0]
val = shared_regs[key]
#print "vJOG",key,val,t
change_regs.remove(keyval)
# FPGA registers
if key == 'fpga_0x18': # leds
#print "vJOG : leds"
for il in range(4):
leds[il] = (val >> il) & 0x1
elif key == 'fpga_0x20': # odometer 1
shared_regs[key] = int(odo_right_actual) & 65535
#print "vJOG : odo1",shared_regs[key]
elif key == 'fpga_0x22': # odometer 2
shared_regs[key] = int(odo_left_actual) & 65535
#print "vJOG : od2",shared_regs[key]
elif key == 'fpga_0x06': # period PWM (100 = 0x64)
#print "vJOG : period PWM"
motors_period_reg = val - 1
motors_alpha = revsec * twopi / motors_period_reg
#print "motors_period_reg",motors_period_reg
elif key == 'fpga_0x08': # value PWM1 (0 to 99)
#print "vJOG : value PWM1"
update_motors = True
elif key == 'fpga_0x0a': # direction PWM1 (0: forward)
#print "vJOG : direction PWM1"
update_motors = True
elif key == 'fpga_0x0c': # value PWM2 (0 to 99)
#print "vJOG : value PWM2"
update_motors = True
elif key == 'fpga_0x0e': # direction PWM2 (0: forward)
#print "vJOG : direction PWM2"
update_motors = True
if update_motors:
motor_left_direction = 1.0
if int(shared_regs['fpga_0x0a']) & 1:
motor_left_direction = -1.0
motor_left_ang_speed = shared_regs['fpga_0x08']
motor_left_ang_speed -= motor_left_dead_zone
if motor_left_ang_speed < 0:
motor_left_ang_speed = 0.0
if motor_left_ang_speed > motors_period_reg:
motor_left_ang_speed = motors_period_reg
motor_left_ang_speed *= (motors_alpha * motor_left_direction)
motor_right_direction = 1.0
if int(shared_regs['fpga_0x0e']) & 1:
motor_right_direction = -1.0
motor_right_ang_speed = shared_regs['fpga_0x0c']
motor_right_ang_speed -= motor_right_dead_zone
if motor_right_ang_speed < 0:
motor_right_ang_speed = 0.0
if motor_right_ang_speed > motors_period_reg:
motor_right_ang_speed = motors_period_reg
motor_right_ang_speed *= (motors_alpha * motor_right_direction)
# Sonar started
if key == "i2c_0x73_00": # sonarC (center) cmd
if val == 81:
sonar_set[2] = True
sonar_time[2] = time.time()
v = int(round(sonar_hits['beamsonarC'][1]))
#print sonar_hits,v
shared_regs['i2c_0x73_02'] = (v >> 8) & 0xff
shared_regs['i2c_0x73_03'] = v & 0xff
if key == "i2c_0x71_00": # sonarL (left) cmd
if val == 81:
sonar_set[0] = True
sonar_time[0] = time.time()
v = int(round(sonar_hits['beamsonarL'][1]))
#print sonar_hits,v
shared_regs['i2c_0x71_02'] = (v >> 8) & 0xff
shared_regs['i2c_0x71_03'] = v & 0xff
if key == "i2c_0x75_00": # sonarR (right) cmd
if val == 81:
sonar_set[4] = True
sonar_time[0] = time.time()
v = int(round(sonar_hits['beamsonarR'][1]))
#print sonar_hits,v
shared_regs['i2c_0x75_02'] = (v >> 8) & 0xff
shared_regs['i2c_0x75_03'] = v & 0xff
if sonar_set[2]:
if (time.time() - sonar_time[2]) > 0.05:
sonar_set[2] = False
if sonar_set[0]:
if (time.time() - sonar_time[0]) > 0.05:
sonar_set[0] = False
if sonar_set[4]:
if (time.time() - sonar_time[4]) > 0.05:
sonar_set[4] = False
# update location (display in cm)
thd = (90 - jog.head) * math.pi / 180.0
dxy = wheel_radius * (motor_left_ang_speed + motor_right_ang_speed) / 2.0
dx = dxy * math.cos(thd) * d_time
dy = dxy * math.sin(thd) * d_time
dthd = wheel_radius * (motor_left_ang_speed - motor_right_ang_speed) / wheel_dist
dthd *= d_time
xjog += (dx * 100.0)
yjog -= (dy * 100.0)
# check if robot locked
stopped = jog.check_stopped(tobsply)
# cannot move ... if stopped !!
if stopped:
#print t,"robot stopped cannot translate ",xjog, yjog, xjogBck, yjogBck
xjog -= (dx * 100.0)
yjog += (dy * 100.0)
if xjog < 0:
xjog = 0
if xjog > xmax:
xjog = xmax
if yjog < 0:
yjog = 0
if yjog > xmax:
yjog = xmax
#rotate jog
angrot = dthd * 180.0 / math.pi
jog.rotate(angrot)
# cannot rotate ... if stopped after rotation !!
stopped = jog.check_stopped(tobsply)
if stopped:
angrot = -dthd * 180.0 / math.pi
jog.rotate(angrot)
#print "robot stopped cannot rotate ",jog.head
#print "heading,rot, vx,vy ",jog.head,angrot,dx*100/d_time,dy*100/d_time
#print "motor_left_ang_speed %f revols/s"%(motor_left_ang_speed/twopi)
#print "motor_right_ang_speed %f revols/s"%(motor_right_ang_speed/twopi)
#print "dmotorLeftAngle %f revol"%(d_time*motor_left_ang_speed/twopi)
#print "dmotorRightAngle %f revol"%(d_time*motor_right_ang_speed/twopi)
angular = abs(d_time * motor_left_ang_speed / twopi)
odo_left_actual += (angular * n_ticks)
angular = abs(d_time * motor_right_ang_speed / twopi)
odo_right_actual += (angular * n_ticks)
#print "odos",odo_left_actual,odo_right_actual
keyor = 'fpga_0x20' # odometer 1
shared_regs[keyor] = int(odo_right_actual) & 65535
#print "vJOG : odo1",shared_regs[keyor],odo_right_actual
keyol = 'fpga_0x22' # odometer 2
shared_regs[keyol] = int(odo_left_actual) & 65535
#print "vJOG : odo2",shared_regs[keyol],odo_left_actual
# update IC2 compass
jog.head = (jog.head % 360.0)
shared_regs['i2c_0x60_01'] = int(round(jog.head * 256. / 360.0))
hdjog10 = int(round(jog.head * 10.0))
shared_regs['i2c_0x60_02'] = (hdjog10 >> 8) & 0xff
shared_regs['i2c_0x60_03'] = hdjog10 & 0xff
# translate JOG
dxjogi = int(round(xjog - xjogi))
dyjogi = int(round(yjog - yjogi))
xjogi += dxjogi
yjogi += dyjogi
jog.translate(dxjogi, dyjogi)
# display
screen.fill((127, 127, 127))
for obs in tobs:
obs.draw(screen, (100, 100, 255))
for pict in tpict:
pict.draw(screen, (31, 255, 31))
# update sonars
sonar_hits = jog.obstacle(tobsply)
jog.draw(screen, leds, sonar_hits)
# both looks similar
#pygame.display.update()
pygame.display.flip()
#print __name__,": leds" , leds
# first pass in simu loop completed -> end of init
if not ns.endOfInit:
ns.endOfInit = True
t += d_time
proc_time = time.time() - start_time
dtrem = d_time - proc_time
#print d_time,proc_time,dtrem
if dtrem < 0:
dtrem = 0.0
time.sleep(dtrem)
print "simu stopped", t
|
|
#
# The Python Imaging Library.
# $Id$
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 1995-09-09 fl Created
# 1995-09-13 fl Added full parser
# 1996-03-25 fl Added hack to use the IJG command line utilities
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 1996-05-28 fl Added draft support, JFIF version (0.1)
# 1996-12-30 fl Added encoder options, added progression property (0.2)
# 1997-08-27 fl Save mode 1 images as BW (0.3)
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
# 2003-09-13 fl Extract COM markers
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
# 2009-03-08 fl Added subsampling support (from Justin Huff).
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.6"
import array
import struct
import io
from struct import unpack
from PIL import Image, ImageFile, TiffImagePlugin, _binary
from PIL.JpegPresets import presets
from PIL._util import isStringType
# Short aliases for the binary helpers used throughout the parser.
i8 = _binary.i8      # one byte -> int
o8 = _binary.o8      # int -> one byte
i16 = _binary.i16be  # two bytes, big-endian -> int
i32 = _binary.i32be  # four bytes, big-endian -> int
#
# Parser
def Skip(self, marker):
    """Consume a marker segment we do not parse: read the 16-bit length
    field and skip over the remaining payload."""
    length = i16(self.fp.read(2)) - 2
    ImageFile._safe_read(self.fp, length)
def APP(self, marker):
    """Handle an application (APPn) marker segment.

    The raw payload is always stored in self.app and self.applist;
    well-known segments (JFIF, Exif, FlashPix, ICC profile, Adobe, MPF)
    are additionally decoded into self.info as a side effect.
    """
    n = i16(self.fp.read(2))-2
    s = ImageFile._safe_read(self.fp, n)

    app = "APP%d" % (marker & 15)

    self.app[app] = s  # compatibility
    self.applist.append((app, s))

    if marker == 0xFFE0 and s[:4] == b"JFIF":
        # extract JFIF information
        self.info["jfif"] = version = i16(s, 5)  # version
        self.info["jfif_version"] = divmod(version, 256)
        # extract JFIF properties
        try:
            jfif_unit = i8(s[7])
            jfif_density = i16(s, 8), i16(s, 10)
        except Exception:  # BUG FIX: was a bare except, which also
            pass           # swallowed KeyboardInterrupt/SystemExit
        else:
            if jfif_unit == 1:
                self.info["dpi"] = jfif_density
            self.info["jfif_unit"] = jfif_unit
            self.info["jfif_density"] = jfif_density
    elif marker == 0xFFE1 and s[:5] == b"Exif\0":
        # extract Exif information (incomplete)
        self.info["exif"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
        # extract FlashPix information (incomplete)
        self.info["flashpix"] = s  # FIXME: value will change
    elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
        # Since an ICC profile can be larger than the maximum size of
        # a JPEG marker (64K), we need provisions to split it into
        # multiple markers. The format defined by the ICC specifies
        # one or more APP2 markers containing the following data:
        #   Identifying string      ASCII "ICC_PROFILE\0"  (12 bytes)
        #   Marker sequence number  1, 2, etc (1 byte)
        #   Number of markers       Total of APP2's used (1 byte)
        #   Profile data            (remainder of APP2 data)
        # Decoders should use the marker sequence numbers to
        # reassemble the profile, rather than assuming that the APP2
        # markers appear in the correct sequence.
        self.icclist.append(s)
    elif marker == 0xFFEE and s[:5] == b"Adobe":
        self.info["adobe"] = i16(s, 5)
        # extract Adobe custom properties
        try:
            adobe_transform = i8(s[1])
        except Exception:  # BUG FIX: was a bare except (see above)
            pass
        else:
            self.info["adobe_transform"] = adobe_transform
    elif marker == 0xFFE2 and s[:4] == b"MPF\0":
        # extract MPO information
        self.info["mp"] = s[4:]
        # offset is current location minus buffer size
        # plus constant header size
        self.info["mpoffset"] = self.fp.tell() - n + 4
def COM(self, marker):
    """Handle a comment (COM) marker: record the raw payload under "COM"."""
    length = i16(self.fp.read(2)) - 2
    payload = ImageFile._safe_read(self.fp, length)

    self.app["COM"] = payload  # compatibility
    self.applist.append(("COM", payload))
def SOF(self, marker):
    """Handle a start-of-frame (SOFn) marker.

    Sets self.size, self.bits, self.layers and self.mode from the frame
    header, marks progressive frames, finalizes any ICC profile collected
    from APP2 markers, and records per-component info in self.layer.
    """
    #
    # Start of frame marker.  Defines the size and mode of the
    # image.  JPEG is colour blind, so we use some simple
    # heuristics to map the number of layers to an appropriate
    # mode.  Note that this could be made a bit brighter, by
    # looking for JFIF and Adobe APP markers.

    n = i16(self.fp.read(2))-2
    s = ImageFile._safe_read(self.fp, n)
    # (width, height) -- read as big-endian words at offsets 3 and 1
    self.size = i16(s[3:]), i16(s[1:])

    self.bits = i8(s[0])
    if self.bits != 8:
        raise SyntaxError("cannot handle %d-bit layers" % self.bits)

    self.layers = i8(s[5])
    if self.layers == 1:
        self.mode = "L"
    elif self.layers == 3:
        self.mode = "RGB"
    elif self.layers == 4:
        self.mode = "CMYK"
    else:
        raise SyntaxError("cannot handle %d-layer images" % self.layers)

    # progressive / hierarchical SOF variants
    if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
        self.info["progressive"] = self.info["progression"] = 1

    if self.icclist:
        # fixup icc profile
        self.icclist.sort()  # sort by sequence number
        # byte 13 of the first fragment is the total marker count;
        # accept the profile only when all fragments arrived
        if i8(self.icclist[0][13]) == len(self.icclist):
            profile = []
            for p in self.icclist:
                profile.append(p[14:])  # strip the 14-byte APP2 header
            icc_profile = b"".join(profile)
        else:
            icc_profile = None  # wrong number of fragments
        self.info["icc_profile"] = icc_profile
        self.icclist = None

    # one 3-byte record per component, starting at offset 6
    for i in range(6, len(s), 3):
        t = s[i:i+3]
        # 4-tuples: id, vsamp, hsamp, qtable
        self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2])))
def DQT(self, marker):
    """Handle a define-quantization-table (DQT) marker.

    Only baseline 8-bit tables are read; each table in the marker is
    stored in self.quantization keyed by its table number (low nibble
    of the precision/number byte).
    """
    #
    # Define quantization table.  Support baseline 8-bit tables
    # only.  Note that there might be more than one table in
    # each marker.

    # FIXME: The quantization tables can be used to estimate the
    # compression quality.

    n = i16(self.fp.read(2))-2
    s = ImageFile._safe_read(self.fp, n)
    while len(s):
        if len(s) < 65:
            raise SyntaxError("bad quantization table marker")
        v = i8(s[0])
        if v//16 == 0:
            # NOTE(review): 'b' is a *signed* byte typecode, so table
            # values above 127 are stored negative here; upstream Pillow
            # later switched this to 'B' (unsigned).  Confirm before
            # consuming these values directly.
            self.quantization[v & 15] = array.array("b", s[1:65])
            s = s[65:]
        else:
            return  # FIXME: add code to read 16-bit tables!
            # raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
# Maps a 2-byte marker code to (abbreviation, description, handler).
# A handler of None means the marker carries no payload to parse here
# (or is deliberately ignored).

MARKER = {
    0xFFC0: ("SOF0", "Baseline DCT", SOF),
    0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
    0xFFC2: ("SOF2", "Progressive DCT", SOF),
    0xFFC3: ("SOF3", "Spatial lossless", SOF),
    0xFFC4: ("DHT", "Define Huffman table", Skip),
    0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
    0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
    0xFFC7: ("SOF7", "Differential spatial", SOF),
    0xFFC8: ("JPG", "Extension", None),
    0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
    0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
    0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
    0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
    0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
    0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
    0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
    0xFFD0: ("RST0", "Restart 0", None),
    0xFFD1: ("RST1", "Restart 1", None),
    0xFFD2: ("RST2", "Restart 2", None),
    0xFFD3: ("RST3", "Restart 3", None),
    0xFFD4: ("RST4", "Restart 4", None),
    0xFFD5: ("RST5", "Restart 5", None),
    0xFFD6: ("RST6", "Restart 6", None),
    0xFFD7: ("RST7", "Restart 7", None),
    0xFFD8: ("SOI", "Start of image", None),
    0xFFD9: ("EOI", "End of image", None),
    0xFFDA: ("SOS", "Start of scan", Skip),
    0xFFDB: ("DQT", "Define quantization table", DQT),
    0xFFDC: ("DNL", "Define number of lines", Skip),
    0xFFDD: ("DRI", "Define restart interval", Skip),
    0xFFDE: ("DHP", "Define hierarchical progression", SOF),
    0xFFDF: ("EXP", "Expand reference component", Skip),
    0xFFE0: ("APP0", "Application segment 0", APP),
    0xFFE1: ("APP1", "Application segment 1", APP),
    0xFFE2: ("APP2", "Application segment 2", APP),
    0xFFE3: ("APP3", "Application segment 3", APP),
    0xFFE4: ("APP4", "Application segment 4", APP),
    0xFFE5: ("APP5", "Application segment 5", APP),
    0xFFE6: ("APP6", "Application segment 6", APP),
    0xFFE7: ("APP7", "Application segment 7", APP),
    0xFFE8: ("APP8", "Application segment 8", APP),
    0xFFE9: ("APP9", "Application segment 9", APP),
    0xFFEA: ("APP10", "Application segment 10", APP),
    0xFFEB: ("APP11", "Application segment 11", APP),
    0xFFEC: ("APP12", "Application segment 12", APP),
    0xFFED: ("APP13", "Application segment 13", APP),
    0xFFEE: ("APP14", "Application segment 14", APP),
    0xFFEF: ("APP15", "Application segment 15", APP),
    0xFFF0: ("JPG0", "Extension 0", None),
    0xFFF1: ("JPG1", "Extension 1", None),
    0xFFF2: ("JPG2", "Extension 2", None),
    0xFFF3: ("JPG3", "Extension 3", None),
    0xFFF4: ("JPG4", "Extension 4", None),
    0xFFF5: ("JPG5", "Extension 5", None),
    0xFFF6: ("JPG6", "Extension 6", None),
    0xFFF7: ("JPG7", "Extension 7", None),
    0xFFF8: ("JPG8", "Extension 8", None),
    0xFFF9: ("JPG9", "Extension 9", None),
    0xFFFA: ("JPG10", "Extension 10", None),
    0xFFFB: ("JPG11", "Extension 11", None),
    0xFFFC: ("JPG12", "Extension 12", None),
    0xFFFD: ("JPG13", "Extension 13", None),
    0xFFFE: ("COM", "Comment", COM)
}
def _accept(prefix):
return prefix[0:1] == b"\377"
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
    """Image plugin for JPEG and JFIF images."""

    format = "JPEG"
    format_description = "JPEG (ISO 10918)"

    def _open(self):
        """Parse markers up to start-of-scan and set up the decoder tile."""

        s = self.fp.read(1)

        if i8(s[0]) != 255:
            raise SyntaxError("not a JPEG file")

        # Create attributes
        self.bits = self.layers = 0

        # JPEG specifics (internal)
        self.layer = []         # per-component info filled in by SOF
        self.huffman_dc = {}
        self.huffman_ac = {}
        self.quantization = {}  # filled in by DQT
        self.app = {}  # compatibility
        self.applist = []
        self.icclist = []       # APP2 ICC profile fragments

        while True:

            i = i8(s)
            if i == 0xFF:
                s = s + self.fp.read(1)
                i = i16(s)
            else:
                # Skip non-0xFF junk
                s = b"\xff"
                continue

            if i in MARKER:
                name, description, handler = MARKER[i]
                # print hex(i), name, description
                if handler is not None:
                    handler(self, i)
                if i == 0xFFDA:  # start of scan
                    rawmode = self.mode
                    if self.mode == "CMYK":
                        rawmode = "CMYK;I"  # assume adobe conventions
                    self.tile = [("jpeg", (0, 0) + self.size, 0,
                                  (rawmode, ""))]
                    # self.__offset = self.fp.tell()
                    break
                s = self.fp.read(1)
            elif i == 0 or i == 0xFFFF:
                # padded marker or junk; move on
                s = b"\xff"
            else:
                raise SyntaxError("no marker found")

    def draft(self, mode, size):
        """Configure the decoder for fast, lower-quality loading.

        May switch the target mode (RGB -> L/YCbCr) and select a DCT
        scaling factor of 1/1, 1/2, 1/4 or 1/8 to approximate *size*.
        Returns self.
        """

        if len(self.tile) != 1:
            return

        d, e, o, a = self.tile[0]
        scale = 0

        if a[0] == "RGB" and mode in ["L", "YCbCr"]:
            self.mode = mode
            a = mode, ""

        if size:
            # pick the largest of 8/4/2/1 not exceeding the needed scale
            scale = max(self.size[0] // size[0], self.size[1] // size[1])
            for s in [8, 4, 2, 1]:
                if scale >= s:
                    break
            e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1]
            self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s)
            scale = s

        self.tile = [(d, e, o, a)]
        self.decoderconfig = (scale, 0)

        return self

    def load_djpeg(self):
        """Load via the external "djpeg" command-line utility.

        ALTERNATIVE: handle JPEGs via the IJG command line utilities.
        Decodes to a temporary PPM file which is removed afterwards.
        """

        import subprocess
        import tempfile
        import os
        f, path = tempfile.mkstemp()
        os.close(f)
        if os.path.exists(self.filename):
            subprocess.check_call(["djpeg", "-outfile", path, self.filename])
        else:
            raise ValueError("Invalid Filename")

        try:
            self.im = Image.core.open_ppm(path)
        finally:
            try:
                os.unlink(path)
            except:
                pass

        self.mode = self.im.mode
        self.size = self.im.size

        self.tile = []

    def _getexif(self):
        # delegate to the module-level helper of the same name
        return _getexif(self)

    def _getmp(self):
        # delegate to the module-level helper of the same name
        return _getmp(self)
def _fixup(value):
# Helper function for _getexif() and _getmp()
if len(value) == 1:
return value[0]
return value
def _getexif(self):
    """Extract the EXIF dictionary from self.info["exif"], or None.

    Extract EXIF information.  This method is highly experimental, and
    is likely to be replaced with something better in a future version.

    The EXIF record consists of a TIFF file embedded in a JPEG
    application marker (!).
    """
    try:
        data = self.info["exif"]
    except KeyError:
        return None
    # skip the 6-byte "Exif\0\0" identifier before the TIFF stream
    file = io.BytesIO(data[6:])
    head = file.read(8)
    exif = {}
    # process dictionary (the main IFD)
    info = TiffImagePlugin.ImageFileDirectory(head)
    info.load(file)
    for key, value in info.items():
        exif[key] = _fixup(value)
    # get exif extension (sub-IFD pointed to by tag 0x8769)
    try:
        file.seek(exif[0x8769])
    except KeyError:
        pass
    else:
        info = TiffImagePlugin.ImageFileDirectory(head)
        info.load(file)
        for key, value in info.items():
            exif[key] = _fixup(value)
    # get gpsinfo extension (sub-IFD pointed to by tag 0x8825),
    # stored as a nested dict under that tag
    try:
        file.seek(exif[0x8825])
    except KeyError:
        pass
    else:
        info = TiffImagePlugin.ImageFileDirectory(head)
        info.load(file)
        exif[0x8825] = gps = {}
        for key, value in info.items():
            gps[key] = _fixup(value)
    return exif
def _getmp(self):
    """Extract the MP (Multi-Picture) index from self.info["mp"], or None.

    Extract MP information.  This method was inspired by the "highly
    experimental" _getexif version that's been in use for years now,
    itself based on the ImageFileDirectory class in the TIFF plug-in.

    The MP record essentially consists of a TIFF file embedded in a JPEG
    application marker.  Raises SyntaxError for a malformed MP index.
    """
    try:
        data = self.info["mp"]
    except KeyError:
        return None
    file = io.BytesIO(data)
    head = file.read(8)
    # the TIFF byte-order mark decides how MP entries are unpacked below
    endianness = '>' if head[:4] == b'\x4d\x4d\x00\x2a' else '<'
    mp = {}
    # process dictionary
    info = TiffImagePlugin.ImageFileDirectory(head)
    info.load(file)
    for key, value in info.items():
        mp[key] = _fixup(value)
    # it's an error not to have a number of images
    try:
        quant = mp[0xB001]
    except KeyError:
        raise SyntaxError("malformed MP Index (no number of images)")
    # get MP entries
    try:
        mpentries = []
        for entrynum in range(0, quant):
            # each MP entry is a fixed 16-byte record inside tag 0xB002
            rawmpentry = mp[0xB002][entrynum * 16:(entrynum + 1) * 16]
            unpackedentry = unpack('{0}LLLHH'.format(endianness), rawmpentry)
            labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1',
                      'EntryNo2')
            mpentry = dict(zip(labels, unpackedentry))
            # split the packed 32-bit attribute word into named fields
            mpentryattr = {
                'DependentParentImageFlag': bool(mpentry['Attribute'] &
                                                 (1 << 31)),
                'DependentChildImageFlag': bool(mpentry['Attribute'] &
                                                (1 << 30)),
                'RepresentativeImageFlag': bool(mpentry['Attribute'] &
                                                (1 << 29)),
                'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27,
                'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24,
                'MPType': mpentry['Attribute'] & 0x00FFFFFF
                }
            if mpentryattr['ImageDataFormat'] == 0:
                mpentryattr['ImageDataFormat'] = 'JPEG'
            else:
                raise SyntaxError("unsupported picture format in MPO")
            mptypemap = {
                0x000000: 'Undefined',
                0x010001: 'Large Thumbnail (VGA Equivalent)',
                0x010002: 'Large Thumbnail (Full HD Equivalent)',
                0x020001: 'Multi-Frame Image (Panorama)',
                0x020002: 'Multi-Frame Image: (Disparity)',
                0x020003: 'Multi-Frame Image: (Multi-Angle)',
                0x030000: 'Baseline MP Primary Image'
                }
            mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'],
                                                  'Unknown')
            mpentry['Attribute'] = mpentryattr
            mpentries.append(mpentry)
        mp[0xB002] = mpentries
    except KeyError:
        raise SyntaxError("malformed MP Index (bad MP Entry)")
    # Next we should try and parse the individual image unique ID list;
    # we don't because I've never seen this actually used in a real MPO
    # file and so can't test it.
    return mp
# --------------------------------------------------------------------
# stuff to save JPEG files

# Image mode -> raw mode handed to the "jpeg" encoder.
RAWMODE = {
    "1": "L",
    "L": "L",
    "RGB": "RGB",
    "RGBA": "RGB",
    "RGBX": "RGB",
    "CMYK": "CMYK;I",  # assume adobe conventions
    "YCbCr": "YCbCr",
}

# Re-ordering table used by convert_dict_qtables: position i of the
# natural-order output is taken from zigzag_index[i] of the input table.
zigzag_index = ( 0, 1, 5, 6, 14, 15, 27, 28,
                 2, 4, 7, 13, 16, 26, 29, 42,
                 3, 8, 12, 17, 25, 30, 41, 43,
                 9, 11, 18, 24, 31, 40, 44, 53,
                10, 19, 23, 32, 39, 45, 52, 54,
                20, 22, 33, 38, 46, 51, 55, 60,
                21, 34, 37, 47, 50, 56, 59, 61,
                35, 36, 48, 49, 57, 58, 62, 63)

# Concatenated layer[i][1:3] values for the first three components
# (see get_sampling) -> subsampling code passed to the encoder.
samplings = {(1, 1, 1, 1, 1, 1): 0,
             (2, 1, 1, 1, 1, 1): 1,
             (2, 2, 1, 1, 1, 1): 2,
             }
def convert_dict_qtables(qtables):
    """Turn a {table_number: table} dict into a list of tables, each
    re-ordered from zig-zag order into natural order."""
    ordered = [qtables[num] for num in range(len(qtables)) if num in qtables]
    return [[table[i] for i in zigzag_index] for table in ordered]
def get_sampling(im):
    """Return the subsampling code for *im*, or -1 for the default.

    There's no subsampling when the image has only 1 layer (grayscale)
    or 4 layers (CMYK), so those map straight to the default value.

    NOTE: currently Pillow can't encode JPEG to YCCK format.  If YCCK
    support is added in the future, subsampling code will have to be
    updated (here and in JpegEncode.c) to deal with 4 layers.
    """
    if not hasattr(im, 'layers') or im.layers in (1, 4):
        return -1
    key = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
    return samplings.get(key, -1)
def _save(im, fp, filename):
    """Save *im* to the file object *fp* as JPEG.

    Encoder options come from im.encoderinfo: quality (int, preset name
    or "keep"), subsampling, qtables, dpi, progressive/progression,
    smooth, optimize, streamtype, icc_profile and exif.

    Raises IOError when the image mode cannot be written as JPEG and
    ValueError for invalid quality/subsampling/quantization settings.
    """
    try:
        rawmode = RAWMODE[im.mode]
    except KeyError:
        raise IOError("cannot write mode %s as JPEG" % im.mode)

    info = im.encoderinfo

    dpi = info.get("dpi", (0, 0))

    quality = info.get("quality", 0)
    subsampling = info.get("subsampling", -1)
    qtables = info.get("qtables")

    if quality == "keep":
        # reuse everything from the source JPEG
        quality = 0
        subsampling = "keep"
        qtables = "keep"
    elif quality in presets:
        preset = presets[quality]
        quality = 0
        subsampling = preset.get('subsampling', -1)
        qtables = preset.get('quantization')
    elif not isinstance(quality, int):
        raise ValueError("Invalid quality setting")
    else:
        if subsampling in presets:
            subsampling = presets[subsampling].get('subsampling', -1)
        if isStringType(qtables) and qtables in presets:
            qtables = presets[qtables].get('quantization')

    if subsampling == "4:4:4":
        subsampling = 0
    elif subsampling == "4:2:2":
        subsampling = 1
    elif subsampling == "4:1:1":
        subsampling = 2
    elif subsampling == "keep":
        if im.format != "JPEG":
            raise ValueError(
                "Cannot use 'keep' when original image is not a JPEG")
        subsampling = get_sampling(im)

    def validate_qtables(qtables):
        # Normalize qtables (string dump, dict, tuple or list) into a
        # list of 1-4 lists of 64 ints, or None.
        if qtables is None:
            return qtables
        if isStringType(qtables):
            try:
                # whitespace-separated numbers; '#' starts a comment
                lines = [int(num) for line in qtables.splitlines()
                         for num in line.split('#', 1)[0].split()]
            except ValueError:
                raise ValueError("Invalid quantization table")
            else:
                qtables = [lines[s:s+64] for s in range(0, len(lines), 64)]
        if isinstance(qtables, (tuple, list, dict)):
            if isinstance(qtables, dict):
                qtables = convert_dict_qtables(qtables)
            elif isinstance(qtables, tuple):
                qtables = list(qtables)
            if not (0 < len(qtables) < 5):
                raise ValueError("None or too many quantization tables")
            for idx, table in enumerate(qtables):
                try:
                    if len(table) != 64:
                        # BUG FIX: was a bare `raise` with no active
                        # exception, which surfaced as RuntimeError
                        # instead of the intended ValueError below.
                        raise TypeError
                    # BUG FIX: quantization values are unsigned bytes
                    # (0..255); the signed 'b' typecode overflowed on
                    # values > 127.  Masking also accepts tables that
                    # were stored as signed bytes (e.g. by DQT above).
                    table = array.array('B', [v & 255 for v in table])
                except TypeError:
                    raise ValueError("Invalid quantization table")
                else:
                    qtables[idx] = list(table)
            return qtables

    if qtables == "keep":
        if im.format != "JPEG":
            raise ValueError(
                "Cannot use 'keep' when original image is not a JPEG")
        qtables = getattr(im, "quantization", None)
    qtables = validate_qtables(qtables)

    extra = b""

    icc_profile = info.get("icc_profile")
    if icc_profile:
        # split the profile across as many APP2 markers as needed, since
        # one marker holds at most 64K of data (see APP above)
        ICC_OVERHEAD_LEN = 14
        MAX_BYTES_IN_MARKER = 65533
        MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
        markers = []
        while icc_profile:
            markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
            icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
        i = 1
        for marker in markers:
            size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
            extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) +
                      o8(len(markers)) + marker)
            i += 1

    # get keyword arguments
    im.encoderconfig = (
        quality,
        # "progressive" is the official name, but older documentation
        # says "progression"
        # FIXME: issue a warning if the wrong form is used (post-1.1.7)
        "progressive" in info or "progression" in info,
        info.get("smooth", 0),
        "optimize" in info,
        info.get("streamtype", 0),
        dpi[0], dpi[1],
        subsampling,
        qtables,
        extra,
        info.get("exif", b"")
        )

    # if we optimize, libjpeg needs a buffer big enough to hold the whole
    # image in a shot. Guessing on the size, at im.size bytes. (raw pixel
    # size is channels*size, this is a value that's been used in a django
    # patch.
    # https://github.com/jdriscoll/django-imagekit/issues/50
    bufsize = 0
    if "optimize" in info or "progressive" in info or "progression" in info:
        # keep sets quality to 0, but the actual value may be high.
        if quality >= 95 or quality == 0:
            bufsize = 2 * im.size[0] * im.size[1]
        else:
            bufsize = im.size[0] * im.size[1]

    # The exif info needs to be written as one block, + APP1, + one spare byte.
    # Ensure that our buffer is big enough
    bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5)

    ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize)
def _save_cjpeg(im, fp, filename):
    """ALTERNATIVE: handle JPEGs via the IJG "cjpeg" command line utility.

    Dumps *im* to a temporary PPM file, encodes it to *filename*, then
    removes the temporary file.
    """
    import os
    import subprocess
    tempfile = im._dump()
    subprocess.check_call(["cjpeg", "-outfile", filename, tempfile])
    try:
        # BUG FIX: this used to call os.unlink(file) -- `file` is the
        # builtin, so the call raised and was silently swallowed, leaking
        # the temporary file on every save.
        os.unlink(tempfile)
    except OSError:
        pass
##
# Factory for making JPEG and MPO instances
def jpeg_factory(fp=None, filename=None):
    """Factory for making JPEG and MPO instances.

    Opens *fp* as a JpegImageFile and upgrades it to an MpoImageFile
    when the MP header reports more than one image.
    """
    im = JpegImageFile(fp, filename)
    mpheader = im._getmp()
    try:
        if mpheader[45057] > 1:
            # It's actually an MPO
            from .MpoImagePlugin import MpoImageFile
            im = MpoImageFile(fp, filename)
    except (TypeError, IndexError, KeyError):
        # It is really a JPEG.  TypeError: mpheader is None (no MPF
        # marker).  BUG FIX: mpheader is a dict, so a present-but-
        # incomplete MP index raises KeyError, which the old
        # (TypeError, IndexError) tuple let escape.
        pass
    return im
# -------------------------------------------------------------------q-
# Registry stuff

# Register the opener (jpeg_factory may return a JpegImageFile or an
# MpoImageFile), the save handler, and the recognised extensions/MIME type.
Image.register_open("JPEG", jpeg_factory, _accept)
Image.register_save("JPEG", _save)

Image.register_extension("JPEG", ".jfif")
Image.register_extension("JPEG", ".jpe")
Image.register_extension("JPEG", ".jpg")
Image.register_extension("JPEG", ".jpeg")

Image.register_mime("JPEG", "image/jpeg")
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import shutil
import subprocess
import tempfile
import urllib
from distutils.dir_util import copy_tree
from threading import current_thread
import os
import time
from config import Config
from exception import GitRepositorySynchronizationException
from git import *
from ...util.asyncscheduledtask import AbstractAsyncScheduledTask, ScheduledExecutor
from ...util.log import LogFactory
class AgentGitHandler:
"""
Handles all the git artifact management tasks related to a cartridge
"""
    def __init__(self):
        # All functionality is exposed via static methods; instances
        # carry no state of their own.
        pass

    # class-wide logger for this handler
    log = LogFactory().get_log(__name__)

    # cache of repositories handled by this agent
    __git_repositories = {}
    # (tenant_id => GitRepository)
@staticmethod
def sync_initial_local_artifacts(git_repo):
# init git repo
AgentGitHandler.init(git_repo.local_repo_path)
# add remote repos
return AgentGitHandler.add_remote(git_repo)
@staticmethod
def add_remote(git_repo):
# add origin remote
output, errors = AgentGitHandler.execute_git_command(["remote", "add", "origin", git_repo.repo_url],
git_repo.local_repo_path)
if len(output) > 0:
raise GitRepositorySynchronizationException("Error in adding remote origin %s for local repository %s"
% (git_repo.repo_url, git_repo.local_repo_path))
# fetch
output, errors = AgentGitHandler.execute_git_command(["fetch"], git_repo.local_repo_path)
if "Resolving deltas: 100%" not in output:
raise GitRepositorySynchronizationException(
"Error in fetching from remote origin %s for local repository %s"
% (git_repo.repo_url, git_repo.local_repo_path))
# checkout master
output, errors = AgentGitHandler.execute_git_command(["checkout", "master"], git_repo.local_repo_path)
if "Branch master set up to track remote branch master from origin." not in output:
raise GitRepositorySynchronizationException("Error in checking out master branch %s for local repository %s"
% (git_repo.repo_url, git_repo.local_repo_path))
return True
@staticmethod
def init(path):
output, errors = AgentGitHandler.execute_git_command(["init"], path)
if "Initialized empty Git repository in" not in output:
AgentGitHandler.log.exception("Initializing local repo at %s failed: %s" % (path, output))
raise Exception("Initializing local repo at %s failed" % path)
@staticmethod
def is_valid_git_repository(git_repo):
output, errors = AgentGitHandler.execute_git_command(["show-ref"], git_repo.local_repo_path)
if len(output) > 0:
refs = output.split("\n")
for ref in refs:
ref = ref.strip()
if len(ref) > 0:
ref = ref.split(" ")
try:
AgentGitHandler.execute_git_command(["show", ref[0].strip()], git_repo.local_repo_path)
except RuntimeError:
return False
return True
else:
return False
@staticmethod
def pull(git_repo):
# check if modified files are present
modified = AgentGitHandler.has_modified_files(git_repo.local_repo_path)
if modified:
if Config.is_commits_enabled:
AgentGitHandler.log.debug(
"Un-staged files exist in working directory. Aborting git pull for this iteration...")
return
else:
AgentGitHandler.log.warn("Changes detected in working directory but COMMIT_ENABLED is set to false!")
AgentGitHandler.log.warn("Attempting to reset the working directory")
AgentGitHandler.execute_git_command(["reset"], repo_path=git_repo.local_repo_path)
# HEAD before pull
(init_head, init_errors) = AgentGitHandler.execute_git_command(["rev-parse", "HEAD"], git_repo.local_repo_path)
repo = Repo(git_repo.local_repo_path)
AgentGitHandler.execute_git_command(["pull", "--rebase", "origin", git_repo.branch], git_repo.local_repo_path)
AgentGitHandler.log.debug("Git pull rebase executed in checkout job")
if repo.is_dirty():
AgentGitHandler.log.error("Git pull operation in checkout job left the repository in dirty state")
AgentGitHandler.log.error(
"Git pull operation on remote %s for tenant %s failed" % (git_repo.repo_url, git_repo.tenant_id))
# HEAD after pull
(end_head, end_errors) = AgentGitHandler.execute_git_command(["rev-parse", "HEAD"], git_repo.local_repo_path)
# check if HEAD was updated
if init_head != end_head:
AgentGitHandler.log.debug("Artifacts were updated as a result of the pull operation, thread: %s - %s" %
(current_thread().getName(), current_thread().ident))
return True
else:
return False
@staticmethod
def clone(git_repo):
try:
# create a temporary location to clone
temp_repo_path = os.path.join(tempfile.gettempdir(), "pca_temp_" + git_repo.tenant_id)
if os.path.isdir(temp_repo_path) and os.listdir(temp_repo_path) != []:
GitUtils.delete_folder_tree(temp_repo_path)
GitUtils.create_dir(temp_repo_path)
# clone the repo to a temporary location first to avoid conflicts
AgentGitHandler.log.debug(
"Cloning artifacts from URL: %s to temp location: %s" % (git_repo.repo_url, temp_repo_path))
Repo.clone_from(git_repo.auth_url, temp_repo_path, branch=git_repo.branch)
# move the cloned dir to application path
copy_tree(temp_repo_path, git_repo.local_repo_path)
AgentGitHandler.log.info("Git clone operation for tenant %s successful" % git_repo.tenant_id)
return git_repo
except GitCommandError as e:
raise GitRepositorySynchronizationException("Error while cloning repository for tenant %s: %s" % (
git_repo.tenant_id, e))
# @staticmethod
# def checkout_remote_branch(git_repo):
# try:
# # create a temporary location to clone
# temp_repo_path = os.path.join(tempfile.gettempdir(), "pca_temp_" + git_repo.tenant_id)
# if os.path.isdir(temp_repo_path) and os.listdir(temp_repo_path) != []:
# GitUtils.delete_folder_tree(temp_repo_path)
# GitUtils.create_dir(temp_repo_path)
#
# # clone the repo to a temporary location first to avoid conflicts
# AgentGitHandler.log.debug(
# "Cloning artifacts from URL: %s to temp location: %s" % (git_repo.repo_url, temp_repo_path))
# Repo.clone_from(git_repo.auth_url, temp_repo_path)
#
# local_repo = Repo.init(temp_repo_path)
# origin = local_repo.create_remote('origin', local_repo.auth_url)
# assert origin.exists()
# # branch name = "tenant_<tenant_id>"
# origin.fetch("+refs/heads/" + git_repo.branch + ":refs/remotes/origin/" + git_repo.branch)
# AgentGitHandler.execute_git_command(["checkout", git_repo.branch], temp_repo_path)
#
# # move the cloned dir to application path
# copy_tree(temp_repo_path, git_repo.local_repo_path)
# AgentGitHandler.log.info("Git clone operation for tenant %s successful" % git_repo.tenant_id)
# return git_repo
# except GitCommandError as e:
# raise GitRepositorySynchronizationException("Error while cloning repository for tenant %s: %s" % (
# git_repo.tenant_id, e))
@staticmethod
def retry_clone(git_repo):
"""Retry 'git clone' operation for defined number of attempts with defined intervals
"""
git_clone_successful = False
# Read properties from agent.conf
max_retry_attempts = int(Config.artifact_clone_retry_count)
retry_interval = int(Config.artifact_clone_retry_interval)
retry_attempts = 0
# Iterate until git clone is successful or reaches max retry attempts
while git_clone_successful is False and retry_attempts < max_retry_attempts:
try:
retry_attempts += 1
AgentGitHandler.clone(git_repo)
AgentGitHandler.log.info(
"Retrying attempt to git clone operation for tenant %s successful" % git_repo.tenant_id)
git_clone_successful = True
except GitRepositorySynchronizationException as e:
AgentGitHandler.log.exception("Retrying git clone attempt %s failed: %s" % (retry_attempts, e))
if retry_attempts < max_retry_attempts:
time.sleep(retry_interval)
else:
raise GitRepositorySynchronizationException("All attempts failed while retrying git clone: %s"
% e)
@staticmethod
def add_repo(git_repo):
AgentGitHandler.__git_repositories[git_repo.tenant_id] = git_repo
@staticmethod
def get_repo(tenant_id):
"""
:param int tenant_id:
:return: GitRepository object
:rtype: GitRepository
"""
tenant_id = str(tenant_id)
if tenant_id in AgentGitHandler.__git_repositories:
return AgentGitHandler.__git_repositories[tenant_id]
return None
@staticmethod
def clear_repo(tenant_id):
if tenant_id in AgentGitHandler.__git_repositories:
del AgentGitHandler.__git_repositories[tenant_id]
@staticmethod
def create_git_repo(repo_info, branch=None):
git_repo = GitRepository()
git_repo.tenant_id = repo_info.tenant_id
git_repo.local_repo_path = repo_info.repo_path
git_repo.repo_url = repo_info.repo_url
git_repo.auth_url = AgentGitHandler.create_auth_url(repo_info)
git_repo.repo_username = repo_info.repo_username
git_repo.repo_password = repo_info.repo_password
git_repo.commit_enabled = repo_info.commit_enabled
if branch:
git_repo.branch = branch
else:
git_repo.branch = 'master'
git_repo.cloned = False
return git_repo
@staticmethod
def create_auth_url(repo_info):
# Accepted repo url formats
# "https://host.com/path/to/repo.git"
# "https://username@host.org/path/to/repo.git"
# "https://username:password@host.org/path/to/repo.git" NOT RECOMMENDED
# IMPORTANT: if the credentials are provided in the repo url, they must be url encoded
if repo_info.repo_username is not None or repo_info.repo_password is not None:
# credentials provided, have to modify url
repo_url = repo_info.repo_url
url_split = repo_url.split("://", 1)
# urlencode repo username and password
urlencoded_username = urllib.quote(repo_info.repo_username.strip(), safe='')
urlencoded_password = urllib.quote(repo_info.repo_password.strip(), safe='')
if "@" in url_split[1]:
# credentials seem to be in the url, check
at_split = url_split[1].split("@", 1)
if ":" in at_split[0]:
# both username and password are in the url, return as is
return repo_info.repo_url
else:
# only username is provided, need to include password
username_in_url = at_split[0].split(":", 1)[0]
return str(url_split[0] + "://" + username_in_url + ":" + urlencoded_password
+ "@" + at_split[1])
else:
# no credentials in the url, need to include username and password
return str(url_split[0] + "://" + urlencoded_username + ":" + urlencoded_password + "@" + url_split[1])
# no credentials specified, return as is
return repo_info.repo_url
@staticmethod
def has_modified_files(repo_path):
(output, errors) = AgentGitHandler.execute_git_command(["status"], repo_path=repo_path)
if "nothing to commit" in output:
return False
else:
return True
@staticmethod
def stage_all(repo_path):
(output, errors) = AgentGitHandler.execute_git_command(["add", "--all"], repo_path=repo_path)
return True if errors.strip() == "" else False
@staticmethod
def find_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
@staticmethod
def schedule_artifact_update_task(repo_info, auto_checkout, auto_commit, update_interval):
git_repo = AgentGitHandler.get_repo(repo_info.tenant_id)
if git_repo is None:
AgentGitHandler.log.error("Unable to schedule artifact sync task, repositoryContext null for tenant %s"
% repo_info.tenant_id)
return
if git_repo.scheduled_update_task is None:
AgentGitHandler.log.info(
"ADC configuration: [auto-commit] %s, [auto-checkout] %s, [interval] %s",
auto_commit, auto_checkout, update_interval)
artifact_update_task = ArtifactUpdateTask(repo_info, auto_checkout, auto_commit)
async_task = ScheduledExecutor(update_interval, artifact_update_task)
AgentGitHandler.log.info("Starting a Scheduled Executor thread for Git polling task")
git_repo.scheduled_update_task = async_task
async_task.start()
AgentGitHandler.log.info("Scheduled artifact synchronization task for path %s" % git_repo.local_repo_path)
else:
AgentGitHandler.log.debug("Artifact synchronization task for path %s already scheduled"
% git_repo.local_repo_path)
@staticmethod
def remove_repo(tenant_id):
git_repo = AgentGitHandler.get_repo(tenant_id)
# stop artifact update task
git_repo.scheduled_update_task.terminate()
# remove git contents
try:
GitUtils.delete_folder_tree(git_repo.local_repo_path)
except GitRepositorySynchronizationException as e:
AgentGitHandler.log.exception(
"Could not remove repository folder for tenant:%s %s" % (git_repo.tenant_id, e))
AgentGitHandler.clear_repo(tenant_id)
AgentGitHandler.log.info("Git repository deleted for tenant %s" % git_repo.tenant_id)
return True
@staticmethod
def execute_git_command(command, repo_path):
"""
Executes the given command string with given environment parameters
:param list command: Command with arguments to be executed
:param str repo_path: Repository path to run the command on
:return: output and error string tuple, RuntimeError if errors occur
:rtype: tuple(str, str)
:exception: RuntimeError
"""
os_env = os.environ.copy()
command.insert(0, "/usr/bin/git")
AgentGitHandler.log.debug("Executing Git command: %s" % command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os_env, cwd=repo_path)
(output, errors) = p.communicate()
AgentGitHandler.log.debug("Git command [output] %s" % str(output))
AgentGitHandler.log.debug("Git command [errors] %s" % str(errors))
return output, errors
class ArtifactUpdateTask(AbstractAsyncScheduledTask):
    """
    Scheduled task that runs the artifact commit and/or checkout plugins.

    The commit job deliberately runs before the checkout job: un-tracked
    files in the cloned location must be committed and pushed before new
    remote changes are pulled in.
    """

    def __init__(self, repo_info, auto_checkout, auto_commit):
        self.log = LogFactory().get_log(__name__)
        self.repo_info = repo_info
        self.auto_checkout = auto_checkout
        self.auto_commit = auto_commit

    def _run_safely(self, start_msg, fail_msg, action):
        # Run one plugin invocation, logging (not propagating) sync failures.
        try:
            self.log.debug(start_msg)
            action(self.repo_info)
        except GitRepositorySynchronizationException as e:
            self.log.exception(fail_msg % e)

    def execute_task(self):
        """Run one sync iteration: commit first, then checkout."""
        # DO NOT change this order — see class docstring.
        if self.auto_commit:
            self._run_safely(
                "Running commit job...",
                "Auto commit failed: %s",
                lambda info: Config.artifact_commit_plugin.plugin_object.commit(info))
        if self.auto_checkout:
            self._run_safely(
                "Running checkout job...",
                "Auto checkout task failed: %s",
                lambda info: Config.artifact_checkout_plugin.plugin_object.checkout(info))
        self.log.debug("ArtifactUpdateTask end of iteration.")
class GitRepository:
    """
    Value object describing one tenant's git repository inside this instance.
    """

    def __init__(self):
        # Remote repository URL as configured (str).
        self.repo_url = None
        # Remote URL with url-encoded credentials embedded (str).
        self.auth_url = None
        # Absolute path of the local working copy (str).
        self.local_repo_path = None
        # True once the repository has been cloned locally (bool).
        self.cloned = False
        # Owning tenant identifier (int).
        self.tenant_id = None
        # Username for remote authentication (str).
        self.repo_username = None
        # Password for remote authentication (str).
        self.repo_password = None
        # Whether local changes may be committed and pushed (bool).
        self.commit_enabled = False
        # ScheduledExecutor driving the periodic artifact sync task.
        self.scheduled_update_task = None
        # Branch tracked by this repository (str).
        self.branch = None
class GitUtils:
    """
    Filesystem helpers used by the AgentGitHandler.
    """

    def __init__(self):
        pass

    log = LogFactory().get_log(__name__)

    @staticmethod
    def create_dir(path):
        """
        Create the directory at *path*.

        :param path: directory to create
        :raises GitRepositorySynchronizationException: when creation fails
            (including when the directory already exists)
        """
        try:
            os.mkdir(path)
        except OSError as e:
            raise GitRepositorySynchronizationException("Directory creating failed in [%s]. " % e)
        GitUtils.log.debug("Successfully created directory [%s]" % path)

    @staticmethod
    def delete_folder_tree(path):
        """
        Recursively delete the folder at *path*.

        :param str path: full path of the folder
        :raises GitRepositorySynchronizationException: when deletion fails
        """
        try:
            shutil.rmtree(path)
        except OSError as e:
            raise GitRepositorySynchronizationException("Deletion of folder path %s failed: %s" % (path, e))
        GitUtils.log.debug("Directory [%s] deleted." % path)
|
|
import inspect
import os
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admindocs import utils
from django.contrib.admindocs.utils import (
replace_named_groups, replace_unnamed_groups,
)
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.db import models
from django.http import Http404
from django.template.engine import Engine
from django.urls import get_mod_func, get_resolver, get_urlconf, reverse
from django.utils.decorators import method_decorator
from django.utils.inspect import (
func_accepts_kwargs, func_accepts_var_args, func_has_no_args,
get_func_full_args,
)
from django.utils.translation import gettext as _
from django.views.generic import TemplateView
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class BaseAdminDocsView(TemplateView):
    """
    Base view for admindocs views.
    """
    @method_decorator(staff_member_required)
    def dispatch(self, request, *args, **kwargs):
        if utils.docutils_is_available:
            return super().dispatch(request, *args, **kwargs)
        # docutils is missing: show an explanatory error page instead.
        self.template_name = 'admin_doc/missing_docutils.html'
        return self.render_to_response(admin.site.each_context(request))

    def get_context_data(self, **kwargs):
        # Every admindocs page needs the admin index URL and admin context.
        kwargs['root_path'] = reverse('admin:index')
        kwargs.update(admin.site.each_context(self.request))
        return super().get_context_data(**kwargs)
class BookmarkletsView(BaseAdminDocsView):
    template_name = 'admin_doc/bookmarklets.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Absolute admin URL, needed by the bookmarklet javascript.
        context['admin_url'] = "%s://%s%s" % (
            self.request.scheme, self.request.get_host(), context['root_path'])
        return context
class TemplateTagIndexView(BaseAdminDocsView):
    template_name = 'admin_doc/template_tag_index.html'

    def get_context_data(self, **kwargs):
        """List every template tag (builtin first, then app libraries)."""
        tags = []
        try:
            engine = Engine.get_default()
        except ImproperlyConfigured:
            # Non-trivial TEMPLATES settings aren't supported (#24125).
            pass
        else:
            libraries = [('', lib) for lib in engine.template_builtins]
            libraries += sorted(engine.template_libraries.items())
            for module_name, library in libraries:
                for tag_name, tag_func in library.tags.items():
                    title, body, metadata = utils.parse_docstring(tag_func.__doc__)
                    # Render docstring parts through reST only when non-empty.
                    title = title and utils.parse_rst(title, 'tag', _('tag:') + tag_name)
                    body = body and utils.parse_rst(body, 'tag', _('tag:') + tag_name)
                    for key in metadata:
                        metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
                    tags.append({
                        'name': tag_name,
                        'title': title,
                        'body': body,
                        'meta': metadata,
                        'library': module_name.split('.')[-1],
                    })
        kwargs.update({'tags': tags})
        return super().get_context_data(**kwargs)
class TemplateFilterIndexView(BaseAdminDocsView):
    template_name = 'admin_doc/template_filter_index.html'

    def get_context_data(self, **kwargs):
        """List every template filter (builtin first, then app libraries)."""
        filters = []
        try:
            engine = Engine.get_default()
        except ImproperlyConfigured:
            # Non-trivial TEMPLATES settings aren't supported (#24125).
            pass
        else:
            libraries = [('', lib) for lib in engine.template_builtins]
            libraries += sorted(engine.template_libraries.items())
            for module_name, library in libraries:
                for filter_name, filter_func in library.filters.items():
                    title, body, metadata = utils.parse_docstring(filter_func.__doc__)
                    # Render docstring parts through reST only when non-empty.
                    title = title and utils.parse_rst(title, 'filter', _('filter:') + filter_name)
                    body = body and utils.parse_rst(body, 'filter', _('filter:') + filter_name)
                    for key in metadata:
                        metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
                    filters.append({
                        'name': filter_name,
                        'title': title,
                        'body': body,
                        'meta': metadata,
                        'library': module_name.split('.')[-1],
                    })
        kwargs.update({'filters': filters})
        return super().get_context_data(**kwargs)
class ViewIndexView(BaseAdminDocsView):
    template_name = 'admin_doc/view_index.html'

    @staticmethod
    def _get_full_name(func):
        """Return the dotted path of *func*: module plus qualified name."""
        return '%s.%s' % (func.__module__, func.__qualname__)

    def get_context_data(self, **kwargs):
        urlconf = import_module(settings.ROOT_URLCONF)
        views = []
        for func, regex, namespace, name in extract_views_from_urlpatterns(urlconf.urlpatterns):
            namespace_parts = namespace or []
            views.append({
                'full_name': self._get_full_name(func),
                'url': simplify_regex(regex),
                'url_name': ':'.join(namespace_parts + (name and [name] or [])),
                'namespace': ':'.join(namespace_parts),
                'name': name,
            })
        kwargs.update({'views': views})
        return super().get_context_data(**kwargs)
class ViewDetailView(BaseAdminDocsView):
    template_name = 'admin_doc/view_detail.html'

    @staticmethod
    def _get_view_func(view):
        """
        Resolve the dotted *view* path to a callable, or None when the
        URLconf does not know the view.
        """
        urlconf = get_urlconf()
        if not get_resolver(urlconf)._is_callback(view):
            return None
        # Separate the module and function, e.g.
        # 'mymodule.views.myview' -> 'mymodule.views', 'myview').
        mod, func = get_mod_func(view)
        try:
            return getattr(import_module(mod), func)
        except ImportError:
            # Import may fail because view contains a class name, e.g.
            # 'mymodule.views.ViewContainer.my_view', so mod takes the form
            # 'mymodule.views.ViewContainer'. Parse it again to separate
            # the module and class.
            mod, klass = get_mod_func(mod)
            return getattr(getattr(import_module(mod), klass), func)

    def get_context_data(self, **kwargs):
        view = self.kwargs['view']
        view_func = self._get_view_func(view)
        if view_func is None:
            raise Http404
        title, body, metadata = utils.parse_docstring(view_func.__doc__)
        title = title and utils.parse_rst(title, 'view', _('view:') + view)
        body = body and utils.parse_rst(body, 'view', _('view:') + view)
        for key in metadata:
            # NOTE(review): 'model' as the thing-being-parsed looks like a
            # copy/paste from the model views — confirm before changing.
            metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
        kwargs.update({
            'name': view,
            'summary': title,
            'body': body,
            'meta': metadata,
        })
        return super().get_context_data(**kwargs)
class ModelIndexView(BaseAdminDocsView):
    template_name = 'admin_doc/model_index.html'

    def get_context_data(self, **kwargs):
        # The template only needs each model's _meta options.
        kwargs.update({'models': [model._meta for model in apps.get_models()]})
        return super().get_context_data(**kwargs)
class ModelDetailView(BaseAdminDocsView):
    template_name = 'admin_doc/model_detail.html'

    def get_context_data(self, **kwargs):
        """
        Build the documentation context for one model: parsed docstring,
        fields (including many-to-many and reverse relations presented as
        `.all`/`.count` pseudo-fields) and public callable methods.
        """
        model_name = self.kwargs['model_name']
        # Get the model class.
        try:
            app_config = apps.get_app_config(self.kwargs['app_label'])
        except LookupError:
            raise Http404(_("App %(app_label)r not found") % self.kwargs)
        try:
            model = app_config.get_model(model_name)
        except LookupError:
            raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs)
        opts = model._meta
        title, body, metadata = utils.parse_docstring(model.__doc__)
        if title:
            title = utils.parse_rst(title, 'model', _('model:') + model_name)
        if body:
            body = utils.parse_rst(body, 'model', _('model:') + model_name)
        # Gather fields/field descriptions.
        fields = []
        for field in opts.fields:
            # ForeignKey is a special case since the field will actually be a
            # descriptor that returns the other object
            if isinstance(field, models.ForeignKey):
                data_type = field.remote_field.model.__name__
                app_label = field.remote_field.model._meta.app_label
                verbose = utils.parse_rst(
                    (_("the related `%(app_label)s.%(data_type)s` object") % {
                        'app_label': app_label, 'data_type': data_type,
                    }),
                    'model',
                    _('model:') + data_type,
                )
            else:
                data_type = get_readable_field_data_type(field)
                verbose = field.verbose_name
            fields.append({
                'name': field.name,
                'data_type': data_type,
                'verbose': verbose or '',
                'help_text': field.help_text,
            })
        # Gather many-to-many fields, exposed as `.all` and `.count`.
        for field in opts.many_to_many:
            data_type = field.remote_field.model.__name__
            app_label = field.remote_field.model._meta.app_label
            verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
                'app_label': app_label,
                'object_name': data_type,
            }
            fields.append({
                'name': "%s.all" % field.name,
                'data_type': 'List',
                'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
            })
            fields.append({
                'name': "%s.count" % field.name,
                'data_type': 'Integer',
                'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
            })
        # Gather model methods.
        methods = []
        for func_name, func in model.__dict__.items():
            if not inspect.isfunction(func):
                continue
            # Fix: the original raised/caught StopIteration to skip excluded
            # names; a plain any() guard expresses the same check directly.
            if any(func_name.startswith(exclude) for exclude in MODEL_METHODS_EXCLUDE):
                continue
            verbose = func.__doc__
            if verbose:
                verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
            # If a method has no arguments, show it as a 'field', otherwise
            # as a 'method with arguments'.
            if func_has_no_args(func) and not func_accepts_kwargs(func) and not func_accepts_var_args(func):
                fields.append({
                    'name': func_name,
                    'data_type': get_return_data_type(func_name),
                    'verbose': verbose or '',
                })
            else:
                arguments = get_func_full_args(func)
                # Join arguments with ', ' and in case of default value,
                # join it with '='. Use repr() so that strings will be
                # correctly displayed.
                print_arguments = ', '.join([
                    '='.join(list(arg_el[:1]) + [repr(el) for el in arg_el[1:]])
                    for arg_el in arguments
                ])
                methods.append({
                    'name': func_name,
                    'arguments': print_arguments,
                    'verbose': verbose or '',
                })
        # Gather related objects (reverse relations), also as pseudo-fields.
        for rel in opts.related_objects:
            verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
                'app_label': rel.related_model._meta.app_label,
                'object_name': rel.related_model._meta.object_name,
            }
            accessor = rel.get_accessor_name()
            fields.append({
                'name': "%s.all" % accessor,
                'data_type': 'List',
                'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
            })
            fields.append({
                'name': "%s.count" % accessor,
                'data_type': 'Integer',
                'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
            })
        kwargs.update({
            'name': '%s.%s' % (opts.app_label, opts.object_name),
            'summary': title,
            'description': body,
            'fields': fields,
            'methods': methods,
        })
        return super().get_context_data(**kwargs)
class TemplateDetailView(BaseAdminDocsView):
    template_name = 'admin_doc/template_detail.html'

    def get_context_data(self, **kwargs):
        """Locate *template* in each engine directory and show its contents."""
        template = self.kwargs['template']
        templates = []
        try:
            default_engine = Engine.get_default()
        except ImproperlyConfigured:
            # Non-trivial TEMPLATES settings aren't supported (#24125).
            pass
        else:
            # This doesn't account for template loaders (#24128).
            for index, directory in enumerate(default_engine.dirs):
                template_file = os.path.join(directory, template)
                template_contents = ''
                if os.path.exists(template_file):
                    with open(template_file) as f:
                        template_contents = f.read()
                templates.append({
                    'file': template_file,
                    'exists': os.path.exists(template_file),
                    'contents': template_contents,
                    'order': index,
                })
        kwargs.update({
            'name': template,
            'templates': templates,
        })
        return super().get_context_data(**kwargs)
####################
# Helper functions #
####################
def get_return_data_type(func_name):
    """Return a somewhat-helpful data type given a function name"""
    # Only `get_*` accessors get a guessed type, based on their suffix.
    if not func_name.startswith('get_'):
        return ''
    if func_name.endswith('_list'):
        return 'List'
    if func_name.endswith('_count'):
        return 'Integer'
    return ''
def get_readable_field_data_type(field):
    """
    Return the description for a given field type, if it exists. Fields'
    descriptions can contain format strings, which will be interpolated with
    the values of field.__dict__ before being output.
    """
    # vars(field) is field.__dict__; the description template pulls any
    # field attribute it names (e.g. %(max_length)s).
    return field.description % vars(field)
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
    """
    Return a list of views from a list of urlpatterns.
    Each object in the returned list is a two-tuple: (view_func, regex)
    """
    views = []
    for pattern in urlpatterns:
        if hasattr(pattern, 'url_patterns'):
            # An include(): recurse into the nested patterns.
            try:
                nested = pattern.url_patterns
            except ImportError:
                continue
            child_namespace = (namespace or []) + (pattern.namespace and [pattern.namespace] or [])
            views.extend(extract_views_from_urlpatterns(
                nested, base + pattern.regex.pattern, child_namespace))
        elif hasattr(pattern, 'callback'):
            try:
                views.append((pattern.callback, base + pattern.regex.pattern,
                              namespace, pattern.name))
            except ViewDoesNotExist:
                continue
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % pattern)
    return views
def simplify_regex(pattern):
    r"""
    Clean up urlpattern regexes into something more readable by humans. For
    example, turn "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "/<sport_slug>/athletes/<athlete_slug>/".
    """
    pattern = replace_unnamed_groups(replace_named_groups(pattern))
    # clean up any outstanding regex-y characters.
    for metachar in ('^', '$', '?'):
        pattern = pattern.replace(metachar, '')
    return pattern if pattern.startswith('/') else '/' + pattern
|
|
from django.conf.urls import url
from rest_framework import routers
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.views import APIView
from onadata.apps.api.viewsets.charts_viewset import ChartsViewSet
from onadata.apps.api.viewsets.connect_viewset import ConnectViewSet
from onadata.apps.api.viewsets.data_viewset import DataViewSet
from onadata.apps.api.viewsets.metadata_viewset import MetaDataViewSet
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.organization_profile_viewset import\
OrganizationProfileViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.apps.api.viewsets.stats_viewset import StatsViewSet
from onadata.apps.api.viewsets.team_viewset import TeamViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.api.viewsets.user_profile_viewset import UserProfileViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
from onadata.apps.api.viewsets.submissionstats_viewset import\
SubmissionStatsViewSet
from onadata.apps.api.viewsets.attachment_viewset import AttachmentViewSet
from onadata.apps.api.viewsets.xform_list_api import XFormListApi
from onadata.apps.api.viewsets.xform_submission_api import XFormSubmissionApi
from onadata.apps.api.viewsets.briefcase_api import BriefcaseApi
def make_routes(template_text):
    """
    Build a DRF list Route whose URL embeds the given lookup placeholder
    (e.g. 'lookup' or 'lookups').
    """
    url_template = r'^{prefix}/{%s}{trailing_slash}$' % template_text
    return routers.Route(
        url=url_template,
        mapping={'get': 'list', 'post': 'create'},
        name='{basename}-list',
        initkwargs={'suffix': 'List'})
class MultiLookupRouter(routers.DefaultRouter):
def __init__(self, *args, **kwargs):
super(MultiLookupRouter, self).__init__(*args, **kwargs)
self.lookups_routes = []
self.lookups_routes.append(routers.Route(
url=r'^{prefix}/{lookups}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
))
self.lookups_routes.append(make_routes('lookup'))
self.lookups_routes.append(make_routes('lookups'))
# Dynamically generated routes.
# Generated using @action or @link decorators on methods of the viewset
self.lookups_routes.append(routers.Route(
url=[
r'^{prefix}/{lookups}/{methodname}{trailing_slash}$',
r'^{prefix}/{lookups}/{methodname}/{extra}{trailing_slash}$'],
mapping={
'{httpmethod}': '{methodname}',
},
name='{basename}-{methodnamehyphen}',
initkwargs={}
))
def get_extra_lookup_regexes(self, route):
ret = []
base_regex = '(?P<{lookup_field}>[^/]+)'
if 'extra_lookup_fields' in route.initkwargs:
for lookup_field in route.initkwargs['extra_lookup_fields']:
ret.append(base_regex.format(lookup_field=lookup_field))
return '/'.join(ret)
def get_lookup_regexes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
for i in range(1, len(lookup_fields)):
tmp = []
for lookup_field in lookup_fields[:i + 1]:
if lookup_field == lookup_fields[i]:
base_regex = '(?P<{lookup_field}>[^/.]+)'
else:
base_regex = '(?P<{lookup_field}>[^/]+)'
tmp.append(base_regex.format(lookup_field=lookup_field))
ret.append(tmp)
return ret
    def get_lookup_routes(self, viewset):
        """
        Build the route list for a multi-lookup viewset: the default list
        route, then each entry of self.lookups_routes with the dynamic-route
        template expanded once per @action/@link decorated method.
        """
        ret = [self.routes[0]]
        # Determine any `@action` or `@link` decorated methods on the viewset
        dynamic_routes = []
        for methodname in dir(viewset):
            attr = getattr(viewset, methodname)
            # decorated methods carry a 'bind_to_methods' attribute
            httpmethods = getattr(attr, 'bind_to_methods', None)
            if httpmethods:
                httpmethods = [method.lower() for method in httpmethods]
                dynamic_routes.append((httpmethods, methodname))
        for route in self.lookups_routes:
            if route.mapping == {'{httpmethod}': '{methodname}'}:
                # Dynamic routes (@link or @action decorator)
                for httpmethods, methodname in dynamic_routes:
                    initkwargs = route.initkwargs.copy()
                    initkwargs.update(getattr(viewset, methodname).kwargs)
                    mapping = dict(
                        (httpmethod, methodname) for httpmethod in httpmethods)
                    name = routers.replace_methodname(route.name, methodname)
                    if 'extra_lookup_fields' in initkwargs:
                        # extra route variant (url[1] carries the {extra} part),
                        # appended before the plain method route
                        uri = route.url[1]
                        uri = routers.replace_methodname(uri, methodname)
                        ret.append(routers.Route(
                            url=uri, mapping=mapping, name='%s-extra' % name,
                            initkwargs=initkwargs,
                        ))
                    uri = routers.replace_methodname(route.url[0], methodname)
                    ret.append(routers.Route(
                        url=uri, mapping=mapping, name=name,
                        initkwargs=initkwargs,
                    ))
            else:
                # Standard route
                ret.append(route)
        return ret
def get_routes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
ret = self.get_lookup_routes(viewset)
else:
ret = super(MultiLookupRouter, self).get_routes(viewset)
return ret
    def get_api_root_view(self):
        """
        Return a view to use as the API root.
        """
        # Map each registered URL prefix to the name of its list route so
        # the root view can reverse() every endpoint per request.
        api_root_dict = {}
        list_name = self.routes[0].name
        for prefix, viewset, basename in self.registry:
            api_root_dict[prefix] = list_name.format(basename=basename)
        # The docstring below is user-facing: DRF renders it as the
        # browsable API's front-page documentation.
        class OnaApi(APIView):
            """
            ## Ona JSON Rest API endpoints:
            ### Data
            * [/api/v1/charts](/api/v1/charts) - List, Retrieve Charts of collected data
            * [/api/v1/data](/api/v1/data) - List, Retrieve submission data
            * [/api/v1/stats](/api/v1/stats) - Summary statistics
            ### Forms
            * [/api/v1/forms](/api/v1/forms) - List, Retrieve form information
            * [/api/v1/media](/api/v1/media) - List, Retrieve media attachments
            * [/api/v1/metadata](/api/v1/metadata) - List, Retrieve form metadata
            * [/api/v1/projects](/api/v1/projects) - List, Retrieve, Create,
            Update organization projects, forms
            * [/api/v1/submissions](/api/v1/submissions) - Submit XForms to a form
            ### Users and Organizations
            * [/api/v1/orgs](/api/v1/orgs) - List, Retrieve, Create,
            Update organization and organization info
            * [/api/v1/profiles](/api/v1/profiles) - List, Create, Update user information
            * [/api/v1/teams](/api/v1/teams) - List, Retrieve, Create, Update teams
            * [/api/v1/user](/api/v1/user) - Return authenticated user profile info
            * [/api/v1/users](/api/v1/users) - List, Retrieve user data
            ## Status Codes
            * **200** - Successful [`GET`, `PATCH`, `PUT`]
            * **201** - Resource successfully created [`POST`]
            * **204** - Resource successfully deleted [`DELETE`]
            * **403** - Permission denied to resource
            * **404** - Resource was not found
            ## Authentication
            Ona JSON API endpoints support both Basic authentication
            and API Token Authentication through the `Authorization` header.
            ### Basic Authentication
            Example using curl:
                curl -X GET https://ona.io/api/v1/ -u username:password
            ### Token Authentication
            Example using curl:
                curl -X GET https://ona.io/api/v1/ -H "Authorization: Token TOKEN_KEY"
            ### Temporary Token Authentication
            Example using curl:
                curl -X GET https://ona.io/api/v1/ -H "Authorization: TempToken TOKEN_KEY"
            ### Ona Tagging API
            * [Filter form list by tags.](
            /api/v1/forms#get-list-of-forms-with-specific-tags)
            * [List Tags for a specific form.](
            /api/v1/forms#get-list-of-tags-for-a-specific-form)
            * [Tag Forms.](/api/v1/forms#tag-forms)
            * [Delete a specific tag.](/api/v1/forms#delete-a-specific-tag)
            * [List form data by tag.](
            /api/v1/data#query-submitted-data-of-a-specific-form-using-tags)
            * [Tag a specific submission](/api/v1/data#tag-a-submission-data-point)
            ## Using Oauth2 with the Ona API
            You can learn more about oauth2 [here](
            http://tools.ietf.org/html/rfc6749).
            ### 1. Register your client application with Ona - [register](\
            /o/applications/register/)
            - `name` - name of your application
            - `client_type` - Client Type: select confidential
            - `authorization_grant_type` - Authorization grant type: Authorization code
            - `redirect_uri` - Redirect urls: redirection endpoint
            Keep note of the `client_id` and the `client_secret`, it is required when
            requesting for an `access_token`.
            ### 2. Authorize client application.
            The authorization url is of the form:
            <pre class="prettyprint">
            <b>GET</b> /o/authorize?client_id=XXXXXX&response_type=code&state=abc</pre>
            example:
            http://localhost:8000/o/authorize?client_id=e8&response_type=code&state=xyz
            Note: Providing the url to any user will prompt for a password and
            request for read and write permission for the application whose `client_id` is
            specified.
            Where:
            - `client_id` - is the client application id - ensure its urlencoded
            - `response_type` - should be code
            - `state` - a random state string that you client application will get when
            redirection happens
            What happens:
            1. a login page is presented, the username used to login determines the account
            that provides access.
            2. redirection to the client application occurs, the url is of the form:
            > REDIRECT_URI/?state=abc&code=YYYYYYYYY
            example redirect uri
            http://localhost:30000/?state=xyz&code=SWWk2PN6NdCwfpqiDiPRcLmvkw2uWd
            - `code` - is the code to use to request for `access_token`
            - `state` - same state string used during authorization request
            Your client application should use the `code` to request for an access_token.
            ### 3. Request for access token.
            You need to make a `POST` request with `grant_type`, `code`, `client_id` and
            `redirect_uri` as `POST` payload params. You should authenticate the request
            with `Basic Authentication` using your `client_id` and `client_secret` as
            `username:password` pair.
            Request:
            <pre class="prettyprint">
            <b>POST</b>/o/token</pre>
            Payload:
            grant_type=authorization_code&code=YYYYYYYYY&client_id=XXXXXX&
            redirect_uri=http://redirect/uri/path
            curl example:
            curl -X POST -d "grant_type=authorization_code&
            code=PSwrMilnJESZVFfFsyEmEukNv0sGZ8&
            client_id=e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv&
            redirect_uri=http://localhost:30000" "http://localhost:8000/o/token/"
            --user "e8:xo7i4LNpMj"
            Response:
            {
            "access_token": "Q6dJBs9Vkf7a2lVI7NKLT8F7c6DfLD",
            "token_type": "Bearer", "expires_in": 36000,
            "refresh_token": "53yF3uz79K1fif2TPtNBUFJSFhgnpE",
            "scope": "read write groups"
            }
            Where:
            - `access_token` - access token - expires
            - `refresh_token` - token to use to request a new `access_token` in case it has
            expired.
            Now that you have an `access_token` you can make API calls.
            ### 4. Accessing the Ona API using the `access_token`.
            Example using curl:
                curl -X GET https://ona.io/api/v1
                -H "Authorization: Bearer ACCESS_TOKEN"
            """
            # Serve the root index regardless of model-level permissions.
            _ignore_model_permissions = True
            def get(self, request, format=None):
                # Reverse every registered list route for the current request
                # so links honour the request's host and format suffix.
                ret = {}
                for key, url_name in api_root_dict.items():
                    ret[key] = reverse(
                        url_name, request=request, format=format)
                return Response(ret)
        return OnaApi.as_view()
    def get_urls(self):
        """Build the router's URL patterns.

        Mirrors DRF's ``DefaultRouter.get_urls()`` but additionally expands a
        ``{lookups}`` placeholder: one URL pattern is emitted per combination
        of lookup regexes produced by ``get_lookup_regexes()``.
        """
        ret = []
        if self.include_root_view:
            root_url = url(r'^$', self.get_api_root_view(),
                           name=self.root_view_name)
            ret.append(root_url)
        for prefix, viewset, basename in self.registry:
            lookup = self.get_lookup_regex(viewset)
            lookup_list = self.get_lookup_regexes(viewset)
            if lookup_list:
                # lookup = lookups[0]
                # Join each regex combination into a single path segment list.
                lookup_list = [u'/'.join(k) for k in lookup_list]
            else:
                # No multi-field lookups: substitute an empty string so the
                # URL template still formats cleanly.
                lookup_list = [u'']
            routes = self.get_routes(viewset)
            for route in routes:
                mapping = self.get_method_map(viewset, route.mapping)
                if not mapping:
                    # The viewset binds no methods for this route's verbs.
                    continue
                for lookups in lookup_list:
                    regex = route.url.format(
                        prefix=prefix,
                        lookup=lookup,
                        lookups=lookups,
                        trailing_slash=self.trailing_slash,
                        extra=self.get_extra_lookup_regexes(route)
                    )
                    view = viewset.as_view(mapping, **route.initkwargs)
                    name = route.name.format(basename=basename)
                    ret.append(url(regex, view, name=name))
        if self.include_format_suffixes:
            # e.g. `.json`, `.csv2` — letters followed by optional digits.
            ret = format_suffix_patterns(ret, allowed=['[a-z]+[0-9]*'])
        return ret
# Single router instance wiring up every API endpoint; URLs are generated
# without trailing slashes.
router = MultiLookupRouter(trailing_slash=False)
# Users and organizations
router.register(r'users', UserViewSet)
router.register(r'user', ConnectViewSet)
router.register(r'profiles', UserProfileViewSet)
router.register(r'orgs', OrganizationProfileViewSet)
# Forms, projects and annotations
router.register(r'forms', XFormViewSet)
router.register(r'projects', ProjectViewSet)
router.register(r'teams', TeamViewSet)
router.register(r'notes', NoteViewSet)
# Data and statistics; these pass an explicit base_name.
router.register(r'data', DataViewSet, base_name='data')
router.register(r'stats', StatsViewSet, base_name='stats')
router.register(r'stats/submissions', SubmissionStatsViewSet,
                base_name='submissionstats')
router.register(r'charts', ChartsViewSet, base_name='chart')
router.register(r'metadata', MetaDataViewSet, base_name='metadata')
router.register(r'media', AttachmentViewSet, base_name='attachment')
# XForm / OpenRosa-facing endpoints
router.register(r'formlist', XFormListApi, base_name='formlist')
router.register(r'submissions', XFormSubmissionApi, base_name='submissions')
router.register(r'briefcase', BriefcaseApi, base_name='briefcase')
|
|
from __future__ import unicode_literals
import copy
import logging
import sys
import warnings
from django.conf import compat_patch_logging_config, LazySettings
from django.core import mail
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils.log import CallbackFilter, RequireDebugFalse
from django.utils.six import StringIO
from django.utils.unittest import skipUnless
from ..admin_scripts.tests import AdminScriptTestCase
# (major, minor) of the running interpreter, e.g. (2, 7).
PYVERS = sys.version_info[:2]
# logging config prior to using filter with mail_admins
OLD_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
class PatchLoggingConfigTest(TestCase):
    """
    Tests for backward-compat shim for #16288. These tests should be removed in
    Django 1.6 when that shim and DeprecationWarning are removed.
    """

    def test_filter_added(self):
        """
        Test that debug-false filter is added to mail_admins handler if it has
        no filters.
        """
        logging_config = copy.deepcopy(OLD_LOGGING)
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            compat_patch_logging_config(logging_config)
            # Exactly one DeprecationWarning is expected from the shim.
            self.assertEqual(len(caught), 1)
        self.assertEqual(
            logging_config["handlers"]["mail_admins"]["filters"],
            ['require_debug_false'])

    def test_filter_configuration(self):
        """
        Test that the auto-added require_debug_false filter is an instance of
        `RequireDebugFalse` filter class.
        """
        logging_config = copy.deepcopy(OLD_LOGGING)
        with warnings.catch_warnings(record=True):
            compat_patch_logging_config(logging_config)
        added_filter = logging_config["filters"]["require_debug_false"]
        self.assertEqual(added_filter["()"],
                         "django.utils.log.RequireDebugFalse")

    def test_require_debug_false_filter(self):
        """
        Test the RequireDebugFalse filter class.
        """
        debug_filter = RequireDebugFalse()
        # The filter passes records only when DEBUG is off.
        for debug, expected in ((True, False), (False, True)):
            with self.settings(DEBUG=debug):
                self.assertEqual(
                    debug_filter.filter("record is not used"), expected)

    def test_no_patch_if_filters_key_exists(self):
        """
        Test that the logging configuration is not modified if the mail_admins
        handler already has a "filters" key.
        """
        original = copy.deepcopy(OLD_LOGGING)
        original["handlers"]["mail_admins"]["filters"] = []
        patched = copy.deepcopy(original)
        compat_patch_logging_config(patched)
        self.assertEqual(original, patched)

    def test_no_patch_if_no_mail_admins_handler(self):
        """
        Test that the logging configuration is not modified if the mail_admins
        handler is not present.
        """
        original = copy.deepcopy(OLD_LOGGING)
        original["handlers"].pop("mail_admins")
        patched = copy.deepcopy(original)
        compat_patch_logging_config(patched)
        self.assertEqual(original, patched)
class DefaultLoggingTest(TestCase):
    def setUp(self):
        # Remember the handler's real stream so it can be restored.
        self.logger = logging.getLogger('django')
        self.old_stream = self.logger.handlers[0].stream

    def tearDown(self):
        self.logger.handlers[0].stream = self.old_stream

    def test_django_logger(self):
        """
        The 'django' base logger only output anything when DEBUG=True.
        """
        captured = StringIO()
        self.logger.handlers[0].stream = captured
        # DEBUG is off by default in the test settings: nothing is emitted.
        self.logger.error("Hey, this is an error.")
        self.assertEqual(captured.getvalue(), '')
        # With DEBUG on, the message must reach the handler's stream.
        with self.settings(DEBUG=True):
            self.logger.error("Hey, this is an error.")
        self.assertEqual(captured.getvalue(), 'Hey, this is an error.\n')
@skipUnless(PYVERS > (2,6), "warnings captured only in Python >= 2.7")
class WarningLoggerTests(TestCase):
    """
    Tests that warnings output for DeprecationWarnings is enabled
    and captured to the logging system
    """
    def setUp(self):
        # If tests are invoke with "-Wall" (or any -W flag actually) then
        # warning logging gets disabled (see django/conf/__init__.py). However,
        # these tests expect warnings to be logged, so manually force warnings
        # to the logs. Use getattr() here because the logging capture state is
        # undocumented and (I assume) brittle.
        self._old_capture_state = bool(getattr(logging, '_warnings_showwarning', False))
        logging.captureWarnings(True)
        # this convoluted setup is to avoid printing this deprecation to
        # stderr during test running - as the test runner forces deprecations
        # to be displayed at the global py.warnings level
        self.logger = logging.getLogger('py.warnings')
        self.outputs = []
        self.old_streams = []
        # Swap every handler's stream for a StringIO we can inspect later.
        for handler in self.logger.handlers:
            self.old_streams.append(handler.stream)
            self.outputs.append(StringIO())
            handler.stream = self.outputs[-1]
    def tearDown(self):
        # Restore the original streams in the same order they were saved.
        for i, handler in enumerate(self.logger.handlers):
            self.logger.handlers[i].stream = self.old_streams[i]
        # Reset warnings state.
        logging.captureWarnings(self._old_capture_state)
    @override_settings(DEBUG=True)
    def test_warnings_capture(self):
        # With DEBUG=True the warning text must reach the captured stream.
        warnings.warn('Foo Deprecated', DeprecationWarning)
        output = force_text(self.outputs[0].getvalue())
        self.assertTrue('Foo Deprecated' in output)
    def test_warnings_capture_debug_false(self):
        # NOTE(review): relies on DEBUG=False being the default in the test
        # settings — the warning is then filtered out of the log.
        warnings.warn('Foo Deprecated', DeprecationWarning)
        output = force_text(self.outputs[0].getvalue())
        self.assertFalse('Foo Deprecated' in output)
class CallbackFilterTest(TestCase):
    def test_sense(self):
        # The filter's verdict mirrors whatever the callback returns.
        reject_all = CallbackFilter(lambda record: False)
        accept_all = CallbackFilter(lambda record: True)
        self.assertEqual(reject_all.filter("record"), False)
        self.assertEqual(accept_all.filter("record"), True)

    def test_passes_on_record(self):
        # The exact record object must be handed through to the callback.
        seen = []

        def remember(record):
            seen.append(record)
            return True

        CallbackFilter(remember).filter("a record")
        self.assertEqual(seen, ["a record"])
class AdminEmailHandlerTest(TestCase):
    # Shared 'django.request' logger whose mail_admins handler is exercised.
    logger = logging.getLogger('django.request')
    def get_admin_email_handler(self, logger):
        # Inspired from regressiontests/views/views.py: send_log()
        # ensuring the AdminEmailHandler does not get filtered out
        # even with DEBUG=True.
        admin_email_handler = [
            h for h in logger.handlers
            if h.__class__.__name__ == "AdminEmailHandler"
        ][0]
        return admin_email_handler
    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
    )
    def test_accepts_args(self):
        """
        Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
        setting are used to compose the email subject.
        Refs #16736.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            # Drop the require_debug_false filter so the mail is sent even
            # under the test runner's settings.
            admin_email_handler.filters = []
            self.logger.error(message, token1, token2)
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters
    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
        INTERNAL_IPS=('127.0.0.1',),
    )
    def test_accepts_args_and_request(self):
        """
        Ensure that the subject is also handled if being
        passed a request object.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            rf = RequestFactory()
            request = rf.get('/')
            # Passing the request via `extra` makes the handler mark the
            # subject with "(internal IP)" because of INTERNAL_IPS above.
            self.logger.error(message, token1, token2,
                extra={
                    'status_code': 403,
                    'request': request,
                }
            )
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters
    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_subject_accepts_newlines(self):
        """
        Ensure that newlines in email reports' subjects are escaped to avoid
        AdminErrorHandler to fail.
        Refs #17281.
        """
        message = 'Message \r\n with newlines'
        expected_subject = 'ERROR: Message \\r\\n with newlines'
        self.assertEqual(len(mail.outbox), 0)
        self.logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertFalse('\n' in mail.outbox[0].subject)
        self.assertFalse('\r' in mail.outbox[0].subject)
        self.assertEqual(mail.outbox[0].subject, expected_subject)
    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_truncate_subject(self):
        """
        RFC 2822's hard limit is 998 characters per line.
        So, minus "Subject: ", the actual subject must be no longer than 989
        characters.
        Refs #17281.
        """
        message = 'a' * 1000
        expected_subject = 'ERROR: aa' + 'a' * 980
        self.assertEqual(len(mail.outbox), 0)
        self.logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, expected_subject)
class SettingsConfigTest(AdminScriptTestCase):
    """
    Test that accessing settings in a custom logging handler does not trigger
    a circular import error.
    """
    def setUp(self):
        # LOGGING dict, as source text, naming a handler class that itself
        # touches settings when instantiated.
        log_config = """{
    'version': 1,
    'handlers': {
        'custom_handler': {
            'level': 'INFO',
            'class': 'logging_tests.logconfig.MyHandler',
        }
    }
}"""
        self.write_settings('settings.py', sdict={'LOGGING': log_config})
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_circular_dependency(self):
        # validate is just an example command to trigger settings configuration
        out, err = self.run_manage(['validate'])
        self.assertNoOutput(err)
        self.assertOutput(out, "0 errors found")
def dictConfig(config):
    """Stand-in LOGGING_CONFIG callable that records that it was invoked."""
    dictConfig.called = True

# Reset the marker at import time so a test can detect the next call.
dictConfig.called = False
class SettingsConfigureLogging(TestCase):
    """
    Test that calling settings.configure() initializes the logging
    configuration.
    """
    def test_configure_initializes_logging(self):
        settings = LazySettings()
        # Point LOGGING_CONFIG at the dictConfig stub defined in this module;
        # configure() must invoke it.
        settings.configure(
            LOGGING_CONFIG='regressiontests.logging_tests.tests.dictConfig')
        self.assertTrue(dictConfig.called)
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring,invalid-name,unused-variable
import os
import shutil
import threading
import io
import screed
import khmer
from . import khmer_tst_utils as utils
from nose.plugins.attrib import attr
from .test_scripts import _make_counting
def test_normalize_by_median_indent():
    # Diginorm with a pre-built countgraph (--loadgraph) and an explicit
    # output path (-o) should exit cleanly and create that output file.
    infile = utils.get_test_data('paired-mixed.fa.pe')
    hashfile = utils.get_test_data('normC20k20.ct')
    outfile = utils.get_temp_filename('paired-mixed.fa.pe.keep')
    script = 'normalize-by-median.py'
    args = ['--loadgraph', hashfile, '-o', outfile, infile]
    (status, out, err) = utils.runscript(script, args)
    assert status == 0, (out, err)
    assert os.path.exists(outfile)
def test_normalize_by_median_loadgraph_with_args():
    # Loading a saved countgraph while also supplying sketch parameters
    # (--ksize) should warn that the saved graph's parameters take over.
    infile = utils.get_test_data("test-abund-read-2.fa")
    tablefile = utils.get_temp_filename("table")
    in_dir = os.path.dirname(tablefile)
    # First build a countgraph on disk...
    script = "load-into-counting.py"
    args = [tablefile, infile]
    (status, out, err) = utils.runscript(script, args)
    # ...then reload it under normalize-by-median with conflicting args.
    script = "normalize-by-median.py"
    args = ["--ksize", "7", "--loadgraph", tablefile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'WARNING: You are loading a saved k-mer countgraph from' in err, err
def test_normalize_by_median_empty_file():
    # An empty input file is skipped with a warning rather than an error.
    infile = utils.get_temp_filename('empty')
    shutil.copyfile(utils.get_test_data('empty-file'), infile)
    script = 'normalize-by-median.py'
    in_dir = os.path.dirname(infile)
    args = [infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'WARNING:' in err, err
    assert 'is empty' in err, err
    assert 'SKIPPED' in err, err
def test_normalize_by_median():
    # Basic run at C=1: exactly one read of test-abund-read-2.fa survives
    # and the unique k-mer total is reported on stderr.
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 98' in err, err
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    seqs = [r.sequence for r in screed.open(outfile)]
    assert len(seqs) == 1, seqs
    assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
    assert "I/O Errors" not in err
def test_normalize_by_median_quiet():
    # Same run with --quiet: both stdout and stderr must be silent, while
    # the .keep output is still produced.
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '--quiet', '-M', '2e6', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert len(out) == 0, out
    assert len(err) == 0, err
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    seqs = [r.sequence for r in screed.open(outfile)]
    assert len(seqs) == 1, seqs
    assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
    assert "I/O Errors" not in err
def test_normalize_by_median_unpaired_final_read():
    # Paired mode (-p) on a file with a lone final read must abort.
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('single-read.fq'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '-p', infile]
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status != 0
    assert "ERROR: Unpaired reads when require_paired" in err, err
def test_normalize_by_median_sanity_check_0():
    # Requesting far more unique k-mers (-U) than the memory cap allows
    # should be rejected up front.
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('single-read.fq'), infile)
    script = 'normalize-by-median.py'
    args = ['-U', '1024', '--max-mem', '60', infile]
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status != 0, status
    assert "recommended false positive ceiling of 0.1!" in err, err
def test_normalize_by_median_sanity_check_1():
    # An explicit table size too small for -U should trigger a warning/abort.
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-filter-abund-Ns.fq'), infile)
    script = 'normalize-by-median.py'
    args = ['-U', '83', '--max-tablesize', '17', infile]
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status != 0
    assert "Warning: The given tablesize is too small!" in err, err
def test_normalize_by_median_sanity_check_2():
    # With only -U supplied, the memory ceiling is chosen automatically.
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-filter-abund-Ns.fq'), infile)
    script = 'normalize-by-median.py'
    args = ['-U', '83', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert "*** INFO: set memory ceiling automatically." in err, err
    assert "*** Ceiling is: 1e+06 bytes" in err, err
def test_normalize_by_median_sanity_check_3():
    # --fp-rate overrides the default false-positive ceiling; reloading a
    # saved graph afterwards must not re-auto-size it.
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    tablefile = utils.get_temp_filename('table', in_dir)
    shutil.copyfile(utils.get_test_data('test-filter-abund-Ns.fq'), infile)
    script = 'normalize-by-median.py'
    args = ['-s', tablefile, '-U', '83', '--fp-rate', '0.7', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert "Overriding default fp 0.1 with new fp: 0.7" in err, err
    args = ['--loadgraph', tablefile, '-U', '83', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert "WARNING: You have asked that the graph size be auto" in err, err
    assert "NOT be set automatically" in err, err
    assert "loading an existing graph" in err, err
def test_normalize_by_median_unforced_badfile():
    # Without --force, a missing input file must abort the run and must
    # not leave a stale .keep output behind.
    CUTOFF = '1'
    infile = utils.get_temp_filename("potatoes")
    outfile = infile + '.keep'
    in_dir = os.path.dirname(infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', infile]
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status != 0
    assert "ERROR: [Errno 2] No such file or directory:" in err, err
    # Was `if os.path.exists(outfile): assert False, ...` — a direct
    # assertion is clearer and reports the checked condition on failure.
    assert not os.path.exists(outfile), '.keep file should have been removed: '
def test_normalize_by_median_contradictory_args():
    # --force-single and -p (require paired) are mutually exclusive.
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('report.out')
    shutil.copyfile(utils.get_test_data('test-large.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', '--force-single', '-p', '-R',
            outfile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status != 0
    assert "cannot both be set" in err, err
def test_normalize_by_median_stdout_3():
    # `--out -` streams kept reads to stdout ("block device" message).
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', infile, '--out', '-']
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 98' in err, err
    assert 'in block device' in err, err
    assert "I/O Errors" not in err
@attr('known_failing')
def test_normalize_by_median_known_good():
    # Regression check against a pre-computed .keep file; marked
    # known_failing, so it is skipped in normal runs.
    CUTOFF = '2'
    infile = utils.get_temp_filename('test.fa.gz')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('100k-filtered.fa.gz'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '20', '-x', '4e6', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    iter_known = screed.open(utils.get_test_data('100k-filtered.fa.keep.gz'))
    iter_out = screed.open(outfile)
    try:
        # Record names must match pairwise against the known-good output.
        for rknown, rout in zip(iter_known, iter_out):
            assert rknown.name == rout.name
    except Exception as e:
        print(e)
        assert False
def test_normalize_by_median_report_fp():
    # this tests basic reporting of diginorm stats => report.out, including
    # a test of aggregate stats for two input files.
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    infile2 = utils.get_temp_filename('test2.fa')
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile2)
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('report.out')
    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', '-R', outfile, infile, infile2]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert os.path.exists(outfile)
    # Context manager closes the report deterministically (the original
    # leaked the open file handle).
    with open(outfile, 'r') as report:
        line = report.readline().strip()
        assert line == 'total,kept,f_kept', line
        line = report.readline().strip()
        assert line == '1001,1,0.000999', line
        line = report.readline().strip()
        assert line == '2002,1,0.0004995', line
def test_normalize_by_median_report_fp_hifreq():
    # this tests high-frequency reporting of diginorm stats for a single
    # file => report.out.
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('report.out')
    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', '-R', outfile, infile,
            '--report-frequency', '100']
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert os.path.exists(outfile)
    # Context manager closes the report deterministically (the original
    # leaked the open file handle).
    with open(outfile, 'r') as report:
        line = report.readline().strip()
        assert line == 'total,kept,f_kept', line
        line = report.readline().strip()
        assert line == '100,1,0.01', line
        line = report.readline().strip()
        assert line == '200,1,0.005', line
@attr('huge')
def test_normalize_by_median_report_fp_huge():
    # this tests reporting of diginorm stats => report.out for a large
    # file, with the default reporting interval of once every 100k.
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('report.out')
    shutil.copyfile(utils.get_test_data('test-large.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', '-R', outfile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert "fp rate estimated to be 0.623" in err, err
    # Context manager closes the report deterministically (the original
    # leaked the open file handle).
    with open(outfile, 'r') as report:
        report.readline()  # skip header
        line = report.readline()
        assert "100000,25261,0.2526" in line, line
def test_normalize_by_median_unpaired_and_paired():
    # -u supplies an extra unpaired file alongside the paired input.
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-paired.fa'), infile)
    unpairedfile = utils.get_temp_filename('test1.fa', tempdir=in_dir)
    shutil.copyfile(utils.get_test_data('random-20-a.fa'), unpairedfile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '-u', unpairedfile, '-p', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 4061' in err, err
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
def test_normalize_by_median_count_kmers_PE():
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    # The test file has one pair of identical read except for the last base
    # The 2nd read should be discarded in the unpaired mode
    # but kept in the paired end mode adding only one more unique kmer
    shutil.copyfile(utils.get_test_data('paired_one.base.dif.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '--force-single', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 98' in err, err
    assert 'kept 1 of 2 or 50.0%' in err, err
    args = ['-C', CUTOFF, '-k', '17', '-p', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 99' in err, err
    assert 'kept 2 of 2 or 100.0%' in err, err
def test_normalize_by_median_double_file_name():
    # Passing the same filename twice is rejected explicitly.
    infile = utils.get_temp_filename('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    script = 'normalize-by-median.py'
    args = [utils.get_test_data('test-abund-read-2.fa'), infile]
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status != 0
    assert "Duplicate filename--Cannot handle this!" in err, err
def test_normalize_by_median_stdin_no_out():
    # Reading stdin ("-") without an explicit -o/--out must fail.
    infile = utils.get_temp_filename('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    script = 'normalize-by-median.py'
    args = ["-"]
    (status, out, err) = utils.runscript(script, args, in_dir, fail_ok=True)
    assert status != 0
    assert "Accepting input from stdin; output filename" in err, err
def test_normalize_by_median_overwrite():
    # An existing output file is overwritten, not appended to: only the
    # sequence from the new run's input should remain.
    outfile = utils.get_temp_filename('test.fa.keep')
    shutil.copyfile(utils.get_test_data('test-abund-read.fa'), outfile)
    in_dir = os.path.dirname(outfile)
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa', in_dir)
    shutil.copyfile(utils.get_test_data('test-abund-read-3.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '-o', outfile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert os.path.exists(outfile), outfile
    seqs = [r.sequence for r in screed.open(outfile)]
    assert len(seqs) == 1, seqs
    assert 'GACAGCgtgCCGCA' in seqs[0], seqs
def test_normalize_by_median_version():
    script = 'normalize-by-median.py'
    args = ['--version']
    status, out, err = utils.runscript(script, args)
    errlines = err.splitlines()
    # Rebind `err` to the first stderr line that is neither part of the
    # '||' banner nor blank; that line should carry the version string.
    for err in errlines:
        if err.startswith('||') or \
           not err.strip():
            continue
        break
    print(errlines)
    print(err)
    assert err.startswith('khmer ')
def test_normalize_by_median_2():
    """Diginorm at C=2 keeps exactly two reads from test-abund-read-2.fa."""
    cutoff = '2'
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), source)
    utils.runscript('normalize-by-median.py',
                    ['-C', cutoff, '-k', '17', source], work_dir)
    kept = source + '.keep'
    assert os.path.exists(kept), kept
    sequences = [record.sequence for record in screed.open(kept)]
    assert len(sequences) == 2, sequences
    assert sequences[0].startswith('GGTTGACGGGGCTCAGGGGG'), sequences
    assert sequences[1] == 'GGTTGACGGGGCTCAGGG', sequences
def test_normalize_by_median_paired():
CUTOFF = '1'
infile = utils.get_temp_filename('test.fa')
in_dir = os.path.dirname(infile)
shutil.copyfile(utils.get_test_data('test-abund-read-paired.fa'), infile)
script = 'normalize-by-median.py'
args = ['-C', CUTOFF, '-p', '-k', '17', infile]
utils.runscript(script, args, in_dir)
outfile = infile + '.keep'
assert os.path.exists(outfile), outfile
seqs = [r.sequence for r in screed.open(outfile)]
assert len(seqs) == 2, seqs
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
assert seqs[1].startswith('GGTTGACGGGGCTCAGGG'), seqs
def test_normalize_by_median_paired_fq():
    """Paired FASTQ input: all six reads and their names survive C=20."""
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('test-abund-read-paired.fq'), infile)
    _, out, err = utils.runscript('normalize-by-median.py',
                                  ['-C', '20', '-p', '-k', '17', infile],
                                  os.path.dirname(infile))
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    seqs = [record.sequence for record in screed.open(outfile)]
    assert len(seqs) == 6, len(seqs)
    assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
    assert seqs[1].startswith('GGTTGACGGGGCTCAGGG'), seqs
    names = [record.name for record in screed.open(outfile)]
    assert len(names) == 6, names
    assert '895:1:37:17593:9954 1::FOO' in names, names
    assert '895:1:37:17593:9954 2::FOO' in names, names
def test_normalize_by_median_impaired():
    """Paired mode (-p) on input with a broken pair must exit non-zero.

    Uses fail_ok=True so the runner returns the status instead of raising.
    """
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-impaired.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-p', '-k', '17', infile]
    status, out, err = utils.runscript(script, args, in_dir, fail_ok=True)
    # BUG FIX: the original line was the bare expression `status != 0`,
    # which evaluates and discards the result -- the check never ran.
    assert status != 0, status
    assert 'ERROR: Unpaired reads ' in err, err
def test_normalize_by_median_force():
    """-f (force) skips a corrupt input file and reports the I/O errors."""
    corrupt_infile = utils.get_temp_filename('test-corrupt.fq')
    in_dir = os.path.dirname(corrupt_infile)
    good_infile = utils.get_temp_filename('test-good.fq', tempdir=in_dir)
    shutil.copyfile(utils.get_test_data('test-error-reads.fq'), corrupt_infile)
    shutil.copyfile(utils.get_test_data('test-fastq-reads.fq'), good_infile)
    args = ['-f', '-C', '1', '-k', '17', corrupt_infile, good_infile]
    status, out, err = utils.runscript('normalize-by-median.py', args, in_dir)
    assert '*** Skipping' in err
    assert '** I/O Errors' in err
def test_normalize_by_median_no_bigcount():
    """Saved countgraph counts must saturate at 255 (no bigcount).

    Runs with a huge cutoff, saves the graph, and checks that an abundant
    8-mer reads back as the 8-bit counter ceiling, 255.
    """
    infile = utils.get_temp_filename('test.fa')
    hashfile = utils.get_temp_filename('test-out.ct')
    outfile = infile + '.keep'  # NOTE(review): unused below -- confirm intent
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    # NOTE(review): return value unused; presumably kept for the side effect
    # of building a counting table -- verify against _make_counting.
    counting_ht = _make_counting(infile, K=8)
    script = 'normalize-by-median.py'
    # NOTE(review): '-k 8' is a single argv token (not '-k', '8') -- confirm
    # the script's argument parser accepts this form.
    args = ['-C', '1000', '-k 8', '--savegraph', hashfile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert status == 0, (out, err)
    print((out, err))
    assert os.path.exists(hashfile), hashfile
    kh = khmer.load_countgraph(hashfile)
    # 255 is the saturated 8-bit count; bigcount would exceed it.
    assert kh.get('GGTTGACG') == 255
def test_normalize_by_median_empty():
    """An empty FASTA input still yields a .keep output file."""
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('test-empty.fa'), infile)
    utils.runscript('normalize-by-median.py',
                    ['-C', '1', '-k', '17', infile],
                    os.path.dirname(infile))
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
def test_normalize_by_median_emptycountgraph():
    """--loadgraph on a non-countgraph file must fail with a ValueError."""
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('test-empty.fa'), infile)
    args = ['-C', '1', '--loadgraph', infile, infile]
    status, out, err = utils.runscript('normalize-by-median.py', args,
                                       os.path.dirname(infile), fail_ok=True)
    assert status != 0
    assert 'ValueError' in err, (status, out, err)
def test_normalize_by_median_fpr():
    """A tiny table size (-x) must trip the false-positive-rate check."""
    max_tablesize_param = 12
    infile = utils.get_temp_filename('test-fpr.fq')
    shutil.copyfile(utils.get_test_data('test-fastq-reads.fq'), infile)
    args = ['-f', '-k 17', '-x ' + str(max_tablesize_param), infile]
    status, out, err = utils.runscript('normalize-by-median.py', args,
                                       os.path.dirname(infile), fail_ok=True)
    assert status != 0
    assert os.path.exists(infile + '.keep'), infile
    assert '** ERROR: the graph structure is too small' in err, err
def write_by_chunks(infile, outfile, CHUNKSIZE=8192):
    """Copy *infile* to *outfile* in CHUNKSIZE-byte binary chunks.

    Used by the streaming tests to feed or drain a fifo incrementally.

    BUG FIX: the original opened both files without context managers and
    leaked the descriptors if a read or write raised; `with` guarantees
    both are closed on every exit path.
    """
    with io.open(infile, 'rb') as ifile, io.open(outfile, 'wb') as ofile:
        while True:
            chunk = ifile.read(CHUNKSIZE)
            if not chunk:
                break
            ofile.write(chunk)
def test_normalize_by_median_streaming_0():
    """Stream diginorm output through a fifo and count the lines written."""
    CUTOFF = '20'
    infile = utils.get_test_data('100-reads.fq.gz')
    in_dir = os.path.dirname(infile)
    fifo = utils.get_temp_filename('fifo')
    outfile = utils.get_temp_filename('outfile')
    # Use a fifo to copy stdout to a file for checking
    os.mkfifo(fifo)
    # The reader thread must be running before the script opens the fifo
    # for writing, otherwise that open would block forever.
    thread = threading.Thread(target=write_by_chunks, args=(fifo, outfile))
    thread.start()
    # Execute diginorm
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '-o', fifo, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    # Merge the thread
    thread.join()
    assert os.path.exists(outfile), outfile
    with open(outfile) as fp:
        linecount = sum(1 for _ in fp)
    # 400 lines -- presumably 100 FASTQ records x 4 lines, i.e. every read
    # kept at this cutoff; verify against the 100-reads.fq.gz fixture.
    assert linecount == 400
def test_normalize_by_median_streaming_1():
    """Feed the input itself through a fifo; output/err must be sane."""
    CUTOFF = '20'
    infile = utils.get_test_data('test-filter-abund-Ns.fq')
    in_dir = os.path.dirname(infile)
    fifo = utils.get_temp_filename('fifo')
    outfile = utils.get_temp_filename('outfile')
    # Use a fifo to copy stdout to a file for checking
    os.mkfifo(fifo)
    # Writer thread feeds the fifo while the script reads from it; it must
    # be started before the script opens the fifo or the open would block.
    thread = threading.Thread(target=write_by_chunks, args=(infile, fifo))
    thread.start()
    # Execute diginorm
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', '-o', outfile, fifo]
    (status, out, err) = utils.runscript(script, args, in_dir)
    # Merge the thread
    thread.join()
    assert os.path.exists(outfile), outfile
    assert 'Total number of unique k-mers: 98' in err, err
    assert 'fifo is empty' not in err, err
def test_diginorm_basic_functionality_1():
    """All-paired input: with C=1 and -p every pair is kept intact.

    Each pair holds a multicopy sequence ('ACTTCA...') plus a random one.
    """
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('dn-test-all-paired-all-keep.fa'),
                    infile)
    args = ['-C', '1', '-p', '-k', '15', infile]
    _, out, err = utils.runscript('normalize-by-median.py', args,
                                  os.path.dirname(infile))
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    kept = {record.name for record in screed.open(outfile)}
    assert kept == {'a/1', 'a/2', 'b/1', 'b/2',
                    'c/1', 'c/2', 'd/1', 'd/2'}, kept
def test_diginorm_basic_functionality_2():
    """--force-single with C=1 ignores pairing.

    Each pair holds a multicopy sequence ('ACTTCA...') and a random one
    ('G...'); only the random sequences plus a single copy of the
    multicopy sequence should survive.
    """
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('dn-test-all-paired-all-keep.fa'),
                    infile)
    args = ['-C', '1', '--force-single', '-k', '15', infile]
    _, out, err = utils.runscript('normalize-by-median.py', args,
                                  os.path.dirname(infile))
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    kept = {record.name for record in screed.open(outfile)}
    assert kept == {'a/1', 'a/2', 'b/2', 'c/1', 'd/2'}, kept
def test_diginorm_basic_functionality_3():
    """Entirely unpaired input with one duplicate ('A...') and a random
    sequence ('G...'): C=1 keeps exactly three reads, no complaints.
    """
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('dn-test-none-paired.fa'),
                    infile)
    args = ['-C', '1', '-k', '15', infile]
    _, out, err = utils.runscript('normalize-by-median.py', args,
                                  os.path.dirname(infile))
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    kept = {record.name for record in screed.open(outfile)}
    assert kept == {'a/1', 'b/2', 'd/1'}, kept
def test_diginorm_basic_functionality_4():
    # This data is mixed paired/unpaired, but with one duplicate ('A...').
    # and a random sequence ('G...').
    # NOTE(review): the original comment said "With 'C=2' all of the
    # sequences should be kept", but the test runs with C=1 and expects a
    # strict subset (b/1 and d/1 are dropped) -- confirm intended cutoff.
    CUTOFF = ['-C', '1']
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('dn-test-some-paired-all-keep.fa'),
                    infile)
    script = 'normalize-by-median.py'
    args = list(CUTOFF) + ['-k', '15', infile]
    _, out, err = utils.runscript(script, args, in_dir)
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    seqs = set([r.name for r in screed.open(outfile)])
    assert seqs == set(['a/1', 'a/2',
                        'b/2',
                        'c/1', 'c/2',
                        'd/2']), seqs
def test_diginorm_basic_functionality_5():
    """Same scenario as test_diginorm_basic_functionality_1: all-paired
    input with -p and C=1 keeps every read.
    """
    infile = utils.get_temp_filename('test.fa')
    shutil.copyfile(utils.get_test_data('dn-test-all-paired-all-keep.fa'),
                    infile)
    args = ['-C', '1', '-p', '-k', '15', infile]
    _, out, err = utils.runscript('normalize-by-median.py', args,
                                  os.path.dirname(infile))
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    kept = {record.name for record in screed.open(outfile)}
    assert kept == {'a/1', 'a/2', 'b/1', 'b/2',
                    'c/1', 'c/2', 'd/1', 'd/2'}, kept
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class BatchNormalizationTest(test.TestCase):
  """Compares nn_impl.fused_batch_norm against an unfused reference.

  Covers inference, training, first-order gradients and gradients of
  gradients, for NHWC and NCHW layouts, on CPU and (when available) GPU.
  """
  def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
    """Unfused reference for inference mode; returns the evaluated array."""
    if data_format not in ['NHWC', 'NCHW']:
      raise ValueError('data_format must be NCHW or NHWC, '
                       'got %s.' % data_format)
    # The unfused reference op only handles NHWC; transpose NCHW in and out.
    if data_format == 'NCHW':
      x = array_ops.transpose(x, [0, 2, 3, 1])
    y = nn_impl.batch_normalization(x, mean, var, offset, scale, epsilon)
    if data_format == 'NCHW':
      y = array_ops.transpose(y, [0, 3, 1, 2])
    return y.eval()
  def _test_inference(self,
                      x_shape,
                      scale_shape,
                      use_gpu=True,
                      data_format='NHWC'):
    """Checks fused inference output against the reference (atol=1e-3)."""
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    mean_val = np.random.random_sample(scale_shape).astype(np.float32)
    var_val = np.random.random_sample(scale_shape).astype(np.float32)
    with self.test_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      mean = constant_op.constant(mean_val, name='mean')
      var = constant_op.constant(var_val, name='variance')
      epsilon = 0.001
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=mean,
          variance=var,
          epsilon=epsilon,
          data_format=data_format,
          is_training=False)
      y_val = sess.run(y)
      y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
                                  data_format)
    self.assertAllClose(y_ref, y_val, atol=1e-3)
  def _training_ref(self, x, scale, offset, epsilon, data_format):
    """Unfused reference for training mode; returns evaluated (y, mean, var)."""
    if data_format not in ['NHWC', 'NCHW']:
      raise ValueError('data_format must be NCHW or NHWC, '
                       'got %s.' % data_format)
    if data_format == 'NCHW':
      x = array_ops.transpose(x, [0, 2, 3, 1])
    # Batch statistics over N, H, W axes (moments uses n in the denominator).
    mean, var = nn_impl.moments(x, [0, 1, 2], keep_dims=False)
    y = nn_impl.batch_normalization(x, mean, var, offset, scale, epsilon)
    if data_format == 'NCHW':
      y = array_ops.transpose(y, [0, 3, 1, 2])
    return y.eval(), mean.eval(), var.eval()
  def _test_training(self,
                     x_shape,
                     scale_shape,
                     use_gpu=True,
                     data_format='NHWC'):
    """Checks fused training output and batch statistics vs. the reference."""
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    with self.test_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      epsilon = 0.001
      y, mean, var = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      y_val, mean_val, var_val = sess.run([y, mean, var])
      y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
                                                    data_format)
    self.assertAllClose(y_ref, y_val, atol=1e-3)
    self.assertAllClose(mean_ref, mean_val, atol=1e-3)
    # This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
    # the denominator in the formula to calculate variance, while
    # tf.nn.fused_batch_norm has Bessel's correction built in.
    sample_size = x_val.size / scale_val.size
    var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
    self.assertAllClose(var_ref, var_val, atol=1e-3)
  def _test_gradient(self,
                     x_shape,
                     scale_shape,
                     use_gpu=True,
                     data_format='NHWC',
                     is_training=True):
    """Numeric-vs-symbolic gradient check for x, scale and offset."""
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    with self.test_session(use_gpu=use_gpu):
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      # Training mode computes batch statistics; frozen mode needs
      # population statistics supplied.
      if is_training:
        pop_mean = None
        pop_var = None
      else:
        pop_mean = np.random.random_sample(scale_shape).astype(np.float32)
        pop_var = np.random.random_sample(scale_shape).astype(np.float32)
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=pop_mean,
          variance=pop_var,
          data_format=data_format,
          is_training=is_training)
      err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
      err_scale = gradient_checker.compute_gradient_error(scale, scale_shape, y,
                                                          x_shape)
      err_offset = gradient_checker.compute_gradient_error(offset, scale_shape,
                                                           y, x_shape)
    err_tolerance = 1e-3
    self.assertLess(err_x, err_tolerance)
    self.assertLess(err_scale, err_tolerance)
    self.assertLess(err_offset, err_tolerance)
  def _test_grad_grad(self,
                      x_shape,
                      scale_shape,
                      use_gpu=True,
                      data_format='NHWC',
                      is_training=True,
                      err_tolerance=1e-3):
    """Second-order gradient checks, plus a check of the internal grad fn."""
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(np.float32)
    grad_y_val = np.random.random_sample(x_shape).astype(np.float32)
    scale_val = np.random.random_sample(scale_shape).astype(np.float32)
    offset_val = np.random.random_sample(scale_shape).astype(np.float32)
    with self.test_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      grad_y = constant_op.constant(grad_y_val, name='grad_y')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      if is_training:
        pop_mean = None
        pop_var = None
      else:
        pop_mean = np.random.random_sample(scale_shape).astype(np.float32)
        pop_var = np.random.random_sample(scale_shape).astype(np.float32)
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=pop_mean,
          variance=pop_var,
          data_format=data_format,
          is_training=is_training)
      grad_x, grad_scale, grad_offset = gradients_impl.gradients(
          y, [x, scale, offset], grad_y)
      if is_training:
        epsilon = y.op.get_attr('epsilon')
        data_format = y.op.get_attr('data_format')
        grad_vals = sess.run([grad_x, grad_scale, grad_offset])
        # Cross-check the registered gradient against the python helper.
        grad_internal = nn_grad._BatchNormGrad(grad_y, x, scale, epsilon,
                                               data_format)
        grad_internal_vals = sess.run(list(grad_internal))
        for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):
          self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)
      err_grad_grad_y_1 = gradient_checker.compute_gradient_error(
          grad_y, x_shape, grad_x, x_shape)
      err_grad_grad_y_2 = gradient_checker.compute_gradient_error(
          grad_y, x_shape, grad_scale, scale_shape)
      err_grad_grad_y_3 = gradient_checker.compute_gradient_error(
          grad_y, x_shape, grad_offset, scale_shape)
      # In freeze mode, grad_x is not a function of x.
      if is_training:
        err_grad_x_1 = gradient_checker.compute_gradient_error(
            x, x_shape, grad_x, x_shape)
        err_grad_x_2 = gradient_checker.compute_gradient_error(
            x, x_shape, grad_scale, scale_shape)
        err_grad_scale = gradient_checker.compute_gradient_error(
            scale, scale_shape, grad_x, x_shape)
    self.assertLess(err_grad_grad_y_1, err_tolerance)
    self.assertLess(err_grad_grad_y_2, err_tolerance)
    self.assertLess(err_grad_grad_y_3, err_tolerance)
    if is_training:
      self.assertLess(err_grad_x_1, err_tolerance)
      self.assertLess(err_grad_x_2, err_tolerance)
      self.assertLess(err_grad_scale, err_tolerance)
  def testInference(self):
    x_shape = [1, 1, 6, 1]
    if test.is_gpu_available(cuda_only=True):
      self._test_inference(x_shape, [1], use_gpu=True, data_format='NHWC')
      self._test_inference(x_shape, [1], use_gpu=True, data_format='NCHW')
    self._test_inference(x_shape, [1], use_gpu=False, data_format='NHWC')
    x_shape = [1, 1, 6, 2]
    if test.is_gpu_available(cuda_only=True):
      self._test_inference(x_shape, [2], use_gpu=True, data_format='NHWC')
    self._test_inference(x_shape, [2], use_gpu=False, data_format='NHWC')
    x_shape = [1, 2, 1, 6]
    if test.is_gpu_available(cuda_only=True):
      self._test_inference(x_shape, [2], use_gpu=True, data_format='NCHW')
    x_shape = [27, 131, 127, 6]
    if test.is_gpu_available(cuda_only=True):
      self._test_inference(x_shape, [131], use_gpu=True, data_format='NCHW')
      self._test_inference(x_shape, [6], use_gpu=True, data_format='NHWC')
    self._test_inference(x_shape, [6], use_gpu=False, data_format='NHWC')
  def testTraining(self):
    x_shape = [1, 1, 6, 1]
    if test.is_gpu_available(cuda_only=True):
      self._test_training(x_shape, [1], use_gpu=True, data_format='NHWC')
      self._test_training(x_shape, [1], use_gpu=True, data_format='NCHW')
    self._test_training(x_shape, [1], use_gpu=False, data_format='NHWC')
    x_shape = [1, 1, 6, 2]
    if test.is_gpu_available(cuda_only=True):
      self._test_training(x_shape, [2], use_gpu=True, data_format='NHWC')
    self._test_training(x_shape, [2], use_gpu=False, data_format='NHWC')
    x_shape = [1, 2, 1, 6]
    if test.is_gpu_available(cuda_only=True):
      self._test_training(x_shape, [2], use_gpu=True, data_format='NCHW')
    x_shape = [27, 131, 127, 6]
    if test.is_gpu_available(cuda_only=True):
      self._test_training(x_shape, [131], use_gpu=True, data_format='NCHW')
      self._test_training(x_shape, [6], use_gpu=True, data_format='NHWC')
    self._test_training(x_shape, [6], use_gpu=False, data_format='NHWC')
  def testBatchNormGrad(self):
    for is_training in [True, False]:
      x_shape = [1, 1, 6, 1]
      if test.is_gpu_available(cuda_only=True):
        self._test_gradient(
            x_shape, [1],
            use_gpu=True,
            data_format='NHWC',
            is_training=is_training)
        self._test_gradient(
            x_shape, [1],
            use_gpu=True,
            data_format='NCHW',
            is_training=is_training)
      self._test_gradient(
          x_shape, [1],
          use_gpu=False,
          data_format='NHWC',
          is_training=is_training)
      x_shape = [1, 1, 6, 2]
      if test.is_gpu_available(cuda_only=True):
        self._test_gradient(
            x_shape, [2],
            use_gpu=True,
            data_format='NHWC',
            is_training=is_training)
      self._test_gradient(
          x_shape, [2],
          use_gpu=False,
          data_format='NHWC',
          is_training=is_training)
      x_shape = [1, 2, 1, 6]
      if test.is_gpu_available(cuda_only=True):
        self._test_gradient(
            x_shape, [2],
            use_gpu=True,
            data_format='NCHW',
            is_training=is_training)
      x_shape = [7, 9, 13, 6]
      if test.is_gpu_available(cuda_only=True):
        self._test_gradient(
            x_shape, [9],
            use_gpu=True,
            data_format='NCHW',
            is_training=is_training)
        self._test_gradient(
            x_shape, [6],
            use_gpu=True,
            data_format='NHWC',
            is_training=is_training)
      self._test_gradient(
          x_shape, [6],
          use_gpu=False,
          data_format='NHWC',
          is_training=is_training)
  def _testBatchNormGradGrad(self, config):
    """Runs _test_grad_grad for one {'shape', 'err_tolerance'} config."""
    shape = config['shape']
    err_tolerance = config['err_tolerance']
    for is_training in [True, False]:
      if test.is_gpu_available(cuda_only=True):
        self._test_grad_grad(
            shape, [shape[3]],
            use_gpu=True,
            data_format='NHWC',
            is_training=is_training,
            err_tolerance=err_tolerance)
        self._test_grad_grad(
            shape, [shape[1]],
            use_gpu=True,
            data_format='NCHW',
            is_training=is_training,
            err_tolerance=err_tolerance)
      self._test_grad_grad(
          shape, [shape[3]],
          use_gpu=False,
          data_format='NHWC',
          is_training=is_training,
          err_tolerance=err_tolerance)
  def testBatchNormGradGrad(self):
    configs = [{
        'shape': [2, 3, 4, 5],
        'err_tolerance': 1e-2
    }, {
        'shape': [2, 3, 2, 2],
        'err_tolerance': 1e-3
    }]
    for config in configs:
      self._testBatchNormGradGrad(config)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
|
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Classes and functions for controlling, reading, and writing to co-processes.
"""
import sys
import os
import signal
from signal import SIGCHLD, SIGTERM, SIGSTOP, SIGCONT, SIGHUP, SIG_DFL, SIGINT
from errno import EBADF, EIO
from pycopia import logging
from pycopia import shparser
from pycopia.aid import NULL
from pycopia.OS.procfs import ProcStat
from pycopia.OS.exitstatus import ExitStatus
from pycopia.OS.procutils import run_as
from pycopia.OS import scheduler
from pycopia.fileutils import close_on_exec, set_nonblocking
class ProcessError(Exception):
    """Raised for errors while managing a co-process."""
class Process:
"""Abstract base class for Processes. Handles all process handling, and
some common functionality. I/O is handled in subclasses.
"""
def __init__(self, cmdline, logfile=None, callback=None, async=False):
self.cmdline = cmdline
self.deadchild = 0
self.closed = False
self.callback = callback # called at death of process
self._restart = True # restart interrupted system calls
self._buf = b''
self._errbuf = b''
self._writebuf = b''
self.exitstatus = None
self._environment = None
self._async = bool(async) # use asyncio, or not
self._authtoken = None
self.logfile = logfile
def __enter__(self):
return self
def __exit__(self, extype, exvalue, traceback):
self.close()
return False
# Override in subclass -- close your file descriptors connected to
# subprocess.
def close(self):
self.closed = True
def __repr__(self):
return "{0}({1!r}, async={2!r})".format(
self.__class__.__name__, self.cmdline, self._async)
def __str__(self):
if self.deadchild:
return str(self.exitstatus)
else:
st = self.stat()
try:
tty = os.ttyname(self.fileno())
except:
tty = "?"
return "{:6d} {:7s} ({}) {}".format(st.pid, tty, st.statestr(),
self.cmdline)
def __int__(self):
return self.childpid
def __hash__(self):
return id(self)
def restart(self, flag=1):
old = self._restart
self._restart = bool(flag)
return old
def clone(self):
"""Spawns a copy of this process.
Note that the log file is not inherited.
"""
return self.__class__(self.cmdline, env=self.environment,
callback=self.callback, async=self._async)
@property
def logfile(self):
"""A bytes file-like object that IO will be written to."""
return self._log
@logfile.setter
def logfile(self, newlog):
if newlog is None:
self._log = newlog
return
newlog.write # asserts newlog has write method
try:
self._log = newlog.buffer
except AttributeError:
self._log = newlog
@logfile.deleter
def logfile(self):
self._log = None
@property
def environment(self):
if self._environment is None:
ps = ProcStat(self.childpid)
self._environment = ps.environment
return self._environment
@environment.setter
def environment(self, env):
assert isinstance(env, dict), "Environment must be a dictionary"
self._environment = env
@environment.deleter
def environment(self):
self._environment = None
@property
def basename(self):
return os.path.basename(self.cmdline.split()[0])
def kill(self, sig=SIGINT):
if not self.deadchild:
self.set_callback(None) # explicit kill means no restart
os.kill(self.childpid, sig)
def killwait(self, sig=SIGINT):
self.kill(sig)
return self.wait()
def stop(self):
os.kill(self.childpid, SIGSTOP)
def cont(self):
os.kill(self.childpid, SIGCONT)
self.deadchild = 0
def hangup(self):
os.kill(self.childpid, SIGHUP)
def wait(self, option=0):
"""wait() retrieves process exit status. Note that this may block if
the process is still running.
"""
if self.exitstatus is not None:
return self.exitstatus
else:
pm = get_procmanager()
return pm.waitproc(self)
def setpgid(self, pgid):
os.setpgid(self.childpid, pgid)
def set_exitstatus(self, exitstatus):
self.exitstatus = exitstatus
def set_callback(self, cb=None):
"""set_callback(cb) Sets the callback function that will be
called when child dies. """
self.callback = cb
def dead(self):
"""dead() Called when the child dies. Usually only the
ProcManager uses this."""
self.deadchild = 1
if self.callback:
self.callback(self)
def stat(self):
return ProcStat(self.childpid)
def fstat(self):
return os.fstat(self.fileno())
def isdead(self):
return self.deadchild
# Process object considered true if child alive, false if child dead.
def __bool__(self):
return not self.deadchild
def read(self, amt=2147483646):
if amt < 0:
amt = 2147483646
bs = len(self._buf)
try:
while bs < amt:
c = self._read(4096)
if not c:
break
self._buf += c
bs = len(self._buf)
except EOFError: # TODO log an error
pass # let it ruturn rest of buffer
data = self._buf[:amt]
self._buf = self._buf[amt:]
return data
def readerr(self, amt=2147483646):
if amt < 0:
amt = 2147483646
rs = 1024
try:
while len(self._errbuf) < amt:
c = self._readerr(rs)
if not c:
break
self._errbuf += c
except EOFError:
pass
amt = min(amt, len(self._errbuf))
data = self._errbuf[:amt]
self._errbuf = self._errbuf[amt:]
return data
# extra fileobject methods.
def readline(self, amt=2147483646):
if amt < 0:
amt = 2147483646
bufs = []
rs = min(100, amt)
while 1:
c = self.read(rs)
i = c.find(b"\n")
if i < 0 and len(c) > amt:
i = amt-1
elif amt <= i:
i = amt-1
if i >= 0 or c == b'':
bufs.append(c[:i+1])
self._unread(c[i+1:])
return b"".join(bufs)
bufs.append(c)
amt -= len(c)
rs = min(amt, rs*2)
def readlines(self, sizehint=2147483646):
if sizehint < 0:
sizehint = 2147483646
rv = []
while sizehint > 0:
line = self.readline()
if not line:
break
rv.append(line)
sizehint -= len(line)
return rv
def _write_buf(self):
writ = self._write(self._writebuf)
self._writebuf = self._writebuf[writ:]
return writ
def write(self, data):
while self._writebuf:
writ = self._write(self._writebuf)
self._writebuf = data[writ:]
writ = self._write(data)
self._writebuf = data[writ:]
return writ
send = write
def tell(self):
raise IOError((EBADF, "Process object not seekable"))
def seek(self, pos, whence=0):
raise IOError((EBADF, "Process object not seekable"))
def rewind(self):
raise IOError((EBADF, "Process object not seekable"))
def flush(self):
return None
def _unread(self, data):
self._buf = data + self._buf
# Interface for asyncio poller.
def readable(self):
return True
def writable(self):
return bool(self._writebuf)
def priority(self):
return False
def read_handler(self):
data = self._read(16384)
logging.warning("unhandled read from process")
def write_handler(self):
self._write_buf()
def pri_handler(self):
pass
def hangup_handler(self):
logging.info("Hangup: {}.\n".format(self.cmdline))
def error_handler(self):
logging.error(
"Async handler error occured: {}.\n".format(self.basename))
def exception_handler(self, ex, val, tb):
logging.error("Error event: {} ({})\n".format(ex, val))
class ProcessPipe(Process):
    """Process(<commandline>, [<logfile>], [environ])
    Forks and execs a process as given by the command line argument. The
    process's stdio is connected to this instance via pipes, and can be read
    and written to by the instances read() and write() methods.
    """
    # NOTE(review): 'async' is a reserved word from Python 3.7; this module
    # targets Python 3.4 (see shebang).
    def __init__(self, cmdline, logfile=None, env=None, callback=None,
                 merge=1, pwent=None, async=False, devnull=None, _pgid=0):
        super().__init__(cmdline, logfile, callback, async)
        if env:
            self.environment = env
        cmd = split_command_line(self.cmdline)
        # now, fork the child connected by pipes
        p2cread, self._p_stdin = os.pipe()
        os.set_inheritable(p2cread, True)
        self._p_stdout, c2pwrite = os.pipe()
        os.set_inheritable(c2pwrite, True)
        # merge=1: child's stderr shares the stdout pipe; no separate fd.
        if merge:
            self._stderr, c2perr = None, None
        else:
            self._stderr, c2perr = os.pipe()
        self.childpid = os.fork()
        self.childpid2 = None  # for compatibility with pipeline
        if self.childpid == 0:
            # Child: rewire fds 0/1/2 to the pipe ends, then exec.
            os.setpgid(0, _pgid)
            os.close(0)
            os.close(1)
            os.close(2)
            os.dup2(p2cread, 0)
            os.close(p2cread)
            os.dup2(c2pwrite, 1)
            if merge:
                os.dup2(c2pwrite, 2)
            else:
                os.dup2(c2perr, 2)
                os.close(c2perr)
            os.close(c2pwrite)
            try:
                if pwent:
                    run_as(pwent)
                if env:
                    os.execvpe(cmd[0], cmd, env)
                else:
                    os.execvp(cmd[0], cmd)
            finally:
                # exec only returns on failure; never fall back into the
                # parent's code path.
                os._exit(127)
            # Shouldn't come here
            os._exit(127)
        # parent: close the child's ends of the pipes.
        os.close(p2cread)
        os.close(c2pwrite)
        if c2perr:
            os.close(c2perr)
    def isatty(self):
        return os.isatty(self._p_stdin)
    def fileno(self):
        # The read (stdout) descriptor is the canonical fd for this object.
        if self._p_stdout is None:
            raise ValueError("I/O operation on closed process")
        return self._p_stdout
    def filenos(self):
        """filenos() Returns tuple of all file descriptors used in this object.
        """
        if self._p_stdout is None:
            raise ValueError("I/O operation on closed process")
        return self._p_stdout, self._p_stdin, self._stderr
    def nonblocking(self, flag=1):
        # NOTE(review): self._stderr is None in merge mode -- confirm
        # set_nonblocking tolerates a None descriptor.
        for fd in self._p_stdout, self._p_stdin, self._stderr:
            set_nonblocking(fd, flag)
    def interrupt(self):
        self.kill(SIGINT)
    def close(self):
        """Close all pipe descriptors; safe to call more than once."""
        if self.closed:
            return
        super(ProcessPipe, self).close()
        try:
            os.close(self._p_stdin)
        except (TypeError, OSError):
            pass
        try:
            os.close(self._p_stdout)
        except (TypeError, OSError):
            pass
        if self._stderr:
            try:
                os.close(self._stderr)
            except (TypeError, OSError):
                pass
            self._stderr = None
        self._p_stdin = None
        self._p_stdout = None
        self.callback = None  # break a possible reference loop
    def _write(self, data):
        return os.write(self._p_stdin, data)
    def _read_fd(self, fd, length):
        # All reads are mirrored to the logfile, when one is set.
        data = os.read(fd, length)
        if self._log is not None:
            self._log.write(data)
        return data
    def _read(self, amt=4096):
        if self._p_stdout is None:
            return b""
        return self._read_fd(self._p_stdout, amt)
    def _readerr(self, amt):
        if self._stderr is None:
            return b""
        return self._read_fd(self._stderr, amt)
class ProcessPty(Process):
    """ProcessPty(<commandline>, [<logfilename>], [environ])
    Forks and execs a process as given by the command line argument. The
    process's stdio is connected to this instance via a pty, and can be read
    and written to by the instances read() and write() methods. That pty
    becomes the processes controlling terminal.
    """
    # NOTE(review): 'async' is a reserved word from Python 3.7; this module
    # targets Python 3.4 (see shebang).
    def __init__(self, cmdline, logfile=None, env=None, callback=None,
                 merge=1, pwent=None, async=False, devnull=False, _pgid=0):
        super().__init__(cmdline, logfile, callback, async)
        if env:
            self.environment = env
        cmd = split_command_line(self.cmdline)
        try:
            pid, self._fd = os.forkpty()
        except OSError as err:
            logging.error("ProcessPty error: {}".format(err))
            raise
        else:
            if pid == 0:  # child
                sys.excepthook = sys.__excepthook__
                if devnull:
                    # Redirect standard file descriptors.
                    sys.stdout.flush()
                    sys.stderr.flush()
                    os.close(sys.__stdin__.fileno())
                    os.close(sys.__stdout__.fileno())
                    os.close(sys.__stderr__.fileno())
                    # stdin always from /dev/null
                    sys.stdin = open("/dev/null", 'r')
                    os.dup2(sys.stdin.fileno(), 0)
                    # log file is stdout and stderr, otherwise /dev/null
                    if logfile is None:
                        sys.stdout = open("/dev/null", 'a+')
                        # NOTE(review): buffering=0 with a text mode ('a+')
                        # raises ValueError on Python 3 -- confirm this
                        # branch ever runs successfully.
                        sys.stderr = open("/dev/null", 'a+', 0)
                        os.dup2(sys.stdout.fileno(), 1)
                        os.dup2(sys.stderr.fileno(), 2)
                    else:
                        so = se = sys.stdout = sys.stderr = logfile
                        os.dup2(so.fileno(), 1)
                        os.dup2(se.fileno(), 2)
                try:
                    if pwent:
                        run_as(pwent)
                    if env:
                        os.execvpe(cmd[0], cmd, env)
                    else:
                        os.execvp(cmd[0], cmd)
                finally:
                    os._exit(127)  # should not be reached
            else:  # parent
                close_on_exec(self._fd)
                self.childpid = pid
                self.childpid2 = None  # for compatibility with pipeline
        # Control characters are looked up lazily from the tty module.
        self._intr = None
        self._eof = None
    def isatty(self):
        return os.isatty(self._fd)
    def fileno(self):
        if self._fd is None:
            raise ValueError("I/O operation on closed process")
        return self._fd
    def filenos(self):
        """filenos() Returns tuple of all file descriptors used in this object.
        """
        if self._fd is None:
            raise ValueError("I/O operation on closed process")
        return (self._fd,)
    def nonblocking(self, flag=1):
        set_nonblocking(self._fd, flag)
    def interrupt(self):
        """Like pressing Ctl-C on most terminals."""
        if self._intr is None:
            from pycopia import tty
            self._intr = tty.get_intr_char(self._fd)
        self._write(self._intr)
    def send_eof(self):
        """Like pressing Ctl-D on most terminals."""
        if self._eof is None:
            from pycopia import tty
            self._eof = tty.get_eof_char(self._fd)
        self._write(self._eof)
    def close(self):
        """Close the pty descriptor; safe to call more than once."""
        if self.closed:
            return
        super(ProcessPty, self).close()
        try:
            os.close(self._fd)
        except (TypeError, OSError):
            pass
        self._fd = None
        self.callback = None  # break a possible reference loop
    def _write(self, data):
        return os.write(self._fd, data)
    def _read(self, length=100):
        # All reads are mirrored to the logfile, when one is set.
        data = os.read(self._fd, length)
        if self._log is not None:
            self._log.write(data)
        return data
class CoProcessPty(ProcessPty):
    """A pty-connected coprocess that stays inside Python (no exec).

    The caller is expected to check ``childpid == 0`` after construction
    and run the coprocess body itself in the child.
    """
    def __init__(self, method, logfile=None, env=None,
                 callback=None, async=False, pwent=None, _pgid=0):
        # NOTE(review): super() resolves to ProcessPty, whose __init__ itself
        # calls os.forkpty() and execs the (bogus) "python: ..." command line;
        # it also binds ``callback`` to ProcessPty's ``env`` slot. It looks
        # like a direct Process.__init__ call was intended here -- verify
        # against the original pycopia sources before changing.
        super().__init__("python: %s" % (method.__name__,),
                         logfile, callback, async)
        pid, self._fd = os.forkpty()
        self.childpid = pid
        self.childpid2 = None # for compatibility with pipeline
        if pid == 0 and pwent:
            run_as(pwent)
class CoProcessPipe(ProcessPipe):
    """A pipe-connected coprocess that stays inside Python (no exec).

    Sets up stdin/stdout (and optionally stderr) pipes, then forks. The
    caller checks ``childpid == 0`` and runs the coprocess body itself
    (see ProcManager.coprocess).
    """
    def __init__(self, method, logfile=None, env=None,
                 callback=None, merge=False, async=False, pwent=None, _pgid=0):
        # NOTE(review): super() resolves to ProcessPipe; its __init__ is not
        # visible here -- presumably it only records bookkeeping state and
        # does not fork. Verify before restructuring.
        super().__init__("python <=> %s" % (method.__name__,), logfile,
                         callback, async)
        # Parent writes to _p_stdin -> child reads fd 0;
        # child writes fd 1 -> parent reads from _p_stdout.
        p2cread, self._p_stdin = os.pipe()
        os.set_inheritable(p2cread, True)
        self._p_stdout, c2pwrite = os.pipe()
        os.set_inheritable(c2pwrite, True)
        if merge:
            # stderr shares the stdout pipe.
            self._stderr, c2perr = None, None
        else:
            self._stderr, c2perr = os.pipe()
            os.set_inheritable(c2perr, True)
        self.childpid = os.fork()
        self.childpid2 = None
        if self.childpid == 0:
            try:
                # Child
                os.close(0)
                os.close(1)
                os.close(2)
                os.dup2(p2cread, 0)
                # sys.stdin = os.fdopen(0, mode="r")
                os.dup2(c2pwrite, 1)
                # sys.stdout = os.fdopen(1, mode="w")
                if merge:
                    os.dup2(c2pwrite, 2)
                    # sys.stderr = os.fdopen(2, mode="w")
                else:
                    os.dup2(c2perr, 2)
                    # sys.stderr = os.fdopen(2, mode="w")
                if pwent:
                    run_as(pwent)
            except Exception:
                logging.exception_error("CoProcessPipe")
        # Both parent and child fall through here: each closes its copy of
        # the descriptor ends it no longer needs (the child keeps the dup'd
        # fds 0/1/2).
        os.close(p2cread)
        os.close(c2pwrite)
        if c2perr:
            os.close(c2perr)
# simply forks this python process
class SubProcess(Process):
    """A forked copy of this Python process (no exec).

    After construction, ``childpid`` is 0 in the child and the child's PID
    in the parent; the caller runs the child's work itself (see
    ProcManager.submethod).
    """
    def __init__(self, pwent=None, _pgid=0):
        super().__init__(sys.argv[0])
        pid = os.fork()
        if pid == 0:
            sys.excepthook = sys.__excepthook__ # remove any debugger hook
            if pwent:
                run_as(pwent)
        self.childpid = pid
        self.childpid2 = None # for compatibility with pipeline
# TODO need a more general pipeline
class ProcessPipeline(ProcessPipe):
    """Connects two commands via a pipe, they appear as one process object."""
    def __init__(self, cmdline, logfile=None, env=None, callback=None,
                 merge=None, pwent=None, async=False, devnull=None, _pgid=0):
        # Exactly one "|" is supported; merge/devnull are accepted for
        # signature compatibility but unused here.
        assert cmdline.count("|") == 1
        [cmdline1, cmdline2] = cmdline.split("|")
        if env:
            self.environment = env
        # NOTE(review): only cmdline2 is recorded via the base initializer,
        # so str()/cmdline of this object names just the second command.
        super().__init__(cmdline2, logfile, callback, async)
        self._stderr = None
        cmd1 = split_command_line(cmdline1)
        cmd2 = split_command_line(cmdline2)
        # self._p_stdin -> cmd1 -> p_write|p_read -> cmd2 -> self._p_stdout
        _p_stdout, self._p_stdin = os.pipe()
        p_read, p_write = os.pipe()
        self._p_stdout, _p_stdin = os.pipe()
        self.childpid = os.fork()
        # cmd1
        if self.childpid == 0:
            # Child 1
            os.dup2(_p_stdout, 0)
            os.dup2(p_write, 1)
            self._exec(cmd1, env, pwent)
            os._exit(127)
        # cmd2
        cmd2pid = os.fork()
        if cmd2pid == 0:
            # Child 2
            os.dup2(p_read, 0)
            os.dup2(_p_stdin, 1)
            self._exec(cmd2, env, pwent)
            os._exit(127)
        self.childpid2 = cmd2pid
        # close our copies
        os.close(_p_stdout)
        os.close(_p_stdin)
        os.close(p_read)
        os.close(p_write)
    def _exec(self, cmd, env, pwent):
        # close all other file descriptors for child.
        # Only returns on exec failure; callers follow with os._exit(127).
        if pwent:
            run_as(pwent)
        if env:
            os.execvpe(cmd[0], cmd, env)
        else:
            os.execvp(cmd[0], cmd)
class ProcManager(object):
"""An instance of ProcManager manages a collection of child processes. It
is a singleton, and you should use the get_procmanager() factory function
to get the instance. """
def __init__(self):
self._pgid = os.getpgid(0)
self._procs = {}
signal.signal(SIGCHLD, self._child_handler)
signal.siginterrupt(SIGCHLD, False)
def __len__(self):
return len(self._procs)
def __str__(self):
s = []
for p in self.getprocs():
s.append(str(p))
return "\n".join(s)
def spawnprocess(self, pklass, cmd, logfile=None, env=None, callback=None,
persistent=False, merge=True, pwent=None, async=False,
devnull=False):
"""Start a child process using a user supplied subclass of ProcessPty
or ProcessPipe.
"""
if persistent and (callback is None):
callback = self.respawn_callback
signal.signal(SIGCHLD, SIG_DFL) # critical area
proc = pklass(cmd, logfile=logfile, env=env, callback=callback,
merge=merge, pwent=pwent, async=async, devnull=devnull,
_pgid=self._pgid)
self._procs[proc.childpid] = proc
# TODO need a more general pipeline
if proc.childpid2:
self._procs[proc.childpid2] = proc
signal.signal(SIGCHLD, self._child_handler)
signal.siginterrupt(SIGCHLD, False)
return proc
def spawnpipe(self, cmd, logfile=None, env=None, callback=None,
persistent=False, merge=True, pwent=None, async=False,
devnull=False):
"""Start a child process, connected by pipes."""
if cmd.find("|") > 0:
klass = ProcessPipeline
else:
klass = ProcessPipe
return self.spawnprocess(klass, cmd, logfile, env, callback,
persistent, merge, pwent, async, devnull)
# default spawn method
spawn = spawnpipe
def spawnpty(self, cmd, logfile=None, env=None, callback=None,
persistent=False, merge=True, pwent=None, async=False,
devnull=False):
"""Start a child process using a pty. The <persistent> variable is the
number of times the process will be respawned if the previous
invocation dies.
"""
return self.spawnprocess(ProcessPty, cmd, logfile, env, callback,
persistent, merge, pwent, async, devnull)
def coprocess(self, method, args=(), logfile=None, env=None, callback=None,
async=False):
signal.signal(SIGCHLD, SIG_DFL) # critical area
proc = CoProcessPipe(method, logfile=logfile, env=env,
callback=callback, async=async)
if proc.childpid == 0:
os.setpgid(0, self._pgid)
sys.excepthook = sys.__excepthook__
# child is not managing any of these
self._procs.clear()
try:
rv = method(*args)
except SystemExit as val:
rv = int(val)
except Exception:
logging.exception_error("coprocess")
rv = 127
if rv is None:
rv = 0
try:
rv = int(rv)
except:
rv = 0
os._exit(rv)
self._procs[proc.childpid] = proc
signal.signal(SIGCHLD, self._child_handler)
signal.siginterrupt(SIGCHLD, False)
return proc
def subprocess(self, _method, *args, **kwargs):
return self.submethod(_method, args, kwargs)
def submethod(self, _method, args=None, kwargs=None, pwent=None):
args = args or ()
kwargs = kwargs or {}
signal.signal(SIGCHLD, SIG_DFL) # critical area
proc = SubProcess(pwent=pwent)
if proc.childpid == 0: # in child
os.setpgid(0, self._pgid)
sys.excepthook = sys.__excepthook__
self._procs.clear()
try:
rv = _method(*args, **kwargs)
except SystemExit as val:
rv = val.code
except:
ex, val, tb = sys.exc_info()
try:
import traceback
try:
fname = _method.__name__
except AttributeError:
try:
fname = _method.__class__.__name__
except AttributeError:
fname = str(_method)
with open("/tmp/" + fname + "_error.log", "w+") as errfile:
traceback.print_exception(ex, val, tb, None, errfile)
finally:
ex = val = tb = None
rv = 127
if rv is None:
rv = 0
try:
rv = int(rv)
except:
rv = 0
os._exit(rv)
else:
self._procs[proc.childpid] = proc
signal.signal(SIGCHLD, self._child_handler)
signal.siginterrupt(SIGCHLD, False)
return proc
# introspection and query methods
def getpids(self):
"""getpids() Returns a list of managed PIDs (which are integers)."""
return list(self._procs.keys())
def getprocs(self):
"""getprocs() Returns a list of managed process objects."""
return list(self._procs.values())
def getbyname(self, name):
"""getbyname(procname) Returns a list of process objects that match the
given name.
"""
name = os.path.basename(name)
return [p for p in list(self._procs.values()) if p.basename == name]
def getbypid(self, pid):
"""getbypid(pid) Returns the process object that matches the given PID.
"""
try:
return self._procs[pid]
except KeyError:
return None
def getstats(self):
"""getstats() Returns a list of process status objects (ProcStat) for
each managed process.
"""
return [ProcStat(o) for o in list(self._procs.keys())]
def killall(self, name=None, sig=SIGTERM):
"""Kills all managed processes with the name 'name'. If 'name' not
given kill ALL processes. Default signal is SIGTERM.
"""
if name is None:
procs = list(self._procs.values())
else:
procs = self.getbyname(name)
for p in procs:
p.close()
p.kill(sig)
def kill(self, proc, sig=SIGINT):
proc.kill(sig)
def stopall(self):
"""Sends STOP to all managed processes. To restart get the
process objects and invoke the cont() method.
"""
for p in list(self._procs.values()):
p.stop()
def clone(self, proc=None):
"""clone([proc]) clones the supplied process object and manages it as
well. If no process object is supplied then clone the first managed
process found in this ProcManager.
"""
if proc is None: # default to cloning first process found.
procs = list(self._procs.values())
if procs:
proc = procs[0]
del procs
else:
return
signal.signal(SIGCHLD, SIG_DFL) # critical area
newproc = proc.clone()
self._procs[newproc.childpid] = newproc
signal.signal(SIGCHLD, self._child_handler)
signal.siginterrupt(SIGCHLD, False)
return newproc
def respawn_callback(self, deadproc):
"""Callback that performs a respawn, for persistent services."""
if deadproc.exitstatus.status == 127:
logging.error("process {!r} didn't start (NOT restarting).\n".format( # noqa
deadproc.cmdline))
raise ProcessError("Process never started. Check command line.")
elif not deadproc.exitstatus:
logging.error("process {!r} died: %s (restarting in 1 sec.).\n".format( # noqa
deadproc.cmdline, deadproc.exitstatus))
scheduler.add(self._respawn, 1.0, args=(deadproc,))
else:
logging.info("process {!r} normal exit (NOT restarting).\n".format(
deadproc.cmdline))
return None
def _respawn(self, deadproc):
new = self.clone(deadproc)
new._log = deadproc._log
# this is the SIGCHLD signal handler
def _child_handler(self, sig, stack):
pid, sts = os.waitpid(-1, os.WNOHANG)
proc = self._procs.get(pid)
if proc is not None:
self._proc_status(proc, sts)
signal.signal(SIGCHLD, self._child_handler)
signal.siginterrupt(SIGCHLD, False)
def waitpid(self, pid, option=0):
try:
proc = self._procs[pid]
except KeyError:
logging.warning("Wait on unmanaged process (pid: %s)." % pid)
cmdline = ProcStat(pid).cmdline
pid, sts = os.waitpid(pid, option)
return ExitStatus(sts, cmdline.split()[0])
return self.waitproc(proc)
def waitproc(self, proc, option=0): # waits for a Process object.
"""waitproc(process, [option])
Waits for a process object to finish. Depends on signal handler.
"""
if proc.exitstatus is not None:
return proc.exitstatus
signal.signal(SIGCHLD, SIG_DFL)
try:
pid, sts = os.waitpid(proc.childpid, option)
finally:
signal.signal(SIGCHLD, self._child_handler)
signal.siginterrupt(SIGCHLD, False)
return self._proc_status(proc, sts)
def _proc_status(self, proc, sts):
es = ExitStatus(sts, proc.cmdline.split()[0])
proc.set_exitstatus(es)
# XXX untested with stopped processes
if es.state != ExitStatus.STOPPED:
proc.dead()
del self._procs[proc.childpid]
return es
def loop(self, poller, timeout=-1.0, callback=NULL):
while self._procs:
poller.poll(timeout)
callback(self)
if scheduler.get_scheduler(): # wait for any restarts
scheduler.sleep(1.5)
def get_procmanager():
    """get_procmanager() returns the procmanager. A ProcManager is a singleton
    instance. Always use this factory function to get it."""
    global procmanager
    # Create the singleton on first use; subsequent calls reuse it.
    if "procmanager" not in globals():
        procmanager = ProcManager()
    return procmanager
def remove_procmanager():
    """Tear down the singleton: restore default SIGCHLD handling and drop
    the module-level instance so the next get_procmanager() rebuilds it.

    Raises NameError if no manager was ever created.
    """
    global procmanager
    signal.signal(SIGCHLD, SIG_DFL)
    del procmanager
# Process manager factory functions
def spawnpipe(cmd, logfile=None, env=None, callback=None,
              persistent=False, merge=True, pwent=None, async=False):
    """Start a child process, connected by pipes.
    Module-level convenience that delegates to the singleton ProcManager.
    """
    pm = get_procmanager()
    proc = pm.spawnpipe(cmd, logfile, env, callback, persistent, merge, pwent,
                        async)
    return proc
def spawnpty(cmd, logfile=None, env=None, callback=None,
             persistent=False, merge=True, pwent=None, async=False,
             devnull=False):
    """Start a child process using a pty.
    Module-level convenience that delegates to the singleton ProcManager.
    """
    pm = get_procmanager()
    proc = pm.spawnpty(cmd, logfile, env, callback, persistent, merge, pwent,
                       async, devnull)
    return proc
def coprocess(func, args=(), logfile=None, env=None, callback=None,
              async=False):
    """Works like fork(), but connects the childs stdio to a pty. Returns a
    file-like object connected to the master end of the child pty.
    """
    # NOTE(review): despite the docstring, this delegates to
    # ProcManager.coprocess, which builds a CoProcessPipe (pipes, not a
    # pty) -- confirm which is intended.
    pm = get_procmanager()
    cp = pm.coprocess(func, args, logfile, env, callback, async)
    return cp
def waitproc(proc, option=0):
    """Wait for a managed process object via the singleton manager."""
    return get_procmanager().waitproc(proc, option)
def subprocess(method, *args, **kwargs):
    """Fork and run *method* in a managed child process.

    NOTE: this module-level name shadows the stdlib ``subprocess`` module
    within this file.
    """
    return get_procmanager().subprocess(method, *args, **kwargs)
def submethod(_method, args=None, kwargs=None, pwent=None):
    """Fork and run *_method(*args, **kwargs)* in a managed child process."""
    return get_procmanager().submethod(_method, args, kwargs, pwent)
def getstatusoutput(cmd, logfile=None, env=None, callback=None):
    """Run *cmd* to completion and return (exitstatus, collected output)."""
    proc = spawnpipe(cmd, logfile, env, callback)
    output = proc.read()
    proc.wait()
    return proc.exitstatus, output
def call(*args, **kwargs):
    """Run a piped command to completion and return its exit status."""
    proc = spawnpipe(*args, **kwargs)
    return proc.wait()
def setpgid(pid_or_proc, pgrp):
    """Set the process group of *pid_or_proc* (a PID or a Process object
    convertible to int) to *pgrp*."""
    return os.setpgid(int(pid_or_proc), pgrp)
# Module-level command splitter: parses command lines shell-style (quoting
# aware), used by the Process* constructors above.
split_command_line = shparser.get_command_splitter()
|
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from oslo import messaging
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as q_constants
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.sriovnicagent.common import config # noqa
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import eswitch_manager as esm
# Module-level logger for the SR-IOV NIC agent.
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """Server-side RPC endpoint: receives plugin notifications for the agent."""
    # Set RPC API version to 1.0 by default.
    # history
    #   1.1 Support Security Group RPC
    target = messaging.Target(version='1.1')
    def __init__(self, context, agent):
        super(SriovNicSwitchRpcCallbacks, self).__init__()
        self.context = context
        self.agent = agent
        # The mixin expects a ``sg_agent`` attribute; the agent fills both
        # roles here.
        self.sg_agent = agent
    def port_update(self, context, **kwargs):
        """Handle a port_update notification from the plugin."""
        LOG.debug("port_update received")
        port = kwargs.get('port')
        # Put the port mac address in the updated_devices set.
        # Do not store port details, as if they're used for processing
        # notifications there is no guarantee the notifications are
        # processed in the same order as the relevant API requests.
        self.agent.updated_devices.add(port['mac_address'])
        LOG.debug("port_update RPC received for port: %s", port['id'])
class SriovNicSwitchPluginApi(agent_rpc.PluginApi,
                              sg_rpc.SecurityGroupServerRpcApiMixin):
    """Agent-to-plugin RPC API, extended with security-group server calls."""
    pass
class SriovNicSwitchAgent(sg_rpc.SecurityGroupAgentRpcMixin):
    """L2 agent that manages SR-IOV virtual functions via an eswitch manager.

    Polls assigned devices, applies admin-state changes and security-group
    rules, and reports device state back to the Neutron plugin.
    """
    def __init__(self, physical_devices_mappings, exclude_devices,
                 polling_interval, root_helper):
        self.polling_interval = polling_interval
        self.root_helper = root_helper
        self.setup_eswitch_mgr(physical_devices_mappings,
                               exclude_devices)
        configurations = {'device_mappings': physical_devices_mappings}
        self.agent_state = {
            'binary': 'neutron-sriov-nic-agent',
            'host': cfg.CONF.host,
            'topic': q_constants.L2_AGENT_TOPIC,
            'configurations': configurations,
            'agent_type': q_constants.AGENT_TYPE_NIC_SWITCH,
            'start_flag': True}
        # Stores port update notifications for processing in the main loop
        self.updated_devices = set()
        self._setup_rpc()
        self.init_firewall()
        # Initialize iteration counter
        self.iter_num = 0
    def _setup_rpc(self):
        """Wire up RPC: plugin API, state reporting, and consumers."""
        self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
        LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
        self.topic = topics.AGENT
        self.plugin_rpc = SriovNicSwitchPluginApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # RPC network init
        self.context = context.get_admin_context_without_session()
        # Handle updates from service
        self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self)]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
    def _report_state(self):
        """Periodic heartbeat: report agent state (and device count) to the
        plugin; drop start_flag after the first successful report."""
        try:
            devices = len(self.eswitch_mgr.get_assigned_devices())
            self.agent_state.get('configurations')['devices'] = devices
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))
    def setup_eswitch_mgr(self, device_mappings, exclude_devices=None):
        """Create the embedded-switch manager.

        BUG FIX: the default was a mutable ``{}``; use None and substitute
        a fresh dict per call (behavior unchanged for all existing callers).
        """
        self.eswitch_mgr = esm.ESwitchManager(device_mappings,
                                              exclude_devices or {},
                                              self.root_helper)
    def scan_devices(self, registered_devices, updated_devices):
        """Diff current VF assignments against what the agent knows.

        Returns a dict with 'current', 'added', 'updated' and 'removed'
        device sets.
        """
        curr_devices = self.eswitch_mgr.get_assigned_devices()
        device_info = {}
        device_info['current'] = curr_devices
        device_info['added'] = curr_devices - registered_devices
        # we don't want to process updates for devices that don't exist
        device_info['updated'] = updated_devices & curr_devices
        # we need to clean up after devices are removed
        device_info['removed'] = registered_devices - curr_devices
        return device_info
    def _device_info_has_changes(self, device_info):
        # True when any add/update/remove work is pending.
        return (device_info.get('added')
                or device_info.get('updated')
                or device_info.get('removed'))
    def process_network_devices(self, device_info):
        """Apply all pending device changes; returns True if a resync with
        the plugin is required."""
        resync_a = False
        resync_b = False
        self.prepare_devices_filter(device_info.get('added'))
        if device_info.get('updated'):
            self.refresh_firewall()
        # Updated devices are processed the same as new ones, as their
        # admin_state_up may have changed. The set union prevents duplicating
        # work when a device is new and updated in the same polling iteration.
        devices_added_updated = (set(device_info.get('added'))
                                 | set(device_info.get('updated')))
        if devices_added_updated:
            resync_a = self.treat_devices_added_updated(devices_added_updated)
        if device_info.get('removed'):
            resync_b = self.treat_devices_removed(device_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
    def treat_device(self, device, pci_slot, admin_state_up):
        """Apply *admin_state_up* to one VF and notify the plugin."""
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_state(device, pci_slot,
                                                  admin_state_up)
            except exc.SriovNicError:
                LOG.exception(_LE("Failed to set device %s state"), device)
                return
            if admin_state_up:
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
        else:
            LOG.info(_LI("No device with MAC %s defined on agent."), device)
    def treat_devices_added_updated(self, devices):
        """Fetch details for added/updated devices and apply them.

        Returns True when a resync is needed (detail fetch failed).
        """
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, devices, self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get port details for devices "
                      "with MAC address %(devices)s: %(e)s",
                      {'devices': devices, 'e': e})
            # resync is needed
            return True
        for device_details in devices_details_list:
            device = device_details['device']
            LOG.debug("Port with MAC address %s is added", device)
            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                profile = device_details['profile']
                self.treat_device(device_details['device'],
                                  profile.get('pci_slot'),
                                  device_details['admin_state_up'])
            else:
                LOG.info(_LI("Device with MAC %s not defined on plugin"),
                         device)
        return False
    def treat_devices_removed(self, devices):
        """Mark removed devices down on the plugin side.

        Returns True when any removal failed and a resync is needed.
        """
        resync = False
        for device in devices:
            LOG.info(_LI("Removing device with mac_address %s"), device)
            try:
                dev_details = self.plugin_rpc.update_device_down(self.context,
                                                                 device,
                                                                 self.agent_id,
                                                                 cfg.CONF.host)
            except Exception as e:
                LOG.debug("Removing port failed for device %(device)s "
                          "due to %(exc)s", {'device': device, 'exc': e})
                resync = True
                continue
            if dev_details['exists']:
                LOG.info(_LI("Port %s updated."), device)
            else:
                LOG.debug("Device %s not defined on plugin", device)
        return resync
    def daemon_loop(self):
        """Main polling loop: scan devices, process changes, sleep out the
        remainder of the polling interval. Never returns."""
        sync = True
        devices = set()
        LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))
        while True:
            start = time.time()
            LOG.debug("Agent rpc_loop - iteration:%d started",
                      self.iter_num)
            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))
                devices.clear()
                sync = False
            device_info = {}
            # Save updated devices dict to perform rollback in case
            # resync would be needed, and then clear self.updated_devices.
            # As the greenthread should not yield between these
            # two statements, this will should be thread-safe.
            updated_devices_copy = self.updated_devices
            self.updated_devices = set()
            try:
                device_info = self.scan_devices(devices, updated_devices_copy)
                if self._device_info_has_changes(device_info):
                    LOG.debug("Agent loop found changes! %s", device_info)
                    # If treat devices fails - indicates must resync with
                    # plugin
                    sync = self.process_network_devices(device_info)
                    devices = device_info['current']
            except Exception:
                LOG.exception(_LE("Error in agent loop. Devices info: %s"),
                              device_info)
                sync = True
                # Restore devices that were removed from this set earlier
                # without overwriting ones that may have arrived since.
                self.updated_devices |= updated_devices_copy
            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)!",
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
            self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
    """Parse and validate the SR-IOV agent's device-mapping configuration."""
    def __init__(self):
        # physnet name -> network device name
        self.device_mappings = {}
        # network device name -> set of excluded VF PCI slots
        self.exclude_devices = {}
    def parse(self):
        """Parses device_mappings and exclude_devices.
        Parse and validate the consistency in both mappings
        """
        self.device_mappings = q_utils.parse_mappings(
            cfg.CONF.SRIOV_NIC.physical_device_mappings)
        self.exclude_devices = config.parse_exclude_devices(
            cfg.CONF.SRIOV_NIC.exclude_devices)
        self._validate()
    def _validate(self):
        """Validate configuration.
        Validate that network_device in excluded_device
        exists in device mappings

        Raises ValueError on an excluded device that is not mapped.
        """
        # BUG FIX: itervalues()/iterkeys() are Python-2-only; the plain
        # values() view / direct dict iteration behave identically on both
        # Python 2 and 3.
        dev_net_set = set(self.device_mappings.values())
        for dev_name in self.exclude_devices:
            if dev_name not in dev_net_set:
                # NOTE(review): ``_`` is not imported in this module -- it is
                # presumably installed as a builtin by neutron's gettext
                # setup; verify before touching.
                raise ValueError(_("Device name %(dev_name)s is missing from "
                                   "physical_device_mappings") % {'dev_name':
                                                                  dev_name})
def main():
    """Entry point: parse config, build the agent, and run its main loop.

    Exits with status 1 on configuration or initialization failure; on
    success, daemon_loop() never returns.
    """
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    try:
        config_parser = SriovNicAgentConfigParser()
        config_parser.parse()
        device_mappings = config_parser.device_mappings
        exclude_devices = config_parser.exclude_devices
    except ValueError:
        LOG.exception(_LE("Failed on Agent configuration parse. "
                          "Agent terminated!"))
        raise SystemExit(1)
    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)
    polling_interval = cfg.CONF.AGENT.polling_interval
    root_helper = cfg.CONF.AGENT.root_helper
    try:
        agent = SriovNicSwitchAgent(device_mappings,
                                    exclude_devices,
                                    polling_interval,
                                    root_helper)
    except exc.SriovNicError:
        LOG.exception(_LE("Agent Initialization Failed"))
        raise SystemExit(1)
    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
# Allow running the agent directly as a script.
if __name__ == '__main__':
    main()
|
|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Multisize sliding window workload estimation functions.
"""
from contracts import contract
from neat.contracts_extra import *
from itertools import islice
from collections import deque
import logging
# Module-level logger for the workload estimation routines.
log = logging.getLogger(__name__)
@contract
def mean(data, window_size):
    """ Get the data mean according to the window size.
    :param data: A list of values.
    :type data: list(number)
    :param window_size: A window size.
    :type window_size: int,>0
    :return: The mean value.
    :rtype: float
    """
    # Divide by the window size, not len(data): a partially filled window
    # is deliberately averaged over the full window.
    total = sum(data)
    return float(total) / window_size
@contract
def variance(data, window_size):
    """ Get the data variance according to the window size.
    :param data: A list of values.
    :type data: list(number)
    :param window_size: A window size.
    :type window_size: int,>0
    :return: The variance value.
    :rtype: float
    """
    # Sample variance with Bessel's correction (window_size - 1 in the
    # denominator), centered on the window mean.
    avg = mean(data, window_size)
    squared_deviations = [(value - avg) ** 2 for value in data]
    return float(sum(squared_deviations)) / (window_size - 1)
@contract
def acceptable_variance(probability, window_size):
    """ Get the acceptable variance.
    :param probability: The probability to use.
    :type probability: number,>=0,<=1
    :param window_size: A window size.
    :type window_size: int,>0
    :return: The acceptable variance.
    :rtype: float
    """
    # Variance of a Bernoulli(probability) sample mean over the window.
    return probability * (1.0 - probability) / window_size
@contract
def estimate_probability(data, window_size, state):
    """ Get the estimated probability.
    :param data: A list of data values.
    :type data: list(number)
    :param window_size: The window size.
    :type window_size: int,>0
    :param state: The current state.
    :type state: int,>=0
    :return: The estimated probability.
    :rtype: float,>=0
    """
    # Relative frequency of *state* over the (full) window size.
    occurrences = data.count(state)
    return occurrences / float(window_size)
@contract
def update_request_windows(request_windows, previous_state, current_state):
    """ Update and return the updated request windows.
    :param request_windows: The previous request windows.
    :type request_windows: list(deque)
    :param previous_state: The previous state.
    :type previous_state: int,>=0
    :param current_state: The current state.
    :type current_state: int,>=0
    :return: The updated request windows.
    :rtype: list(deque)
    """
    # Record the observed transition previous_state -> current_state; the
    # bounded deque discards the oldest entry automatically.
    window = request_windows[previous_state]
    window.append(current_state)
    return request_windows
@contract
def update_estimate_windows(estimate_windows, request_windows,
                            previous_state):
    """ Update and return the updated estimate windows.
    :param estimate_windows: The previous estimate windows.
    :type estimate_windows: list(list(dict))
    :param request_windows: The current request windows.
    :type request_windows: list(deque)
    :param previous_state: The previous state.
    :type previous_state: int,>=0
    :return: The updated estimate windows.
    :rtype: list(list(dict))
    """
    request_window = request_windows[previous_state]
    for state, estimate_window in enumerate(estimate_windows[previous_state]):
        for window_size, estimates in estimate_window.items():
            # Take only the newest `window_size` observations.
            start = max(len(request_window) - window_size, 0)
            recent = list(islice(request_window, start, None))
            estimates.append(
                estimate_probability(recent, window_size, state))
    return estimate_windows
@contract
def update_variances(variances, estimate_windows, previous_state):
    """ Updated and return the updated variances.
    :param variances: The previous variances.
    :type variances: list(list(dict))
    :param estimate_windows: The current estimate windows.
    :type estimate_windows: list(list(dict))
    :param previous_state: The previous state.
    :type previous_state: int,>=0
    :return: The updated variances.
    :rtype: list(list(dict))
    """
    estimate_window = estimate_windows[previous_state]
    for state, variance_map in enumerate(variances[previous_state]):
        for size in variance_map:
            samples = estimate_window[state][size]
            if len(samples) < size:
                # Not enough samples yet: pessimistic default variance.
                variance_map[size] = 1.0
            else:
                variance_map[size] = variance(list(samples), size)
    return variances
@contract
def update_acceptable_variances(acceptable_variances, estimate_windows, previous_state):
    """ Update and return the updated acceptable variances.
    :param acceptable_variances: The previous acceptable variances.
    :type acceptable_variances: list(list(dict))
    :param estimate_windows: The current estimate windows.
    :type estimate_windows: list(list(dict))
    :param previous_state: The previous state.
    :type previous_state: int,>=0
    :return: The updated acceptable variances.
    :rtype: list(list(dict))
    """
    estimate_window = estimate_windows[previous_state]
    for state, acc_map in enumerate(acceptable_variances[previous_state]):
        for size in acc_map:
            # Acceptable variance is derived from the newest estimate.
            latest_estimate = estimate_window[state][size][-1]
            acc_map[size] = acceptable_variance(latest_estimate, size)
    return acceptable_variances
@contract
def select_window(variances, acceptable_variances, window_sizes):
    """ Select window sizes according to the acceptable variances.
    :param variances: The variances.
    :type variances: list(list(dict))
    :param acceptable_variances: The acceptable variances.
    :type acceptable_variances: list(list(dict))
    :param window_sizes: The available window sizes.
    :type window_sizes: list(int)
    :return: The selected window sizes.
    :rtype: list(list(int))
    """
    n = len(variances)
    selected = []
    for i in range(n):
        row = []
        for j in range(n):
            # Walk the sizes in order, keeping the largest whose variance
            # is still within the acceptable bound.
            chosen = window_sizes[0]
            for size in window_sizes:
                if variances[i][j][size] > acceptable_variances[i][j][size]:
                    break
                chosen = size
            row.append(chosen)
        selected.append(row)
    return selected
@contract
def select_best_estimates(estimate_windows, selected_windows):
    """ Select the best estimates according to the selected windows.
    :param estimate_windows: The estimate windows.
    :type estimate_windows: list(list(dict))
    :param selected_windows: The selected window sizes.
    :type selected_windows: list(list(int))
    :return: The selected best estimates.
    :rtype: list(list(number))
    """
    n = len(estimate_windows)
    best = []
    for i in range(n):
        row = []
        for j in range(n):
            # Newest estimate from the chosen window; 0.0 when empty.
            estimates = estimate_windows[i][j][selected_windows[i][j]]
            row.append(estimates[-1] if estimates else 0.0)
        best.append(row)
    return best
@contract
def init_request_windows(number_of_states, max_window_size):
    """ Initialize a request window data structure.
    :param number_of_states: The number of states.
    :type number_of_states: int,>0
    :param max_window_size: The max size of the request windows.
    :type max_window_size: int,>0
    :return: The initialized request windows data structure.
    :rtype: list(deque)
    """
    # One bounded deque per state.
    return [deque([], max_window_size) for _ in range(number_of_states)]
@contract
def init_variances(window_sizes, number_of_states):
    """ Initialize a variances data structure.
    :param window_sizes: The required window sizes.
    :type window_sizes: list(int)
    :param number_of_states: The number of states.
    :type number_of_states: int,>0
    :return: The initialized variances data structure.
    :rtype: list(list(dict))
    """
    # n x n grid of {window_size: 1.0} dicts; each cell gets its own dict.
    return [[dict((size, 1.0) for size in window_sizes)
             for _ in range(number_of_states)]
            for _ in range(number_of_states)]
@contract
def init_deque_structure(window_sizes, number_of_states):
    """ Initialize a 3 level deque data structure.
    :param window_sizes: The required window sizes.
    :type window_sizes: list(int)
    :param number_of_states: The number of states.
    :type number_of_states: int,>0
    :return: The initialized 3 level deque data structure.
    :rtype: list(list(dict))
    """
    # Square matrix of {window_size: bounded deque}; the comprehension
    # guarantees every cell owns distinct deque instances.
    return [[{size: deque([], size) for size in window_sizes}
             for _ in range(number_of_states)]
            for _ in range(number_of_states)]
@contract
def init_selected_window_sizes(window_sizes, number_of_states):
    """ Initialize a selected window sizes data structure.
    :param window_sizes: The required window sizes.
    :type window_sizes: list(int)
    :param number_of_states: The number of states.
    :type number_of_states: int,>0
    :return: The initialized selected window sizes data structure.
    :rtype: list(list(int))
    """
    # Every cell starts at the smallest available window size.
    smallest = window_sizes[0]
    return [[smallest for _ in range(number_of_states)]
            for _ in range(number_of_states)]
|
|
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=R0201
from datetime import datetime
import pytest
import pytz
import six
import mock
from k8s import config
from k8s.base import Model, SelfModel
from k8s.fields import Field, JSONField, ListField, OnceField, ReadOnlyField, RequiredField
from k8s.models.common import ObjectMeta
# Object name shared by the model fixtures and the mock API responses below.
NAME = "my-model-test"
class ModelTest(Model):
    # Minimal model exercising every field type provided by the k8s library.
    class Meta:
        url_template = "/apis/v1/modeltests/{name}"
    metadata = Field(ObjectMeta)
    field = Field(int)
    list_field = ListField(int)
    once_field = OnceField(int)  # writable only while the model is new (see tests below)
    read_only_field = ReadOnlyField(int)  # never writable from client code
    alt_type_field = Field(int, alt_type=six.text_type)  # accepts int or text
    dict_field = Field(dict)
    _exec = Field(int)  # leading underscore avoids the `exec` keyword; serialized as "exec"
    time_field = Field(datetime)
    json_field = JSONField()
    self_field = Field(SelfModel)  # recursive reference: nests another ModelTest
class TestFields(object):
    # Field semantics are exercised both for "new" models (constructed
    # locally) and "old" models (deserialized from the API via from_dict).
    @pytest.fixture(autouse=True)
    def set_config_debug(self, monkeypatch):
        # Enable client debug mode for every test in this class.
        monkeypatch.setattr(config, "debug", True)
    @pytest.mark.parametrize("field_name,initial_value,other_value", (
        ("field", 1, 2),
        ("list_field", [1], [1, 2]),
        ("once_field", 1, 2),
        ("_exec", 1, 2),
        ("json_field", 1, [1, 2]),
    ))
    def test_field_new(self, field_name, initial_value, other_value):
        # On a new model, every field (including OnceField) is writable.
        kwargs = {"new": True, field_name: initial_value}
        model = ModelTest(**kwargs)
        assert getattr(model, field_name) == initial_value
        setattr(model, field_name, other_value)
        assert getattr(model, field_name) == other_value
    @pytest.mark.parametrize("field_name,initial_value,other_value", (
        ("field", 1, 2),
        ("list_field", [1], [1, 2]),
    ))
    def test_field_old(self, field_name, initial_value, other_value):
        # Regular fields stay writable on models loaded from the API.
        model = ModelTest.from_dict({field_name: initial_value})
        assert getattr(model, field_name) == initial_value
        setattr(model, field_name, other_value)
        assert getattr(model, field_name) == other_value
    def test_once_field_old(self):
        # OnceField silently ignores writes once the model is not new.
        model = ModelTest.from_dict({"once_field": 1})
        assert model.once_field == 1
        model.once_field = 2
        assert model.once_field == 1
    def test_exec_field_old(self):
        # `_exec` round-trips through the "exec" JSON key.
        model = ModelTest.from_dict({"exec": 1})
        assert model._exec == 1
        model._exec = 2
        assert model._exec == 2
        assert model.as_dict()[u"exec"] == 2
    def test_read_only_field_new(self):
        # ReadOnlyField ignores writes even at construction time.
        model = ModelTest(new=True, read_only_field=1)
        assert model.read_only_field is None
        model.read_only_field = 2
        assert model.read_only_field is None
    def test_read_only_field_old(self):
        # Values from the API are kept, but client writes are ignored.
        model = ModelTest.from_dict({"read_only_field": 1})
        assert model.read_only_field == 1
        model.read_only_field = 2
        assert model.read_only_field == 1
    @pytest.mark.parametrize("value,modifier", [
        (1, lambda x: x + 1),
        (u"string", lambda x: x.upper())
    ])
    def test_alt_type_field(self, value, modifier):
        # alt_type allows both int and text values to round-trip unchanged.
        model = ModelTest.from_dict({"alt_type_field": value})
        assert model.alt_type_field == value
        assert model.as_dict()[u"alt_type_field"] == value
        model.alt_type_field = modifier(value)
        assert model.alt_type_field == modifier(value)
    @pytest.mark.parametrize("input,dt", (
        ("2009-01-01T17:59:59Z", datetime(2009, 1, 1, 17, 59, 59, tzinfo=pytz.UTC)),
        ("2009-01-01T17:59:59+01:00", datetime(2009, 1, 1, 16, 59, 59, tzinfo=pytz.UTC)),
    ))
    def test_time_field_from_dict(self, input, dt):
        # Timestamps are parsed into timezone-aware datetimes (normalized to UTC).
        model = ModelTest.from_dict({"time_field": input})
        assert isinstance(model.time_field, datetime)
        assert model.time_field == dt
    def test_time_field_as_dict(self):
        # Serialization emits the RFC3339/ISO8601 "Z" form.
        model = ModelTest(time_field=datetime(2009, 1, 1, 17, 59, 59, tzinfo=pytz.UTC))
        d = model.as_dict()
        assert d["time_field"] == "2009-01-01T17:59:59Z"
    @pytest.mark.parametrize("value,is_valid", (
        (None, True),
        (1, True),
        (1.1, True),
        ("string", True),
        ([1, 2], True),
        ({"key": "value"}, True),
        (ModelTest(), False),
        ([1, None], False),
    ))
    def test_json_field_validation(self, value, is_valid):
        # JSONField only accepts JSON-serializable values (recursively).
        model = ModelTest()
        if is_valid:
            setattr(model, "json_field", value)
            assert model.json_field == value
        else:
            with pytest.raises(TypeError):
                setattr(model, "json_field", value)
            assert model.json_field is None
class RequiredFieldTest(Model):
    # Model with one mandatory field and one optional field with default 100.
    required_field = RequiredField(int)
    field = Field(int, 100)
# NOTE(review): the "logger" fixture is defined elsewhere (conftest) — not
# visible in this file.
@pytest.mark.usefixtures("logger")
class TestRequiredField(object):
    @pytest.mark.parametrize("kwargs", [
        {"required_field": 1, "field": 2},
        {"required_field": 1},
    ])
    def test_create_with_fields(self, kwargs):
        # Construction succeeds as long as the required field is supplied.
        instance = RequiredFieldTest(new=True, **kwargs)
        for key, value in kwargs.items():
            assert getattr(instance, key) == value
    def test_create_fails_when_field_missing(self):
        # Omitting the RequiredField raises TypeError at construction.
        with pytest.raises(TypeError):
            RequiredFieldTest(new=True, field=1)
class TestSelfField(object):
    def test_create_from_dict(self):
        # A SelfModel field deserializes into a nested instance of the
        # enclosing model type.
        model = ModelTest.from_dict({"self_field": {"exec": 1}})
        assert getattr(model, "self_field") == ModelTest(_exec=1)
    def test_get_or_create_merge(self, get):
        # NOTE(review): `get` is presumably a mocked-HTTP-GET fixture from
        # conftest — not visible here; verify against the fixtures module.
        get.return_value = _create_mock_response()
        object_meta = ObjectMeta(name=NAME, labels={"test": "true"})
        model = ModelTest.get_or_create(metadata=object_meta, self_field=ModelTest(_exec=1))
        # Server-side read-only data and client-supplied data are merged.
        assert getattr(model, "self_field") == ModelTest.from_dict({"read_only_field": 1, "exec": 1})
def _create_mock_response():
    """Build a mock HTTP response whose .json() yields a ModelTest payload."""
    payload = {
        "apiVersion": "v1",
        "kind": "ModelTest",
        "metadata": {
            "creationTimestamp": "2019-11-23T13:43:42Z",
            "generation": 7,
            "labels": {
                "test": "true"
            },
            "name": NAME,
            "resourceVersion": "96758807",
            "selfLink": _uri(NAME),
            "uid": "d8f1ba26-b182-11e6-a364-fa163ea2a9c4"
        },
        "self_field": {
            "read_only_field": 1
        }
    }
    mock_response = mock.Mock()
    mock_response.json.return_value = payload
    return mock_response
def _uri(name=""):
return "/apis/v1/modeltests/{name}".format(name=name)
|
|
"""Small utilities."""
import functools
import operator
import string
import time
from collections.abc import Sequence
from pyspark import RDD, SparkContext
from sympy import (
sympify, Symbol, Expr, SympifyError, count_ops, default_sort_key,
AtomicExpr, Integer, S
)
from sympy.core.assumptions import ManagedProperties
from sympy.core.sympify import CantSympify
#
# SymPy utilities
# ---------------
#
def ensure_sympify(obj, role='', expected_type=None):
    """Sympify the given object with checking and error reporting.
    This is a shallow wrapper over SymPy sympify function to have error
    reporting in consistent style and an optional type checking.
    :param obj: The object to be sympified.
    :param role: Description of the object's role, used in error messages.
    :param expected_type: Optional type (or tuple of types) the sympified
        result must be an instance of.
    :raises TypeError: When sympification fails or the result has an
        unexpected type.
    """
    header = 'Invalid {}: '.format(role)
    try:
        sympified = sympify(obj)
    except SympifyError as exc:
        # Fix: error message read 'simpified'; also chain the original
        # SympifyError so the root cause is visible in tracebacks.
        raise TypeError(header, obj, 'failed to be sympified', exc.args) from exc
    if expected_type is None or isinstance(sympified, expected_type):
        return sympified
    else:
        raise TypeError(header, sympified, 'expecting', expected_type)
def ensure_symb(obj, role=''):
    """Sympify the given object, requiring the result to be a symbol."""
    return ensure_sympify(obj, role=role, expected_type=Symbol)
def ensure_expr(obj, role=''):
    """Sympify the given object, requiring the result to be an expression."""
    return ensure_sympify(obj, role=role, expected_type=Expr)
def sympy_key(expr):
    """Get the key for ordering SymPy expressions.
    This function assumes that the given expression is already sympified.
    """
    # Order primarily by operation count, then by SymPy's canonical key.
    complexity = count_ops(expr)
    canonical = default_sort_key(expr)
    return complexity, canonical
def is_higher(obj, priority):
    """Test if the object has higher operation priority.
    Objects without a defined ``_op_priority`` attribute are considered
    lower than the given priority.
    """
    fallback = priority - 1
    return getattr(obj, '_op_priority', fallback) > priority
class NonsympifiableFunc(CantSympify):
    """Wrap a callable so that SymPy will not attempt to sympify it.
    Inside SymPy functions like replace, arguments are first subjected to
    sympification, which can be very expensive.  Wrapping a callable inside
    this class makes such attempts abort very early on.
    """
    __slots__ = ['_func']
    def __init__(self, func):
        """Store the callable to dispatch to."""
        self._func = func
    def __call__(self, *args, **kwargs):
        """Forward the call to the wrapped callable."""
        return self._func(*args, **kwargs)
class _EnumSymbsMeta(ManagedProperties):
    """The meta class for enumeration symbols.
    The primary purpose of this metaclass is to set the concrete singleton
    values from the enumerated symbols set in the class body.
    """
    # NOTE(review): ManagedProperties is removed in recent SymPy versions —
    # confirm the pinned SymPy version supports it.
    # Name of the class attribute that holds the (identifier, LaTeX) pairs.
    SYMBS_INPUT = '_symbs_'
    def __new__(mcs, name, bases, attrs):
        """Create the new concrete symbols class."""
        cls = super().__new__(mcs, name, bases, attrs)
        if not hasattr(cls, mcs.SYMBS_INPUT):
            raise AttributeError('Cannot find attribute ' + mcs.SYMBS_INPUT)
        symbs = getattr(cls, mcs.SYMBS_INPUT)
        if symbs is None:
            # Base class.
            return cls
        # Validate the declared symbols: a sequence of (identifier, LaTeX)
        # string pairs, with at least two entries.
        if not isinstance(symbs, Sequence):
            raise ValueError('Invalid symbols', symbs, 'expecting a sequence')
        for i in symbs:
            invalid = not isinstance(i, Sequence) or len(i) != 2 or any(
                not isinstance(j, str) for j in i
            )
            if invalid:
                raise ValueError(
                    'Invalid symbol', i,
                    'expecting pairs of identifier and LaTeX form.'
                )
        if len(symbs) < 2:
            raise ValueError(
                'Invalid symbols ', symbs, 'expecting multiple of them'
            )
        # Instantiate one singleton per entry and bind it to the class
        # under the entry's identifier; the index is the symbol's value.
        for i, v in enumerate(symbs):
            obj = cls(i)
            setattr(cls, v[0], obj)
            continue
        return cls
class EnumSymbs(AtomicExpr, metaclass=_EnumSymbsMeta):
    """Base class for enumeration symbols.
    Subclasses can set `_symbs_` inside the class body to be a sequence of
    string pairs. Then attributes named after the first field of the pairs will
    be created, with the LaTeX form controlled by the second pair.
    The resulted values are valid SymPy expressions. They are ordered according
    to their order in the given enumeration sequence.
    """
    # Subclasses override this with the (identifier, LaTeX) pairs.
    _symbs_ = None
    # Name of the slot holding this instance's index into ``_symbs_``.
    _VAL_FIELD = '_val_index'
    __slots__ = [_VAL_FIELD]
    def __init__(self, val_index):
        """Initialize the concrete symbol object.
        """
        if self._symbs_ is None:
            raise ValueError('Base EnumSymbs class cannot be instantiated')
        setattr(self, self._VAL_FIELD, val_index)
    @property
    def args(self):
        """The argument for SymPy."""
        # Single-element tuple: the index wrapped as a SymPy Integer.
        return Integer(getattr(self, self._VAL_FIELD)),
    def __str__(self):
        """Get the string representation of the symbol."""
        return self._symbs_[getattr(self, self._VAL_FIELD)][0]
    def __repr__(self):
        """Get the machine readable string representation."""
        return '.'.join([type(self).__name__, str(self)])
    # High priority so our arithmetic dunders win in mixed expressions
    # (see is_higher above).
    _op_priority = 20.0
    def __eq__(self, other):
        """Test two values for equality."""
        return isinstance(other, type(self)) and self.args == other.args
    def __hash__(self):
        """Hash the concrete symbol object."""
        return hash(repr(self))
    def __lt__(self, other):
        """Test two values for less than order.
        The order will be based on the order given in the class.
        """
        return self.args < other.args
    def __gt__(self, other):
        """Test two values for greater than."""
        return self.args > other.args
    def __sub__(self, other: Expr):
        """Subtract the current value with another.
        This method is mainly to be able to work together with the Kronecker
        delta class from SymPy. The difference is only guaranteed to have
        correct ``is_zero`` property. The actual difference might not make
        mathematical sense.
        """
        if isinstance(other, type(self)):
            # Integer difference of the indices: zero iff equal symbols.
            return self.args[0] - other.args[0]
        elif len(other.atoms(Symbol)) == 0:
            raise ValueError(
                'Invalid operation for ', (self, other),
                'concrete symbols can only be subtracted for the same type'
            )
        else:
            # We are having a symbolic value at the other expression. We just
            # need to make sure that the result is fuzzy.
            assert other.is_zero is None
            return other
    def __rsub__(self, other):
        """Subtract the current value from the other expression.
        Only the ``is_zero`` property is guaranteed.
        """
        return self.__sub__(other)
    def sort_key(self, order=None):
        # Key compatible with SymPy's canonical ordering protocol.
        return (
            self.class_key(),
            (1, tuple(i.sort_key() for i in self.args)),
            S.One.sort_key(), S.One
        )
    def _latex(self, _):
        """Print itself as LaTeX code."""
        return self._symbs_[self.args[0]][1]
#
# Spark utilities
# ---------------
#
class BCastVar:
    """Automatically broadcast variables.
    A shallow encapsulation of a variable together with its broadcast into
    the Spark context.  After any mutation the variable is redistributed
    automatically on the next broadcast access.
    """
    __slots__ = [
        '_ctx',
        '_var',
        '_bcast'
    ]
    def __init__(self, ctx: SparkContext, var):
        """Initialize the broadcast variable."""
        self._ctx = ctx
        self._var = var
        self._bcast = None  # broadcast is built lazily on first access
    @property
    def var(self):
        """Get the variable for mutation, invalidating the broadcast."""
        self._bcast = None
        return self._var
    @property
    def ro(self):
        """Get the variable, read-only.
        This only prevents the redistribution of the variable; it cannot
        stop the caller from actually mutating it.
        """
        return self._var
    @property
    def bcast(self):
        """Get the broadcast variable, (re)broadcasting if needed."""
        if self._bcast is None:
            self._bcast = self._ctx.broadcast(self._var)
        return self._bcast
def nest_bind(rdd: RDD, func, full_balance=True):
    """Nest the flat map of the given function.
    When an entry no longer needs processing, None can be returned by the
    callback function.
    """
    impl = _nest_bind_full_balance if full_balance else _nest_bind_no_balance
    return impl(rdd, func)
def _nest_bind_full_balance(rdd: RDD, func):
    """Nest the flat map of the given function with full load balancing.
    Each round applies ``func`` across the whole cluster, so work is
    rebalanced between rounds at the cost of a Spark action per round.
    """
    ctx = rdd.context
    def wrapped(obj):
        """Wrapped function for nest bind."""
        # Tag results: False = finished entry (func returned None),
        # True = needs another round of processing.
        vals = func(obj)
        if vals is None:
            return [(False, obj)]
        else:
            return [(True, i) for i in vals]
    curr = rdd
    # Caching is load-bearing here: each RDD is consumed twice (count/filter
    # or two filters), and without cache the lineage would recompute func.
    curr.cache()
    res = []
    while curr.count() > 0:
        step_res = curr.flatMap(wrapped)
        step_res.cache()
        new_entries = step_res.filter(lambda x: not x[0]).map(lambda x: x[1])
        new_entries.cache()
        res.append(new_entries)
        curr = step_res.filter(lambda x: x[0]).map(lambda x: x[1])
        curr.cache()
        continue
    # All finished entries, accumulated over the rounds.
    return ctx.union(res)
def _nest_bind_no_balance(rdd: RDD, func):
    """Nest the flat map of the given function without load balancing.
    Each partition element is processed to completion locally inside a
    single flatMap, so no rebalancing happens between rounds.
    """
    def wrapped(obj):
        """Exhaustively process one entry and return the finished results."""
        pending = [obj]
        finished = []
        while pending:
            next_round = []
            for item in pending:
                mapped = func(item)
                if mapped is None:
                    finished.append(item)
                else:
                    next_round.extend(mapped)
            pending = next_round
        return finished
    return rdd.flatMap(wrapped)
#
# Misc utilities
# --------------
#
def ensure_pair(obj, role):
    """Ensure that the given object is a pair (two-element sequence)."""
    is_pair = isinstance(obj, Sequence) and len(obj) == 2
    if not is_pair:
        raise TypeError('Invalid {}: '.format(role), obj, 'expecting pair')
    return obj
_ALNUM = frozenset(
j
for i in [string.ascii_letters, string.digits]
for j in i
)
def extract_alnum(inp: str):
"""Extract the alpha numeric part of the string.
This function is mostly for generating valid identifiers for objects with a
mathematically formatted name.
"""
return ''.join(i for i in inp if i in _ALNUM)
#
# Small user utilities
# --------------------
#
def sum_(obj):
    """Sum the values in the given iterable.
    Different from the built-in summation function, the summation is based
    on the first item in the iterable.  A SymPy integer zero is returned
    when the iterator is empty.
    """
    iterator = iter(obj)
    try:
        total = next(iterator)
    except StopIteration:
        return Integer(0)
    for term in iterator:
        total = total + term
    return total
def prod_(obj):
    """Product the values in the given iterable.
    Similar to the summation utility function :py:func:`sum_`, the initial
    value for the reduction is the first element.  Different from the
    summation, a SymPy integer unity is returned for an empty iterator.
    """
    iterator = iter(obj)
    try:
        result = next(iterator)
    except StopIteration:
        return Integer(1)
    for factor in iterator:
        result = result * factor
    return result
class Stopwatch:
    """Utility class for printing timing information.
    Helps timing the progression of batch jobs by formatting the elapsed
    wall time between consecutive steps.  The timing might not be accurate
    to one second.
    """
    def __init__(self, print_cb=print):
        """Initialize the stopwatch.
        Parameters
        ----------
        print_cb
            The function called with each formatted time-stamp.  By default
            it is written to stdout.
        """
        self._print = print_cb
        self.tick(total=True)
    def tick(self, total=False):
        """Reset the timer.
        Parameters
        ----------
        total
            Whether the total beginning time is reset as well.
        """
        now = time.time()
        self._prev = now
        if total:
            self._begin = now
    def tock(self, label, tensor=None):
        """Make a timestamp.
        The formatted timestamp is handed to the callback.  The wall time
        elapsed since the last :py:meth:`tick` is reported.
        Parameters
        ----------
        label
            The label for the current step.
        tensor
            When given, the tensor is cached and its number of terms is
            counted, since an unevaluated lazy tensor would give
            misleading timing information.
        """
        if tensor is None:
            n_terms = ''
        else:
            tensor.cache()
            n_terms = '{} terms, '.format(tensor.n_terms)
        now = time.time()
        elapse = now - self._prev
        self._prev = now
        self._print(
            '{} done, {}wall time: {:.2f} s'.format(label, n_terms, elapse)
        )
    def tock_total(self):
        """Make a timestamp for the total time.
        The total time is the time elapsed since the **total** time was
        last reset.
        """
        self._print(
            'Total wall time: {:.2f} s'.format(time.time() - self._begin)
        )
class CallByIndex:
    """Wrapper over callables so that they can be called by indexing.
    Helpful when an indexable object is expected but the flexibility of a
    callable is needed: the wrapped callable is invoked on the index.
    """
    __slots__ = ['_callable']
    def __init__(self, callable):
        """Store the callable backing the indexing."""
        self._callable = callable
    def __getitem__(self, item):
        """Evaluate the wrapped callable on the given index."""
        return self._callable(item)
class InvariantIndexable(CallByIndex):
    """Objects whose indexing always gives the same constant.
    For an instance constructed with value ``v``, indexing with any key
    gives ``v`` back — useful where an indexable is required but the result
    is actually invariant with respect to the indices.
    """
    __slots__ = []
    def __init__(self, v):
        """Initialize with the constant returned for every index."""
        constant = v
        super().__init__(lambda _index: constant)
class SymbResolver:
    """Resolver based on symbols.
    Constructed from an iterable of range/symbols pairs associating symbols
    with key ranges.  In strict mode, only the given symbols resolve to the
    given range.  In non-strict mode, any expression containing one of the
    symbols resolves to that symbol's range.
    Behaviour is undefined for non-disjoint symbol sets across ranges or
    expressions containing symbols from multiple known ranges.
    """
    __slots__ = [
        '_known',
        '_strict'
    ]
    def __init__(self, range_symbs, strict):
        """Initialize the resolver."""
        known = {}
        for range_, dumms in range_symbs:
            known.update((symb, range_) for symb in dumms)
        self._known = known
        self._strict = strict
    def __call__(self, expr: Expr):
        """Try to resolve an expression, giving None on failure."""
        known = self._known
        if self._strict:
            # Only exact symbol matches resolve in strict mode.
            return known.get(expr)
        for symb in expr.atoms(Symbol):
            if symb in known:
                return known[symb]
        return None
|
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class NewCustomerSession(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type string.
    openapi_types = {
        'profile_id': 'str',
        'coupon': 'str',
        'referral': 'str',
        'state': 'str',
        'cart_items': 'list[CartItem]',
        'identifiers': 'list[str]',
        'total': 'float',
        'attributes': 'object'
    }
    # Python attribute name -> JSON key in the wire format.
    attribute_map = {
        'profile_id': 'profileId',
        'coupon': 'coupon',
        'referral': 'referral',
        'state': 'state',
        'cart_items': 'cartItems',
        'identifiers': 'identifiers',
        'total': 'total',
        'attributes': 'attributes'
    }
    def __init__(self, profile_id=None, coupon=None, referral=None, state='open', cart_items=None, identifiers=None, total=None, attributes=None, local_vars_configuration=None):  # noqa: E501
        """NewCustomerSession - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._profile_id = None
        self._coupon = None
        self._referral = None
        self._state = None
        self._cart_items = None
        self._identifiers = None
        self._total = None
        self._attributes = None
        self.discriminator = None
        # Assign via the property setters so client-side validation runs.
        if profile_id is not None:
            self.profile_id = profile_id
        if coupon is not None:
            self.coupon = coupon
        if referral is not None:
            self.referral = referral
        if state is not None:
            self.state = state
        if cart_items is not None:
            self.cart_items = cart_items
        if identifiers is not None:
            self.identifiers = identifiers
        if total is not None:
            self.total = total
        if attributes is not None:
            self.attributes = attributes
    @property
    def profile_id(self):
        """Gets the profile_id of this NewCustomerSession.  # noqa: E501
        ID of the customers profile as used within this Talon.One account. May be omitted or set to the empty string if the customer does not yet have a known profile ID.  # noqa: E501
        :return: The profile_id of this NewCustomerSession.  # noqa: E501
        :rtype: str
        """
        return self._profile_id
    @profile_id.setter
    def profile_id(self, profile_id):
        """Sets the profile_id of this NewCustomerSession.
        ID of the customers profile as used within this Talon.One account. May be omitted or set to the empty string if the customer does not yet have a known profile ID.  # noqa: E501
        :param profile_id: The profile_id of this NewCustomerSession.  # noqa: E501
        :type: str
        """
        self._profile_id = profile_id
    @property
    def coupon(self):
        """Gets the coupon of this NewCustomerSession.  # noqa: E501
        Any coupon code entered.  # noqa: E501
        :return: The coupon of this NewCustomerSession.  # noqa: E501
        :rtype: str
        """
        return self._coupon
    @coupon.setter
    def coupon(self, coupon):
        """Sets the coupon of this NewCustomerSession.
        Any coupon code entered.  # noqa: E501
        :param coupon: The coupon of this NewCustomerSession.  # noqa: E501
        :type: str
        """
        # Client-side mirror of the server's 100-character limit.
        if (self.local_vars_configuration.client_side_validation and
                coupon is not None and len(coupon) > 100):
            raise ValueError("Invalid value for `coupon`, length must be less than or equal to `100`")  # noqa: E501
        self._coupon = coupon
    @property
    def referral(self):
        """Gets the referral of this NewCustomerSession.  # noqa: E501
        Any referral code entered.  # noqa: E501
        :return: The referral of this NewCustomerSession.  # noqa: E501
        :rtype: str
        """
        return self._referral
    @referral.setter
    def referral(self, referral):
        """Sets the referral of this NewCustomerSession.
        Any referral code entered.  # noqa: E501
        :param referral: The referral of this NewCustomerSession.  # noqa: E501
        :type: str
        """
        # Client-side mirror of the server's 100-character limit.
        if (self.local_vars_configuration.client_side_validation and
                referral is not None and len(referral) > 100):
            raise ValueError("Invalid value for `referral`, length must be less than or equal to `100`")  # noqa: E501
        self._referral = referral
    @property
    def state(self):
        """Gets the state of this NewCustomerSession.  # noqa: E501
        Indicates the current state of the session. All sessions must start in the \"open\" state, after which valid transitions are... 1. open -> closed 2. open -> cancelled 3. closed -> cancelled  # noqa: E501
        :return: The state of this NewCustomerSession.  # noqa: E501
        :rtype: str
        """
        return self._state
    @state.setter
    def state(self, state):
        """Sets the state of this NewCustomerSession.
        Indicates the current state of the session. All sessions must start in the \"open\" state, after which valid transitions are... 1. open -> closed 2. open -> cancelled 3. closed -> cancelled  # noqa: E501
        :param state: The state of this NewCustomerSession.  # noqa: E501
        :type: str
        """
        allowed_values = ["open", "closed", "cancelled"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and state not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"  # noqa: E501
                .format(state, allowed_values)
            )
        self._state = state
    @property
    def cart_items(self):
        """Gets the cart_items of this NewCustomerSession.  # noqa: E501
        Serialized JSON representation.  # noqa: E501
        :return: The cart_items of this NewCustomerSession.  # noqa: E501
        :rtype: list[CartItem]
        """
        return self._cart_items
    @cart_items.setter
    def cart_items(self, cart_items):
        """Sets the cart_items of this NewCustomerSession.
        Serialized JSON representation.  # noqa: E501
        :param cart_items: The cart_items of this NewCustomerSession.  # noqa: E501
        :type: list[CartItem]
        """
        self._cart_items = cart_items
    @property
    def identifiers(self):
        """Gets the identifiers of this NewCustomerSession.  # noqa: E501
        Identifiers for the customer, this can be used for limits on values such as device ID.  # noqa: E501
        :return: The identifiers of this NewCustomerSession.  # noqa: E501
        :rtype: list[str]
        """
        return self._identifiers
    @identifiers.setter
    def identifiers(self, identifiers):
        """Sets the identifiers of this NewCustomerSession.
        Identifiers for the customer, this can be used for limits on values such as device ID.  # noqa: E501
        :param identifiers: The identifiers of this NewCustomerSession.  # noqa: E501
        :type: list[str]
        """
        self._identifiers = identifiers
    @property
    def total(self):
        """Gets the total of this NewCustomerSession.  # noqa: E501
        The total sum of the cart in one session.  # noqa: E501
        :return: The total of this NewCustomerSession.  # noqa: E501
        :rtype: float
        """
        return self._total
    @total.setter
    def total(self, total):
        """Sets the total of this NewCustomerSession.
        The total sum of the cart in one session.  # noqa: E501
        :param total: The total of this NewCustomerSession.  # noqa: E501
        :type: float
        """
        self._total = total
    @property
    def attributes(self):
        """Gets the attributes of this NewCustomerSession.  # noqa: E501
        A key-value map of the sessions attributes. The potentially valid attributes are configured in your accounts developer settings.  # noqa: E501
        :return: The attributes of this NewCustomerSession.  # noqa: E501
        :rtype: object
        """
        return self._attributes
    @attributes.setter
    def attributes(self, attributes):
        """Sets the attributes of this NewCustomerSession.
        A key-value map of the sessions attributes. The potentially valid attributes are configured in your accounts developer settings.  # noqa: E501
        :param attributes: The attributes of this NewCustomerSession.  # noqa: E501
        :type: object
        """
        self._attributes = attributes
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything with to_dict).
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NewCustomerSession):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, NewCustomerSession):
            return True
        return self.to_dict() != other.to_dict()
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework
from paddle.fluid.framework import Program, switch_main_program
import bisect
import numpy as np
# Fix the startup program's RNG seed so parameter init is deterministic.
fluid.default_startup_program().random_seed = 1
class TestDyRnnStaticInput(unittest.TestCase):
    def setUp(self):
        # Tolerance for the numeric-vs-analytic gradient comparison.
        self._delta = 0.005
        self._max_sequence_len = 3
        # Use a fresh Program as the main program so ops built by this test
        # do not leak into other tests.
        self._program = Program()
        switch_main_program(self._program)
        self.output_dim = 10
        self.place = core.CPUPlace()
        self.prepare_x_tensor()
        self.prepare_static_input_tensor()
        self.exe = fluid.Executor(self.place)
    def prepare_x_tensor(self):
        # Build the RNN input: three sequences of lengths 2, 1 and 3
        # (level-1 LoD), each step a 10-dim float32 vector.
        self.x_tensor_dim = 10
        lod = [[2, 1, 3]]
        shape = [sum(lod[0]), self.x_tensor_dim]
        self.x_tensor_data = np.random.random(shape).astype('float32')
        self.x_tensor = core.LoDTensor()
        self.x_tensor.set_recursive_sequence_lengths(lod)
        self.x_tensor.set(self.x_tensor_data, self.place)
    def prepare_static_input_tensor(self):
        # Build the static (non-stepped) input: sequences of lengths 1, 2, 3,
        # each step a 4-dim float32 vector.
        self.static_input_tensor_dim = 4
        lod = [[1, 2, 3]]
        shape = [sum(lod[0]), self.static_input_tensor_dim]
        self.static_input_data = np.random.random(shape).astype('float32')
        self.static_input_tensor = core.LoDTensor()
        self.static_input_tensor.set_recursive_sequence_lengths(lod)
        self.static_input_tensor.set(self.static_input_data, self.place)
def fetch_value(self, var):
fetch_outs = self.exe.run(feed={
'x_tensor': self.x_tensor,
'static_input_tensor': self.static_input_tensor
},
fetch_list=[var],
return_numpy=False)
return self._lodtensor_to_ndarray(fetch_outs[0])
def _lodtensor_to_ndarray(self, lod_tensor):
dims = lod_tensor.shape()
ndarray = np.zeros(shape=dims).astype('float32')
for i in xrange(np.product(dims)):
ndarray.ravel()[i] = lod_tensor._get_float_element(i)
return ndarray, lod_tensor.recursive_sequence_lengths()
def build_graph(self, only_forward=False):
x_tensor = fluid.layers.data(
name='x_tensor',
shape=[self.x_tensor_dim],
dtype='float32',
lod_level=1)
x_tensor.stop_gradient = False
static_input_tensor = fluid.layers.data(
name='static_input_tensor',
shape=[self.static_input_tensor_dim],
dtype='float32',
lod_level=1)
static_input_tensor.stop_gradient = False
if only_forward:
static_input_out_array = self._program.global_block().create_var(
name='static_input_out_array',
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype='float32')
static_input_out_array.stop_gradient = True
rnn = fluid.layers.DynamicRNN()
with rnn.block():
step_x = rnn.step_input(x_tensor)
step_static_input = rnn.static_input(static_input_tensor)
if only_forward:
fluid.layers.array_write(
x=step_static_input,
i=rnn.step_idx,
array=static_input_out_array)
last = fluid.layers.sequence_pool(
input=step_static_input, pool_type='last')
projected = fluid.layers.fc(input=[step_x, last],
size=self.output_dim)
rnn.output(projected)
if only_forward:
static_input_step_outs = []
step_idx = fluid.layers.fill_constant(
shape=[1], dtype='int64', value=0)
step_idx.stop_gradient = True
for i in xrange(self._max_sequence_len):
step_out = fluid.layers.array_read(static_input_out_array,
step_idx)
step_out.stop_gradient = True
static_input_step_outs.append(step_out)
fluid.layers.increment(x=step_idx, value=1.0, in_place=True)
if only_forward:
return static_input_step_outs
last = fluid.layers.sequence_pool(input=rnn(), pool_type='last')
loss = fluid.layers.mean(last)
append_backward(loss)
static_input_grad = self._program.global_block().var(
framework.grad_var_name('static_input_tensor'))
return static_input_grad, loss
def get_expected_static_step_outs(self):
x_lod = self.x_tensor.recursive_sequence_lengths()
x_seq_len = x_lod[0]
x_seq_len_sorted = sorted(x_seq_len)
x_sorted_indices = np.argsort(x_seq_len)[::-1]
static_lod = self.static_input_tensor.recursive_sequence_lengths()
static_sliced = []
cur_offset = 0
for i in xrange(len(static_lod[0])):
static_sliced.append(self.static_input_data[cur_offset:(
cur_offset + static_lod[0][i])])
cur_offset += static_lod[0][i]
static_seq_len = static_lod[0]
static_reordered = []
for i in xrange(len(x_sorted_indices)):
static_reordered.extend(static_sliced[x_sorted_indices[i]].tolist())
static_seq_len_reordered = [
static_seq_len[x_sorted_indices[i]]
for i in xrange(len(x_sorted_indices))
]
static_step_outs = []
static_step_lods = []
for i in xrange(self._max_sequence_len):
end = len(x_seq_len) - bisect.bisect_left(x_seq_len_sorted, i + 1)
lod = []
total_len = 0
for i in xrange(end):
lod.append(static_seq_len_reordered[i])
total_len += lod[-1]
static_step_lods.append([lod])
end = total_len
static_step_outs.append(
np.array(static_reordered[:end]).astype('float32'))
return static_step_outs, static_step_lods
def test_step_out(self):
static_step_outs = self.build_graph(only_forward=True)
self.exe.run(framework.default_startup_program())
expected_outs, expected_lods = self.get_expected_static_step_outs()
for i in xrange(self._max_sequence_len):
step_out, lod = self.fetch_value(static_step_outs[i])
self.assertTrue(np.allclose(step_out, expected_outs[i]))
self.assertTrue(np.allclose(lod, expected_lods[i]))
def test_network_gradient(self):
static_input_grad, loss = self.build_graph()
self.exe.run(framework.default_startup_program())
actual_gradients, actual_lod = self.fetch_value(static_input_grad)
static_input_shape = self.static_input_tensor.shape()
numeric_gradients = np.zeros(shape=static_input_shape).astype('float32')
# calculate numeric gradients
tensor_size = np.product(static_input_shape)
for i in xrange(tensor_size):
origin = self.static_input_tensor._get_float_element(i)
x_pos = origin + self._delta
self.static_input_tensor._set_float_element(i, x_pos)
y_pos = self.fetch_value(loss)[0][0]
x_neg = origin - self._delta
self.static_input_tensor._set_float_element(i, x_neg)
y_neg = self.fetch_value(loss)[0][0]
self.static_input_tensor._set_float_element(i, origin)
numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2
self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001))
self.assertTrue(
np.allclose(actual_lod,
self.static_input_tensor.recursive_sequence_lengths()))
# Standard unittest entry point when run as a script.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase):
  """Exercises the assertion helpers in test_util / TensorFlowTestCase."""
  def test_assert_ops_in_graph(self):
    # assert_ops_in_graph accepts a matching {name: op_type} dict and
    # raises ValueError for a missing name or a mismatched op type.
    with self.test_session():
      constant_op.constant(["hello", "taffy"], name="hello")
      test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"bye": "Const"}, ops.get_default_graph())
    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"hello": "Variable"}, ops.get_default_graph())
  def test_assert_equal_graph_def(self):
    # Build the same two constants in opposite orders in two graphs.
    with ops.Graph().as_default() as g:
      def_empty = g.as_graph_def()
      constant_op.constant(5, name="five")
      constant_op.constant(7, name="seven")
      def_57 = g.as_graph_def()
    with ops.Graph().as_default() as g:
      constant_op.constant(7, name="seven")
      constant_op.constant(5, name="five")
      def_75 = g.as_graph_def()
    # Comparing strings is order dependent
    self.assertNotEqual(str(def_57), str(def_75))
    # assert_equal_graph_def doesn't care about order
    test_util.assert_equal_graph_def(def_57, def_75)
    # Compare two unequal graphs
    with self.assertRaisesRegexp(AssertionError,
                                 r"^Found unexpected node 'seven"):
      test_util.assert_equal_graph_def(def_57, def_empty)
  def testIsGoogleCudaEnabled(self):
    # The test doesn't assert anything. It ensures the py wrapper
    # function is generated correctly.
    if test_util.IsGoogleCudaEnabled():
      print("GoogleCuda is enabled")
    else:
      print("GoogleCuda is disabled")
  def testAssertProtoEqualsStr(self):
    graph_str = "node { name: 'w1' op: 'params' }"
    graph_def = graph_pb2.GraphDef()
    text_format.Merge(graph_str, graph_def)
    # test string based comparison
    self.assertProtoEquals(graph_str, graph_def)
    # test original comparison
    self.assertProtoEquals(graph_def, graph_def)
  def testNDArrayNear(self):
    # _NDArrayNear is elementwise closeness within the given tolerance.
    a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
    self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
    self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
  def testCheckedThreadSucceeds(self):
    # checkedThread wraps threading.Thread so that failures raised in the
    # target are re-raised from join().
    def noop(ev):
      ev.set()
    event_arg = threading.Event()
    self.assertFalse(event_arg.is_set())
    t = self.checkedThread(target=noop, args=(event_arg,))
    t.start()
    t.join()
    self.assertTrue(event_arg.is_set())
  def testCheckedThreadFails(self):
    # An exception in the thread target surfaces as failureException on join().
    def err_func():
      return 1 // 0
    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("integer division or modulo by zero" in str(fe.exception))
  def testCheckedThreadWithWrongAssertionFails(self):
    # A failed unittest assertion inside the thread also surfaces on join().
    x = 37
    def err_func():
      self.assertTrue(x < 10)
    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("False is not true" in str(fe.exception))
  def testMultipleThreadsWithOneFailure(self):
    def err_func(i):
      self.assertTrue(i != 7)
    threads = [
        self.checkedThread(
            target=err_func, args=(i,)) for i in range(10)
    ]
    for t in threads:
      t.start()
    # Only thread 7 fails its assertion, so only its join() re-raises.
    for i, t in enumerate(threads):
      if i == 7:
        with self.assertRaises(self.failureException):
          t.join()
      else:
        t.join()
  def _WeMustGoDeeper(self, msg):
    # Raise an op-attributed error inside assertRaisesOpError(msg); callers
    # probe which message patterns the context manager will match.
    with self.assertRaisesOpError(msg):
      node_def = ops._NodeDef("op_type", "name")
      node_def_orig = ops._NodeDef("op_type_orig", "orig")
      op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
      op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
      raise errors.UnauthenticatedError(node_def, op, "true_err")
  def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
    with self.assertRaises(AssertionError):
      self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
    # The error message, op name and original op name should all match.
    self._WeMustGoDeeper("true_err")
    self._WeMustGoDeeper("name")
    self._WeMustGoDeeper("orig")
  def testAllCloseScalars(self):
    self.assertAllClose(7, 7 + 1e-8)
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(7, 8)
  def testArrayNear(self):
    # Mismatched lengths fail the assertion; mismatched shapes raise TypeError.
    a = [1, 2]
    b = [1, 2, 5]
    with self.assertRaises(AssertionError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [[1, 2], [3, 4]]
    with self.assertRaises(TypeError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [1, 2]
    self.assertArrayNear(a, b, 0.001)
  def testForceGPU(self):
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Cannot assign a device to node"):
      with self.test_session(force_gpu=True):
        # this relies on us not having a GPU implementation for assert, which
        # seems sensible
        x = constant_op.constant(True)
        y = [15]
        control_flow_ops.Assert(x, y).run()
  def testAssertAllCloseAccordingToType(self):
    # assertAllCloseAccordingToType loosens the tolerance per dtype:
    # rtol/atol for float64, float_* for float32, half_* for float16.
    # test float64
    self.assertAllCloseAccordingToType(
        np.asarray([1e-8], dtype=np.float64),
        np.asarray([2e-8], dtype=np.float64),
        rtol=1e-8, atol=1e-8
    )
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-7], dtype=np.float64),
          np.asarray([2e-7], dtype=np.float64),
          rtol=1e-8, atol=1e-8
      )
    # test float32
    self.assertAllCloseAccordingToType(
        np.asarray([1e-7], dtype=np.float32),
        np.asarray([2e-7], dtype=np.float32),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7
    )
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-6], dtype=np.float32),
          np.asarray([2e-6], dtype=np.float32),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7
      )
    # test float16
    self.assertAllCloseAccordingToType(
        np.asarray([1e-4], dtype=np.float16),
        np.asarray([2e-4], dtype=np.float16),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7,
        half_rtol=1e-4, half_atol=1e-4
    )
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-3], dtype=np.float16),
          np.asarray([2e-3], dtype=np.float16),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7,
          half_rtol=1e-4, half_atol=1e-4
      )
  def testRandomSeed(self):
    # Draw from Python, NumPy and TF RNGs, re-run setUp, and draw again;
    # setUp is expected to reseed all three identically.
    a = random.randint(1, 1000)
    a_np_rand = np.random.rand(1)
    with self.test_session():
      a_rand = random_ops.random_normal([1]).eval()
    # ensure that randomness in multiple testCases is deterministic.
    self.setUp()
    b = random.randint(1, 1000)
    b_np_rand = np.random.rand(1)
    with self.test_session():
      b_rand = random_ops.random_normal([1]).eval()
    self.assertEqual(a, b)
    self.assertEqual(a_np_rand, b_np_rand)
    self.assertEqual(a_rand, b_rand)
# Standard googletest entry point when run as a script.
if __name__ == "__main__":
  googletest.main()
|
|
import argparse
import csv, os, time
import MySQLdb # http://sourceforge.net/projects/mysql-python/
import result
from result import Result
import gspread, getpass # https://pypi.python.org/pypi/gspread/ (v0.1.0)
# ---------------------------------------------------------------------------
# Command line arguments
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Load SNP and locus data')
parser.add_argument('--dev', action='store_true', help='Only load chromosome 21 for development testing')
parser.add_argument('--path', help='Path to chromosome data')
parser.add_argument('--db', type=str, help='MySQL database name')
parser.add_argument('--yhost', type=str, help='MySQL host')
parser.add_argument('--username', type=str, help='MySQL username')
parser.add_argument('--password', type=str, help='MySQL password')
parser.add_argument('--tag', type=str, help='Tag to place in results file')
parser.add_argument('--remote', action='store_true', help='Enable remote reporting')
parser.add_argument('--rkey', help='Google document key')
parser.add_argument('--start', type=str, help='Chromosome to start load from')
parser.add_argument('--indexes', action='store_true', help='Create indexes')
parser.add_argument('--queries', action='store_true', help='Run queries')
args = parser.parse_args()
# Set script version
scriptVersion = "2.0"
# Default settings, overridden below by any CLI options that were supplied
dev = False
remote = False
createIndexes = False
runQueries = False
databaseName = 'snp_research'
username = 'dev'
password = ''
sqlHost = '127.0.0.1'
path = ''
tag = ''
docKey = ''
start = '1'
# Update any present from CLI
if args.dev: # If dev mode, only load chr 21
    dev = True
if args.remote and args.rkey is not None: # Remote logging requires a GDocs key
    remote = True
    docKey = args.rkey
else:
    remote = False
if args.path is not None: # If set, use as root path for chromosome data
    path = args.path
if args.db is not None: # If set, use as database name for MySQL
    databaseName = args.db
if args.username is not None: # MySQL username
    username = args.username
if args.password is not None: # MySQL password
    password = args.password
if args.yhost is not None: # MySQL host name
    sqlHost = args.yhost
if args.tag is not None: # Tag to place in results file
    tag = args.tag
if args.start is not None:
    start = args.start
# FIX: 'store_true' options are never None (argparse defaults them to
# False), so the previous "is not None" guards were always true and
# misleading; assign the booleans directly.
createIndexes = args.indexes
runQueries = args.queries
# ---------------------------------------------------------------------------
# Open results file
# ---------------------------------------------------------------------------
# BUG FIX: the original tested "resultsFileName != ''", which was always
# true (the name was just assigned), so an empty tag produced names like
# "results-mysql-.txt".  Append the tag only when one was supplied.
resultsFileName = 'results-mysql'
if tag != "":
    resultsFileName += '-' + tag
resultsFileName += '.txt'
resultsFile = open(resultsFileName, 'w')
resultsFile.write(scriptVersion + '\n')
result = Result()
resultsFile.write(result.toHeader() + '\n')
# Optional remote reporting: log each Result row to a Google Docs
# spreadsheet worksheet named after the tag and current timestamp.
if remote:
    gusername = raw_input("Enter Google username: ")
    gpassword = getpass.getpass("Enter Google password: ")
    gs = gspread.Client(auth=(gusername,gpassword))
    gs.login()
    ss = gs.open_by_key(docKey)
    ws = ss.add_worksheet(tag + "-" + str(time.time()),1,1)
    ws.append_row(result.headerArr())
# File-name templates for the per-chromosome input files
snpFilePath = 'snpData-chr{0}.txt'
lociFilePath = 'lociData-chr{0}.txt'
# Chromosomes to load: only 21 in dev mode, otherwise the full set,
# optionally resuming from the chromosome named by --start.
chromosomes = ["21"] # dev list
if dev is False:
    chromosomes = ["1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","X","Y","MT"] # complete list
    if start != "1": # Allow restart from anywhere in chromosome list, sequentially as ordered above
        resumed = []
        seen_start = False
        for chromosome in chromosomes:
            seen_start = seen_start or chromosome == start
            if seen_start:
                resumed.append(chromosome)
        chromosomes = resumed
# ---------------------------------------------------------------------------
# Create MySQL database and tables if they do not already exist
# ---------------------------------------------------------------------------
mysqlConnection = MySQLdb.connect(host=sqlHost,user=username,passwd=password)
createDbCursor = mysqlConnection.cursor()
# FIX: the original applied .format(databaseName) to a literal that had no
# placeholder while also concatenating the name by hand; use the template
# properly.  NOTE(review): the name is still interpolated into DDL (DDL
# cannot be parameterized) -- it comes from a trusted CLI argument.
createDbCursor.execute("CREATE DATABASE IF NOT EXISTS {0} DEFAULT CHARACTER SET 'utf8'".format(databaseName))
mysqlConnection.commit()
mysqlConnection.close() # Reconnect with database name
mysqlConnection = MySQLdb.connect(host=sqlHost,user=username,passwd=password,db=databaseName)
createDbCursor = mysqlConnection.cursor()
TABLES = {}
TABLES['snp'] = (
    "CREATE TABLE IF NOT EXISTS `snp`("
    "  `id` int(11) NOT NULL AUTO_INCREMENT,"
    "  `rsid` varchar(45) NOT NULL,"
    "  `chr` varchar(5) NOT NULL,"
    "  `has_sig` binary(1) NOT NULL,"
    "  PRIMARY KEY (`id`)"
    ") ENGINE=InnoDB AUTO_INCREMENT=862719 DEFAULT CHARSET=utf8;")
TABLES['locus'] = (
    "CREATE TABLE IF NOT EXISTS `locus`("
    "  `id` int(11) NOT NULL AUTO_INCREMENT,"
    "  `mrna_acc` varchar(45) NOT NULL,"
    "  `gene` varchar(45) NOT NULL,"
    "  `class` varchar(45) NOT NULL,"
    "  `snp_id` int(11) NOT NULL,"
    "  PRIMARY KEY (`id`),"
    "  CONSTRAINT `idx_snp` FOREIGN KEY (`snp_id`) REFERENCES `snp` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION"
    ") ENGINE=InnoDB AUTO_INCREMENT=7564 DEFAULT CHARSET=utf8;")
# Create each table, then relax integrity/durability checks to speed up
# the bulk load that follows (restored after the load completes).
for name, ddl in TABLES.iteritems():
    createDbCursor.execute(ddl)
mysqlConnection.commit()
createDbCursor.execute("SET FOREIGN_KEY_CHECKS = 0;")
createDbCursor.execute("SET UNIQUE_CHECKS = 0;")
createDbCursor.execute("SET SESSION tx_isolation='READ-UNCOMMITTED'")
createDbCursor.execute("SET sql_log_bin = 0;")
createDbCursor.close()
# Dictionaries and arrays for SQL and MongoDB queries
snpInserts = {} # Dictionary for rsid/insert for SNP data
lociInserts = [] # Array for loci insert queries
rsidList = {} # Dictionary of RSIDs that will also hold the
# primary key for each SNP in SQL
for curChr in chromosomes:
result = Result()
result.method = "MySQL"
result.tag = tag
print "Chromosome " + str(curChr)
result.chromosome = str(curChr)
# Set file paths for current chromosome
curSnpFilePath = snpFilePath.format(curChr)
curLociFilePath = lociFilePath.format(curChr)
if len(path) > 0:
curSnpFilePath = path.rstrip('\\') + '\\' + curSnpFilePath
curLociFilePath = path.rsplit('\\') + '\\' + curLociFilePath
# Clear dictionaries for loading multiple chromosomes
snpInserts.clear()
lociInserts = []
rsidList.clear()
print "Chromosome " + str(curChr) + ". Reading SNP Data"
result.snpLoadStart = time.time()
# Read in data from SNP file
with open(curSnpFilePath,'r') as csvfile:
data = csv.reader(csvfile,delimiter='\t')
for row in data:
if(len(row) == 3):
hasSig = False
if row[2] != '' and row[2] != 'false':
hasSig = True
rsidList[row[0]] = 0
insStr = "INSERT INTO snp (rsid, chr, has_sig) VALUES (\"{0}\", \"{1}\", {2})".format(row[0], row[1], hasSig)
snpInserts[row[0]] = insStr
# Data for reporting
result.snpLoadEnd = time.time()
result.totalSnps = len(snpInserts)
# Insert SNP data into MySQL
mysqlCursor = mysqlConnection.cursor()
print "Chromosome " + str(curChr) + ". Inserting SNP Data."
# Log current run start time
result.snpInsertStart = time.time()
# For each snp, insert record and then grab primary key
for rsid,snp in snpInserts.iteritems():
mysqlCursor.execute(snp)
rsidList[rsid] = mysqlCursor.lastrowid
# Commit all inserts to MySQL and grab end time
mysqlConnection.commit()
# Log completed time, close MySQL cursor
result.snpInsertEnd=time.time()
mysqlCursor.close()
# Clear list of SNPs to free up memory
snpInserts.clear()
print "Chromosome " + str(curChr) + ". Reading loci Data."
result.lociLoadStart = time.time()
# Now that we have primary keys for each SNP, read in loci data
with open(curLociFilePath,'r') as csvfile:
data = csv.reader(csvfile,delimiter='\t')
for row in data:
if(len(row) == 4):
# Load loci in MySQL statements
if row[0] in rsidList and rsidList[row[0]] > 0: # If RSID value is present, load with PK
insStr = "INSERT INTO locus (mrna_acc, gene, class, snp_id) VALUES (\"{0}\", \"{1}\", \"{2}\", {3})".format(row[1], row[2], row[3], rsidList[row[0]])
lociInserts.append(insStr)
# Data for reporting
result.lociLoadEnd = time.time()
result.totalLoci = len(lociInserts)
# Create new cursor, enter loci data into MySQL
cursor = mysqlConnection.cursor()
print "Chromosome " + str(curChr) + ". Inserting loci data."
# Log current run start time and number of loci
result.lociInsertStart = time.time()
# Insert each locus
for locus in lociInserts:
cursor.execute(locus)
# Commit data to MySQL
mysqlConnection.commit()
# Log end time and total MySQL time
result.lociInsertEnd = time.time()
# Close MySQL cursor
cursor.close()
print result.toTerm()
resultsFile.write(result.toString() + '\n')
if remote:
try:
print "Sending to GDocs..."
gs.login()
ws.append_row(result.stringArr())
except:
print "Unable to send to GDocs, continuing..."
# Create new cursor, create indexes and run test queries
cursor = mysqlConnection.cursor()
print "Turning on key checks..."
cursor.execute("SET FOREIGN_KEY_CHECKS = 1;")
cursor.execute("SET UNIQUE_CHECKS = 1;")
if createIndexes:
result = Result()
result.method = "MySQL-Idx"
result.tag = tag
rsidIndex = "CREATE UNIQUE INDEX `idx_rsid` ON `snp` (`rsid`)"
clinIndex = "CREATE INDEX `idx_clin` ON `snp` (`has_sig`)"
geneIndex = "CREATE INDEX `idx_gene` ON `locus` (`gene`)"
print "Creating RSID index..."
idxStart = time.time()
cursor.execute(rsidIndex)
idxEnd = time.time()
result.idxRsid = idxEnd - idxStart
print "Creating ClinSig index..."
idxStart = time.time()
cursor.execute(clinIndex)
idxEnd = time.time()
result.idxClinSig = idxEnd - idxStart
print "Creating Gene index..."
idxStart = time.time()
cursor.execute(geneIndex)
idxEnd = time.time()
result.idxGene = idxEnd - idxStart
resultsFile.write(result.toString() + '\n')
if remote:
try:
print "Sending to GDocs..."
gs.login()
ws.append_row(result.stringArr())
except:
print "Unable to send to GDocs, continuing..."
if runQueries:
for z in range(1,101):
result = Result()
result.method = "MySQL-Qry" + str(z)
result.tag = tag
print "Running queries, count " + str(z)
idxStart = time.time()
cursor.execute("SELECT * FROM locus l, snp s WHERE l.snp_id = s.id AND s.rsid = 'rs8788'")
idxEnd = time.time()
result.qryByRsid = idxEnd - idxStart
idxStart = time.time()
cursor.execute("SELECT count(s.id) FROM locus l, snp s WHERE l.snp_id = s.id AND s.has_sig = true")
idxEnd = time.time()
result.qryByClinSig = idxEnd - idxStart
idxStart = time.time()
cursor.execute("SELECT count(distinct s.rsid) FROM locus l, snp s WHERE l.snp_id = s.id AND l.gene = 'GRIN2B'")
idxEnd = time.time()
result.qryByGene = idxEnd - idxStart
idxStart = time.time()
cursor.execute("SELECT count(distinct s.rsid) FROM locus l, snp s WHERE l.snp_id = s.id AND l.gene = 'GRIN2B' AND s.has_sig = true")
idxEnd = time.time()
result.qryByGeneSig = idxEnd - idxStart
resultsFile.write(result.toString() + '\n')
if remote:
try:
print "Sending to GDocs..."
gs.login()
ws.append_row(result.stringArr())
except:
print "Unable to send to GDocs, continuing..."
# Close MySQL cursor
cursor.close()
resultsFile.close()
mysqlConnection.close()
print "Run complete."
|
|
from __future__ import unicode_literals
import httplib
import logging
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.db import connection
from django.db import transaction
from flask import request
from framework.auth import Auth
from framework.sessions import get_session
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_signed, must_be_logged_in
from api.caching.tasks import update_storage_usage
from osf.exceptions import InvalidTagError, TagNotFoundError
from osf.models import FileVersion, OSFUser
from osf.utils.permissions import WRITE
from osf.utils.requests import check_select_for_update
from website.project.decorators import (
must_not_be_registration, must_have_permission
)
from website.project.model import has_anonymous_link
from website.files import exceptions
from addons.osfstorage import utils
from addons.osfstorage import decorators
from addons.osfstorage.models import OsfStorageFolder
from addons.osfstorage import settings as osf_storage_settings
# Module-level logger (not referenced in the code visible in this chunk).
logger = logging.getLogger(__name__)
def make_error(code, message_short=None, message_long=None):
    """Build an HTTPError carrying optional short/long messages in its data dict."""
    payload = {}
    for key, value in (('message_short', message_short),
                       ('message_long', message_long)):
        if value:
            payload[key] = value
    return HTTPError(code, data=payload)
@must_be_signed
def osfstorage_update_metadata(payload, **kwargs):
    """Metadata received from WaterButler, is built incrementally via latent task calls to this endpoint.
    The basic metadata response looks like::
        {
            "metadata": {
                # file upload
                "name": "file.name",
                "md5": "d41d8cd98f00b204e9800998ecf8427e",
                "path": "...",
                "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
                "version": "2",
                "downloads": "1",
                "checkout": "...",
                "latestVersionSeen": {"userId": "abc12", "seen": true},
                "modified": "a date",
                "modified_utc": "a date in utc",
                # glacier vault (optional)
                "archive": "glacier_key",
                "vault": "glacier_vault_name",
                # parity files
                "parity": {
                    "redundancy": "5",
                    "files": [
                        {"name": "foo.txt.par2","sha256": "abc123"},
                        {"name": "foo.txt.vol00+01.par2","sha256": "xyz321"},
                    ]
                }
            },
        }
    """
    # Both keys are mandatory; anything else from WaterButler is a bad request.
    try:
        version_id = payload['version']
        metadata = payload['metadata']
    except KeyError:
        raise HTTPError(httplib.BAD_REQUEST)
    # Row-lock the version while updating when the DB backend supports
    # SELECT ... FOR UPDATE; otherwise fall back to a plain read.
    if check_select_for_update():
        version = FileVersion.objects.filter(_id=version_id).select_for_update().first()
    else:
        version = FileVersion.objects.filter(_id=version_id).first()
    if version is None:
        raise HTTPError(httplib.NOT_FOUND)
    version.update_metadata(metadata)
    return {'status': 'success'}
@must_be_signed
@decorators.autoload_filenode(must_be='file')
def osfstorage_get_revisions(file_node, payload, target, **kwargs):
    """List every version of ``file_node`` (newest first) with its download count."""
    from osf.models import PageCounter, FileVersion  # TODO Fix me once django works
    is_anon = has_anonymous_link(target, Auth(private_key=request.args.get('view_only')))
    counter_prefix = 'download:{}:{}:'.format(file_node.target._id, file_node._id)
    version_count = file_node.versions.count()
    # Only a trailing % is produced by this startswith/LIKE, so the index on _id is still used
    counts = dict(PageCounter.objects.filter(_id__startswith=counter_prefix).values_list('_id', 'total'))
    qs = FileVersion.includable_objects.filter(basefilenode__id=file_node.id).include('creator__guids').order_by('-created')
    # qs is newest-first, while the download counters are keyed by version
    # index, so position i maps to counter index version_count - i - 1.
    for i, version in enumerate(qs):
        version._download_count = counts.get('{}{}'.format(counter_prefix, version_count - i - 1), 0)
    # Return revisions in descending order
    return {
        'revisions': [
            utils.serialize_revision(target, file_node, version, index=version_count - idx - 1, anon=is_anon)
            for idx, version in enumerate(qs)
        ]
    }
@decorators.waterbutler_opt_hook
def osfstorage_copy_hook(source, destination, name=None, **kwargs):
    """Copy ``source`` under ``destination``, then refresh the target's storage usage."""
    copied = source.copy_under(destination, name=name)
    response = copied.serialize(), httplib.CREATED
    update_storage_usage(destination.target)
    return response
@decorators.waterbutler_opt_hook
def osfstorage_move_hook(source, destination, name=None, **kwargs):
    """Move ``source`` under ``destination``, translating checkout / primary-file
    failures into HTTP errors and recalculating storage usage as needed."""
    source_target = source.target
    try:
        ret = source.move_under(destination, name=name).serialize(), httplib.OK
    except exceptions.FileNodeCheckedOutError:
        raise HTTPError(httplib.METHOD_NOT_ALLOWED, data={
            'message_long': 'Cannot move file as it is checked out.'
        })
    except exceptions.FileNodeIsPrimaryFile:
        raise HTTPError(httplib.FORBIDDEN, data={
            'message_long': 'Cannot move file as it is the primary file of preprint.'
        })
    # once the move is complete recalculate storage for both targets if it's an inter-target move.
    if source_target != destination.target:
        update_storage_usage(destination.target)
        update_storage_usage(source_target)
    return ret
@must_be_signed
@decorators.autoload_filenode(default_root=True)
def osfstorage_get_lineage(file_node, **kwargs):
    """Serialize ``file_node`` and each of its ancestors, child first."""
    chain = []
    node = file_node
    while node:
        chain.append(node.serialize())
        node = node.parent
    return {'data': chain}
@must_be_signed
@decorators.autoload_filenode(default_root=True)
def osfstorage_get_metadata(file_node, **kwargs):
    """Serialize ``file_node``, optionally pinned to the ?revision= query arg."""
    # TODO This should change to version as its internal it can be changed anytime
    raw_revision = request.args.get('revision')
    try:
        requested_version = int(raw_revision)
    except (ValueError, TypeError):  # Missing or non-numeric revision
        requested_version = None
    return file_node.serialize(version=requested_version, include_full=True)
@must_be_signed
@decorators.autoload_filenode(must_be='folder')
def osfstorage_get_children(file_node, **kwargs):
    """Return a JSON list describing the children of ``file_node``.

    Built as a single raw-SQL query for performance: one LATERAL join per
    derived field (latest/earliest version, checkout guid, download count,
    per-user "seen" state), aggregated with json_agg.
    """
    from django.contrib.contenttypes.models import ContentType
    user_id = request.args.get('user_id')
    user_content_type_id = ContentType.objects.get_for_model(OSFUser).id
    # Resolve the guid in ?user_id= to the user's numeric primary key (or None).
    user_pk = OSFUser.objects.filter(guids___id=user_id, guids___id__isnull=False).values_list('pk', flat=True).first()
    with connection.cursor() as cursor:
        # Read the documentation on FileVersion's fields before reading this code
        cursor.execute("""
        SELECT json_agg(CASE
                WHEN F.type = 'osf.osfstoragefile' THEN
                    json_build_object(
                        'id', F._id
                        , 'path', '/' || F._id
                        , 'name', F.name
                        , 'kind', 'file'
                        , 'size', LATEST_VERSION.size
                        , 'downloads', COALESCE(DOWNLOAD_COUNT, 0)
                        , 'version', (SELECT COUNT(*) FROM osf_basefilenode_versions WHERE osf_basefilenode_versions.basefilenode_id = F.id)
                        , 'contentType', LATEST_VERSION.content_type
                        , 'modified', LATEST_VERSION.created
                        , 'created', EARLIEST_VERSION.created
                        , 'checkout', CHECKOUT_GUID
                        , 'md5', LATEST_VERSION.metadata ->> 'md5'
                        , 'sha256', LATEST_VERSION.metadata ->> 'sha256'
                        , 'latestVersionSeen', SEEN_LATEST_VERSION.case
                    )
                ELSE
                    json_build_object(
                        'id', F._id
                        , 'path', '/' || F._id || '/'
                        , 'name', F.name
                        , 'kind', 'folder'
                    )
                END
        )
        FROM osf_basefilenode AS F
        LEFT JOIN LATERAL (
            SELECT * FROM osf_fileversion
            JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id
            WHERE osf_basefilenode_versions.basefilenode_id = F.id
            ORDER BY created DESC
            LIMIT 1
        ) LATEST_VERSION ON TRUE
        LEFT JOIN LATERAL (
            SELECT * FROM osf_fileversion
            JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id
            WHERE osf_basefilenode_versions.basefilenode_id = F.id
            ORDER BY created ASC
            LIMIT 1
        ) EARLIEST_VERSION ON TRUE
        LEFT JOIN LATERAL (
            SELECT _id from osf_guid
            WHERE object_id = F.checkout_id
            AND content_type_id = %s
            LIMIT 1
        ) CHECKOUT_GUID ON TRUE
        LEFT JOIN LATERAL (
            SELECT P.total AS DOWNLOAD_COUNT FROM osf_pagecounter AS P
            WHERE P._id = 'download:' || %s || ':' || F._id
            LIMIT 1
        ) DOWNLOAD_COUNT ON TRUE
        LEFT JOIN LATERAL (
            SELECT EXISTS(
                SELECT (1) FROM osf_fileversionusermetadata
                INNER JOIN osf_fileversion ON osf_fileversionusermetadata.file_version_id = osf_fileversion.id
                INNER JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id
                WHERE osf_fileversionusermetadata.user_id = %s
                AND osf_basefilenode_versions.basefilenode_id = F.id
                LIMIT 1
            )
        ) SEEN_FILE ON TRUE
        LEFT JOIN LATERAL (
            SELECT CASE WHEN SEEN_FILE.exists
                THEN
                    CASE WHEN EXISTS(
                        SELECT (1) FROM osf_fileversionusermetadata
                        WHERE osf_fileversionusermetadata.file_version_id = LATEST_VERSION.fileversion_id
                        AND osf_fileversionusermetadata.user_id = %s
                        LIMIT 1
                    )
                    THEN
                        json_build_object('user', %s, 'seen', TRUE)
                    ELSE
                        json_build_object('user', %s, 'seen', FALSE)
                    END
                ELSE
                    NULL
                END
        ) SEEN_LATEST_VERSION ON TRUE
        WHERE parent_id = %s
        AND (NOT F.type IN ('osf.trashedfilenode', 'osf.trashedfile', 'osf.trashedfolder'))
        """, [
            user_content_type_id,
            file_node.target._id,
            user_pk,
            user_pk,
            user_id,
            user_id,
            file_node.id
        ])
        # json_agg yields NULL (-> None) for an empty folder; normalize to [].
        return cursor.fetchone()[0] or []
@must_be_signed
@decorators.autoload_filenode(must_be='folder')
def osfstorage_create_child(file_node, payload, **kwargs):
    """Create a file or folder under ``file_node`` from a waterbutler payload.

    ``payload`` must carry ``name``, ``user`` (a user guid) and ``kind``
    (``'folder'`` for folders); for files it must also carry ``metadata``,
    ``hashes``, ``settings`` and ``worker`` blocks used to record the new
    file version.

    Returns a ``(json_body, http_status)`` tuple; status is 201 when a new
    child was created, 200 when an existing file was versioned.
    """
    parent = file_node  # Just for clarity
    name = payload.get('name')
    user = OSFUser.load(payload.get('user'))
    is_folder = payload.get('kind') == 'folder'

    if getattr(file_node.target, 'is_registration', False) and not getattr(file_node.target, 'archiving', False):
        raise HTTPError(
            httplib.BAD_REQUEST,
            data={
                'message_short': 'Registered Nodes are immutable',
                'message_long': "The operation you're trying to do cannot be applied to registered Nodes, which are immutable",
            }
        )

    # BUGFIX: the previous guard (`not (name or user)`) only rejected the
    # request when *both* were missing, and then crashed with a TypeError on
    # `'/' in name` when only the name was absent. Both values are required.
    if not name or not user or '/' in name:
        raise HTTPError(httplib.BAD_REQUEST)

    if getattr(file_node.target, 'is_quickfiles', False) and is_folder:
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'You may not create a folder for QuickFiles'})

    try:
        # Create a save point so that we can rollback and unlock
        # the parent record
        with transaction.atomic():
            if is_folder:
                created, file_node = True, parent.append_folder(name)
            else:
                created, file_node = True, parent.append_file(name)
    except (ValidationError, IntegrityError):
        # A sibling with this name already exists; load it instead.
        created, file_node = False, parent.find_child_by_name(name, kind=int(not is_folder))

    if not created and is_folder:
        raise HTTPError(httplib.CONFLICT, data={
            'message_long': 'Cannot create folder "{name}" because a file or folder already exists at path "{path}"'.format(
                name=file_node.name,
                path=file_node.materialized_path,
            )
        })

    if file_node.checkout and file_node.checkout._id != user._id:
        raise HTTPError(httplib.FORBIDDEN, data={
            'message_long': 'File cannot be updated due to checkout status.'
        })

    if not is_folder:
        try:
            metadata = dict(payload['metadata'], **payload['hashes'])
            location = dict(payload['settings'], **dict(
                payload['worker'], **{
                    'object': payload['metadata']['name'],
                    'service': payload['metadata']['provider'],
                }
            ))
        except KeyError:
            raise HTTPError(httplib.BAD_REQUEST)

        current_version = file_node.get_version()
        new_version = file_node.create_version(user, location, metadata)

        # Only bump storage usage when the upload is not a duplicate of the
        # current version.
        if not current_version or not current_version.is_duplicate(new_version):
            update_storage_usage(file_node.target)
        version_id = new_version._id
        archive_exists = new_version.archive is not None
    else:
        version_id = None
        archive_exists = False

    return {
        'status': 'success',
        'archive': not archive_exists,  # Should waterbutler also archive this file
        'data': file_node.serialize(),
        'version': version_id,
    }, httplib.CREATED if created else httplib.OK
@must_be_signed
@must_not_be_registration
@decorators.autoload_filenode()
def osfstorage_delete(file_node, payload, target, **kwargs):
    """Delete ``file_node`` on behalf of the user named in ``payload``."""
    auth = Auth(OSFUser.load(payload['user']))
    # TODO Auth check?
    if not auth:
        raise HTTPError(httplib.BAD_REQUEST)

    # The storage root itself can never be deleted.
    root = OsfStorageFolder.objects.get_root(target=target)
    if file_node == root:
        raise HTTPError(httplib.BAD_REQUEST)

    try:
        file_node.delete(user=auth.user)
    except exceptions.FileNodeCheckedOutError:
        raise HTTPError(httplib.FORBIDDEN)
    except exceptions.FileNodeIsPrimaryFile:
        raise HTTPError(httplib.FORBIDDEN, data={
            'message_long': 'Cannot delete file as it is the primary file of preprint.'
        })

    update_storage_usage(file_node.target)
    return {'status': 'success'}
@must_be_signed
@decorators.autoload_filenode(must_be='file')
def osfstorage_download(file_node, payload, **kwargs):
    """Resolve a waterbutler download request for ``file_node``."""
    # Record the requesting user on the session so downstream contributor
    # checks can see who initiated the download.
    user_id = payload.get('user')
    if user_id:
        session = get_session()
        session.data['auth_user_id'] = user_id
        session.save()

    version_id = None
    requested = request.args.get('version')
    if requested:
        try:
            version_id = int(request.args['version'])
        except ValueError:
            raise make_error(httplib.BAD_REQUEST, message_short='Version must be an integer if not specified')

    version = file_node.get_version(version_id, required=True)
    return {
        'data': {
            'name': file_node.name,
            'path': version.location_hash,
        },
        'settings': {
            osf_storage_settings.WATERBUTLER_RESOURCE: version.location[osf_storage_settings.WATERBUTLER_RESOURCE],
        },
    }
@must_have_permission(WRITE)
@decorators.autoload_filenode(must_be='file')
def osfstorage_add_tag(file_node, **kwargs):
    """Attach the tag named in the request body to ``file_node``."""
    body = request.get_json()
    added = file_node.add_tag(body['tag'], kwargs['auth'])
    if added:
        return {'status': 'success'}, httplib.OK
    return {'status': 'failure'}, httplib.BAD_REQUEST
@must_have_permission(WRITE)
@decorators.autoload_filenode(must_be='file')
def osfstorage_remove_tag(file_node, **kwargs):
    """Detach the tag named in the request body from ``file_node``."""
    body = request.get_json()
    try:
        file_node.remove_tag(body['tag'], kwargs['auth'])
    except TagNotFoundError:
        return {'status': 'failure'}, httplib.CONFLICT
    except InvalidTagError:
        return {'status': 'failure'}, httplib.BAD_REQUEST
    return {'status': 'success'}, httplib.OK
@must_be_logged_in
def update_region(auth, **kwargs):
    """Set the logged-in user's default osfstorage region.

    Expects a JSON body carrying ``region_id``. Raises 400 when the key is
    missing and 404 when no region with that id exists.
    """
    user = auth.user
    user_settings = user.get_addon('osfstorage')

    data = request.get_json()
    try:
        region_id = data['region_id']
    except KeyError:
        raise HTTPError(httplib.BAD_REQUEST)

    try:
        user_settings.set_region(region_id)
    except ValueError:
        # Consistency: use the named status constant like the rest of this
        # module, not a bare 404 literal.
        raise HTTPError(httplib.NOT_FOUND, data=dict(
            message_short='Region not found',
            message_long='A storage region with this id does not exist'))
    return {'message': 'User region updated.'}
|
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import math
from coremltools.converters.mil.mil import get_new_symbol, types
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
# Threshold in number of fp32 elements above which we skip materializing a
# const op: 1 MB of fp32 data / 4 bytes per element. Floor division keeps the
# constant an int (true division produced a float).
MAX_SIZE_CONSTANT_FOLDING = 1024 * 1024 // 4  # When a fp32 const takes over 1MB, we won't create a const op for that
def broadcast_shapes(shape_x, shape_y):
    """
    Check and broadcast given input shapes.
    :param shape_x: tuple of int or symbols
        Shape of the first tensor (possibly symbolic).
    :param shape_y: tuple of int or symbols
        Shape of the second tensor (possibly symbolic).
    :return: tuple of int or symbols
        Result from broadcast.
    """
    shape_x, shape_y = tuple(shape_x), tuple(shape_y)

    # Left-pad the shorter shape with 1s so both ranks agree.
    rank = max(len(shape_x), len(shape_y))
    shape_x = (1,) * (rank - len(shape_x)) + shape_x
    shape_y = (1,) * (rank - len(shape_y)) + shape_y

    result = []
    for i, (dim_x, dim_y) in enumerate(zip(shape_x, shape_y)):
        x_sym = is_symbolic(dim_x)
        y_sym = is_symbolic(dim_y)
        if dim_x == 1:
            result.append(dim_y)
        elif dim_y == 1:
            result.append(dim_x)
        elif not y_sym and dim_y > 1:
            # Concrete y dimension wins; a concrete, different x is an error.
            if not x_sym and dim_x != dim_y:
                raise ValueError(
                    "Incompatible dim {} in shapes {} vs. {}".format(
                        i, shape_x, shape_y
                    )
                )
            result.append(dim_y)
        elif not x_sym and dim_x > 1:
            if not y_sym and dim_x != dim_y:
                raise ValueError(
                    "Incompatible dim {} in shapes {} vs. {}".format(
                        i, shape_x, shape_y
                    )
                )
            result.append(dim_x)
        elif x_sym or y_sym:
            # Both unresolved: introduce a fresh symbol for the result dim.
            result.append(get_new_symbol())
        else:
            assert dim_x == dim_y
            result.append(dim_x)
    return tuple(result)
def promoted_primitive_type(type1, type2):
    """
    Given a pair of tensor or primitive types, find the smallest type that can store an instance
    of their primitive type.
    """
    # Reduce tensor types to their element (primitive) type first.
    primitives = [
        t.get_primitive() if types.is_tensor(t) else t
        for t in (type1, type2)
    ]
    return types.promote_types(primitives[0], primitives[1])
def effective_kernel(kernel_shape, dilations):
    """
    Args:
        kernel_shape: tuple[int] representing the kernel shape in each
            given dimension.
        dilations: tuple[int] representing the dilation of the kernel
            in each given dimension.  Must be the same length as
            kernel_shape, and is assumed to give the dimensions in
            the same order as kernel_shape
    Returns: tuple[int] representing the effective shape of the kernel
        in each given dimension, with each dimension in the order given,
        taking into account dilation.
    See http://deeplearning.net/software/theano/tutorial/conv_arithmetic.html#dilated-convolutions
    Note that a dilation of 1 is equivalent to having no dilation.
    """
    if len(kernel_shape) != len(dilations):
        raise ValueError(
            "kernel_shape ({}) and dilations ({}) must be the same length".format(
                len(kernel_shape), len(dilations)
            )
        )
    # A dilation of d spreads a kernel of size k over (k - 1) * d + 1 cells.
    effective = []
    for size, dilation in zip(kernel_shape, dilations):
        effective.append((size - 1) * dilation + 1)
    return effective
def aggregated_pad(
    pad_type,
    kernel_shape,
    input_shape=None,
    strides=None,
    dilations=None,
    custom_pad=None,
):
    """
    Args
        pad_type: string. Must be one of ('same', 'valid', 'custom')
        kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels)
        input_shape: [iH, iW, ...]: spatial input dims (excluding channels)
            Required iff pad_type == 'same'
        strides: [sH, sW, ...]: spatial strides (excluding channels)
            Required iff pad_type == 'same'
        dilations: [dH, dW, ...]: dilations (excluding channels)
            If not provided, defaults to [1, 1, ...], effectively no dilation.
        custom_pad: Required iff pad_type == 'custom'.
            custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding
            for spatial dim i.
    Returns:
        A list of total (before + after) padding for each spatial dimension in kernel_shape.
    """
    rank = len(kernel_shape)

    if dilations is None:
        dilations = [1] * rank
    elif len(dilations) != rank:
        raise ValueError(
            "dilations must have same length as kernel_shape ({}, but got {})".format(
                rank, len(dilations)
            )
        )

    if pad_type == "same":
        if input_shape is None or len(input_shape) != rank:
            raise ValueError(
                "For SAME padding input_shape must not be None and must have "
                "same length as kernel_shape ({}, but got {})".format(
                    rank,
                    len(input_shape) if input_shape is not None else "None",
                )
            )
        if strides is None or len(strides) != rank:
            raise ValueError(
                "For SAME padding strides must not be None and must have "
                "same length as kernel_shape ({}, but got {})".format(
                    rank, len(strides) if strides is not None else "None"
                )
            )
        effective = effective_kernel(kernel_shape, dilations)
        pads = []
        for dim, kern, stride in zip(input_shape, effective, strides):
            if is_symbolic(dim):
                # Symbolic input size -> symbolic padding.
                pads.append(get_new_symbol())
            else:
                # Total SAME padding so the output covers ceil(dim / stride).
                pads.append(
                    int(max(0, stride * math.ceil(float(dim) / float(stride)) - dim + kern - stride))
                )
        return pads

    if pad_type == "valid":
        return [0] * rank

    if pad_type == "custom":
        if custom_pad is None or len(custom_pad) != 2 * rank:
            raise ValueError("Invalid custom_pad.")
        # Sum the before/after pair for each spatial dim.
        return [custom_pad[2 * i] + custom_pad[2 * i + 1] for i in range(rank)]

    raise ValueError('Invalid padding pad_type "{}"'.format(pad_type))
def spatial_dimensions_out_shape(
    pad_type, input_shape, kernel_shape, strides, dilations=None, custom_pad=None, ceil_mode=False,
):
    """
    Args
        pad_type: string. Must be one of ('same', 'valid', 'custom')
        input_shape: [iH, iW, ...]: spatial input dims (excluding channels)
            Required iff pad_type == 'same'
        kernel_shape: [kH, kW, ...]: spatial kernel dims (excluding channels)
        strides: [sH, sW, ...]: spatial strides (excluding channels)
            Required iff pad_type == 'same'
        dilations: [dH, dW, ...]: dilations (excluding channels)
            If not provided, defaults to [1, 1, ...], effectively no dilation.
        custom_pad: Required iff pad_type == 'custom'.
            custom_pad[2*i], custom_pad[2*i+1] are before/after custom padding
            for spatial dim i.
        ceil_mode: determines the padding and output shape.
            When ceil mode is True:
                out_dim = floor((in_dim + pad_l + pad_r - kernel_size + (stride-1)) / stride) + 1
                if (out_dim-1) * stride >= in_dim + pad_l and (pad_l > 0 or pad_r > 0):
                    out_dim = out_dim - 1
            When ceil mode is False:
                out_dim = floor((in_dim + pad_l + pad_r - kernel_size) / stride) + 1
    Returns:
        A list of spatial output sizes for each spatial dimension of kernel_shape.
    """
    num_spatial_dims = len(kernel_shape)
    if dilations is None:
        dilations = [1] * num_spatial_dims
    if custom_pad is None:
        custom_pad = [0] * num_spatial_dims * 2
    # All per-dimension argument lists must agree in rank (custom_pad holds
    # a before/after pair per dim, hence the division by two).
    if not (
        len(input_shape)
        == len(kernel_shape)
        == len(strides)
        == len(dilations)
        == len(custom_pad) / 2
    ):
        raise ValueError(
            "input_shape (length {}), kernel_shape (length {}), "
            "strides (length {}), dilations (length {}), and "
            "custom_pad (length {}) divided by two must all be "
            "the same length".format(
                len(input_shape),
                len(kernel_shape),
                len(strides),
                len(dilations),
                len(custom_pad),
            )
        )

    # Total (before + after) padding per spatial dim for the requested mode.
    pad = aggregated_pad(
        pad_type=pad_type,
        kernel_shape=kernel_shape,
        input_shape=input_shape,
        strides=strides,
        dilations=dilations,
        custom_pad=custom_pad,
    )
    # Kernel extent after dilation.
    effective_ks = effective_kernel(kernel_shape, dilations)
    out_shape = []
    for r in range(num_spatial_dims):
        # Symbolic input dims produce fresh symbolic output dims.
        if is_symbolic(input_shape[r]):
            out_shape.append(get_new_symbol())
        else:
            if not ceil_mode:
                out_shape.append(math.floor((input_shape[r] + pad[r] - effective_ks[r]) / strides[r] + 1))
            else:
                # Ceil mode: round the division up via the (stride - 1) bias...
                out_dim = math.floor((input_shape[r] + pad[r] - effective_ks[r] + strides[r] - 1) / strides[r] + 1)
                # ...then drop the last window if it would start entirely in the
                # right padding. NOTE(review): pad[r] is the *total* padding;
                # pad[r]/2 stands in for the left padding of the docstring
                # formula — confirm this matches the intended reference.
                if (out_dim - 1) * strides[r] >= input_shape[r] + pad[r]/2 and pad[r] > 0:
                    out_dim = out_dim - 1
                out_shape.append(out_dim)
    return out_shape
def parse_einsum_equation(equation):
    """Parse a two-input einsum equation into integer index vectors.

    Running example: ``"nchw,nwhr->nchr"``:

    1. Split on ``"->"``: LHS ``"nchw,nwhr"``, RHS ``"nchr"``.
    2. Split the LHS on ``","``: input1 ``"nchw"``, input2 ``"nwhr"``.
    3. Map each distinct character to a unique increasing integer, scanning
       input1, then input2, then the output:
       input1 -> [0, 1, 2, 3], input2 -> [0, 3, 2, 4], output -> [0, 1, 2, 4].

    :param equation: einsum equation string with exactly two inputs.
    :return: (input1_vec, input2_vec, output_vec) lists of ints.
    :raises AssertionError: if the equation does not have exactly two inputs
        and one output.
    """
    input_output_str = equation.split('->')
    assert len(input_output_str) == 2, "unsupported einsum equation {}".format(equation)
    input_str, output_str = input_output_str

    inputs = input_str.split(',')
    assert len(inputs) == 2, "unsupported einsum equation {}".format(equation)
    input1_str, input2_str = inputs

    map_char_to_int = {}

    def _assign_indices(symbols, index):
        # Fixed: the old helper named its parameter `str`, shadowing the
        # builtin. Assign each new character the next integer; reuse the
        # integer for characters already seen.
        vec = []
        for ch in symbols:
            if ch not in map_char_to_int:
                map_char_to_int[ch] = index
                index += 1
            vec.append(map_char_to_int[ch])
        return vec, index

    input1_vec, index = _assign_indices(input1_str, 0)
    input2_vec, index = _assign_indices(input2_str, index)
    output_vec, _ = _assign_indices(output_str, index)
    return input1_vec, input2_vec, output_vec
|
|
# -*- coding: utf-8 -*-
import hashlib
import os
from hamcrest import (
assert_that,
calling,
equal_to,
has_entries,
has_entry,
raises
)
from pydeform.exceptions import NotFoundError, ValidationError
from testutils import (
DeformSessionProjectClientTestCaseMixin,
DeformTokenProjectClientTestCaseMixin,
TestCase
)
from unittest import skip
class ProjectClientTestBase__document(object):
    """Shared integration tests for the Deform document API.

    Concrete subclasses set ``project_client_attr`` to the name of the
    attribute holding a project client; every test resolves the client via
    ``getattr`` so the same assertions run against both session- and
    token-authenticated transports. These tests talk to a live Deform
    backend configured through ``self.CONFIG``.
    """

    # Name of the attribute holding the project client under test;
    # overridden by concrete subclasses.
    project_client_attr = None

    def setUp(self):
        super(ProjectClientTestBase__document, self).setUp()
        # Start from a clean slate: drop the 'venues' collection if a
        # previous run left it behind.
        try:
            getattr(self, self.project_client_attr).collection.remove(
                identity='venues'
            )
        except NotFoundError:
            pass

    def test_get(self):
        response = getattr(self, self.project_client_attr).document.get(
            identity=self.CONFIG['DEFORM']['PROJECT_TOKEN'],
            collection='_tokens'
        )
        assert_that(
            response,
            has_entries({
                'name': 'First project Token',
                '_id': self.CONFIG['DEFORM']['PROJECT_TOKEN'],
            })
        )

    def test_get_not_existing_document(self):
        assert_that(
            calling(
                getattr(self, self.project_client_attr).document.get
            ).with_args(
                identity='not_existing_document',
                collection='_tokens'
            ),
            raises(NotFoundError, '^Document not found\.$')
        )

    def test_with_fields(self):
        # '_id' is always returned even when only 'name' is requested.
        response = getattr(self, self.project_client_attr).document.get(
            identity=self.CONFIG['DEFORM']['PROJECT_TOKEN'],
            collection='_tokens',
            fields=['name']
        )
        assert_that(
            response,
            equal_to({
                'name': 'First project Token',
                '_id': self.CONFIG['DEFORM']['PROJECT_TOKEN'],
            })
        )

    def test_with_fields_exclude(self):
        response = getattr(self, self.project_client_attr).document.get(
            identity=self.CONFIG['DEFORM']['PROJECT_TOKEN'],
            collection='_tokens',
            fields_exclude=['name']
        )
        assert_that('name' in response, equal_to(False))

    def test_create(self):
        try:
            getattr(self, self.project_client_attr).document.remove(
                collection='venues',
                identity='subway'
            )
        except NotFoundError:
            pass
        response = getattr(self, self.project_client_attr).document.create(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway'
            }
        )
        assert_that(
            response,
            has_entries({
                '_id': 'subway',
                'name': 'Subway'
            })
        )

    @skip('https://github.com/deformio/python-deform/issues/11')
    def test_get_document_with_id_containing_slash(self):
        try:
            getattr(self, self.project_client_attr).document.remove(
                collection='venues',
                identity='sub/way'
            )
        except NotFoundError:
            pass
        getattr(self, self.project_client_attr).document.create(
            collection='venues',
            data={
                '_id': 'sub/way',
                'name': 'Subway'
            }
        )
        response = getattr(self, self.project_client_attr).document.get(
            identity='sub/way',
            collection='venues'
        )
        assert_that(
            response,
            has_entries({
                '_id': 'sub/way',
                'name': 'Subway'
            })
        )

    def test_get_property(self):
        # A 'property' path drills into nested document fields.
        try:
            getattr(self, self.project_client_attr).document.remove(
                collection='venues',
                identity='subway'
            )
        except NotFoundError:
            pass
        response = getattr(self, self.project_client_attr).document.create(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway',
                'comment': {
                    'user': {
                        'name': 'gennady',
                        'surname': 'chibisov'
                    }
                }
            }
        )
        response = getattr(self, self.project_client_attr).document.get(
            collection='venues',
            identity='subway',
            property=['comment', 'user']
        )
        assert_that(
            response,
            equal_to({
                'name': 'gennady',
                'surname': 'chibisov'
            })
        )

    def test_get_not_existing_property(self):
        try:
            getattr(self, self.project_client_attr).document.remove(
                collection='venues',
                identity='subway'
            )
        except NotFoundError:
            pass
        getattr(self, self.project_client_attr).document.create(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway',
                'comment': {
                    'user': {
                        'name': 'gennady'
                    }
                }
            }
        )
        assert_that(
            calling(
                getattr(self, self.project_client_attr).document.get
            ).with_args(
                collection='venues',
                identity='subway',
                property=['comment', 'user', 'surname']
            ),
            raises(NotFoundError, '^Document property not found\.$')
        )

    def test_save__with_identity(self):
        # save() acts as upsert: 'created' is True on insert, False on update.
        try:
            getattr(self, self.project_client_attr).document.remove(
                identity='subway',
                collection='venues'
            )
        except NotFoundError:
            pass
        # first save
        response = getattr(self, self.project_client_attr).document.save(
            identity='subway',
            collection='venues',
            data={
                'name': 'Subway'
            }
        )
        assert_that(response['created'], equal_to(True))
        assert_that(
            response['result'],
            has_entries({
                '_id': 'subway',
                'name': 'Subway'
            })
        )
        # second save
        response = getattr(self, self.project_client_attr).document.save(
            identity='subway',
            collection='venues',
            data={
                'name': 'Subway saved with identity'
            }
        )
        assert_that(response['created'], equal_to(False))
        assert_that(
            response['result'],
            has_entries({
                '_id': 'subway',
                'name': 'Subway saved with identity',
            })
        )

    def test_save__without_identity(self):
        # Identity can be carried inside data['_id'] instead of the argument.
        try:
            getattr(self, self.project_client_attr).document.remove(
                identity='subway',
                collection='venues'
            )
        except NotFoundError:
            pass
        # first save
        response = getattr(self, self.project_client_attr).document.save(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway'
            }
        )
        assert_that(response['created'], equal_to(True))
        assert_that(
            response['result'],
            has_entries({
                '_id': 'subway',
                'name': 'Subway'
            })
        )
        # second save
        response = getattr(self, self.project_client_attr).document.save(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway saved with identity'
            }
        )
        assert_that(response['created'], equal_to(False))
        assert_that(
            response['result'],
            has_entries({
                '_id': 'subway',
                'name': 'Subway saved with identity',
            })
        )

    def test_save_property(self):
        # Saving a nested property creates it when absent and replaces it
        # wholesale when present.
        try:
            getattr(self, self.project_client_attr).document.remove(
                identity='subway',
                collection='venues'
            )
        except NotFoundError:
            pass
        getattr(self, self.project_client_attr).document.create(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway',
                'comment': {
                    'user': {
                        'name': 'gennady'
                    }
                }
            }
        )
        response = getattr(self, self.project_client_attr).document.save(
            identity='subway',
            collection='venues',
            property=['comment', 'user', 'surname'],
            data='chibisov'
        )
        assert_that(response['created'], equal_to(True))
        assert_that(response['result'], equal_to('chibisov'))
        response = getattr(self, self.project_client_attr).document.get(
            identity='subway',
            collection='venues',
            property=['comment', 'user']
        )
        assert_that(
            response,
            has_entries({
                'name': 'gennady',
                'surname': 'chibisov',
            })
        )
        # should replace property
        response = getattr(self, self.project_client_attr).document.save(
            identity='subway',
            collection='venues',
            property=['comment', 'user'],
            data={
                'name': 'andrey'
            }
        )
        assert_that(response['created'], equal_to(False))
        assert_that(response['result'], equal_to({'name': 'andrey'}))
        response = getattr(self, self.project_client_attr).document.get(
            identity='subway',
            collection='venues',
            property=['comment', 'user']
        )
        assert_that(
            response,
            equal_to({
                'name': 'andrey'
            })
        )

    def test_update(self):
        # update() replaces the given top-level keys (no deep merge).
        try:
            getattr(self, self.project_client_attr).document.remove(
                identity='subway',
                collection='venues'
            )
        except NotFoundError:
            pass
        getattr(self, self.project_client_attr).document.create(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway',
                'comment': {
                    'user': 'gena',
                    'text': 'good'
                }
            }
        )
        response = getattr(self, self.project_client_attr).document.update(
            identity='subway',
            collection='venues',
            data={
                'comment': {
                    'text': 'bad'
                }
            }
        )
        assert_that(
            response['comment'],
            equal_to({
                'text': 'bad'
            })
        )

    def test_remove(self):
        getattr(self, self.project_client_attr).document.save(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway'
            }
        )
        response = getattr(self, self.project_client_attr).document.remove(
            identity='subway',
            collection='venues'
        )
        assert_that(response, equal_to(None))

    def test_schema_validation(self):
        # A collection schema with a required field rejects documents that
        # omit it and accepts documents that provide it.
        getattr(self, self.project_client_attr).collection.save(
            identity='venues',
            data={
                'name': 'Venues',
                'schema': {
                    'properties': {
                        'name': {
                            'type': 'string',
                            'required': True
                        }
                    }
                }
            }
        )
        assert_that(
            calling(
                getattr(self, self.project_client_attr).document.save
            ).with_args(
                collection='venues',
                data={
                    '_id': 'subway'
                }
            ),
            raises(ValidationError, '^Validation error')
        )
        response = getattr(self, self.project_client_attr).document.save(
            collection='venues',
            data={
                '_id': 'subway',
                'name': 'Subway'
            }
        )
        assert_that(
            response['result'],
            has_entry('name', 'Subway')
        )

    def test_file(self):
        # End-to-end check of file-typed schema fields: upload, metadata,
        # content download, and streaming access.
        # remove all files
        getattr(self, self.project_client_attr).documents.remove(
            collection='_files'
        )
        getattr(self, self.project_client_attr).collection.save(
            identity='venues',
            data={
                'name': 'Venues',
                'schema': {
                    'properties': {
                        'name': {
                            'type': 'string',
                            'required': True
                        },
                        'phones': {
                            'type': 'array',
                            'items': {
                                'type': 'number'
                            }
                        },
                        'info': {
                            'type': 'file',
                            'required': True
                        },
                        'logo': {
                            'type': 'file',
                            'required': True
                        }
                    }
                }
            }
        )
        text_file = open(
            os.path.join(self.CONFIG['FILES_PATH'], '1.txt'), 'rt'
        )
        image_file = open(
            os.path.join(self.CONFIG['FILES_PATH'], '1.png'), 'rb'
        )

        # test upload
        response = getattr(self, self.project_client_attr).document.save(
            identity='subway',
            collection='venues',
            data={
                'name': 'subway',
                'phones': [1234, 5678],
                'info': text_file,
                'logo': image_file,
            }
        )
        result = response['result']
        # Rewind: saving consumed the file handles.
        text_file.seek(0)
        image_file.seek(0)
        text_file_content = text_file.read()
        image_file_content = image_file.read()
        assert_that(
            result,
            has_entries({
                'name': 'subway',
                'phones': [1234, 5678],
                'info': has_entries({
                    'name': '1.txt',
                    'content_type': 'text/plain',
                    'md5': hashlib.md5(text_file_content).hexdigest()
                }),
                'logo': has_entries({
                    'name': '1.png',
                    'content_type': 'image/png',
                    'md5': hashlib.md5(image_file_content).hexdigest()
                })
            })
        )

        # test download
        info_content_response = getattr(
            self,
            self.project_client_attr
        ).document.get(
            identity='subway',
            collection='venues',
            property=['info', 'content']
        )
        assert_that(info_content_response, equal_to(text_file_content))

        logo_content_response = getattr(
            self,
            self.project_client_attr
        ).document.get(
            identity='subway',
            collection='venues',
            property=['logo', 'content']
        )
        assert_that(logo_content_response, equal_to(image_file_content))

        # test getting file object
        info_content_stream_response = getattr(
            self,
            self.project_client_attr
        ).document.get_file(
            identity='subway',
            collection='venues',
            property=['info'],
        )
        assert_that(
            info_content_stream_response.read(),
            equal_to(text_file_content)
        )

        logo_content_stream_response = getattr(
            self,
            self.project_client_attr
        ).document.get_file(
            identity='subway',
            collection='venues',
            property=['logo'],
        )
        assert_that(
            logo_content_stream_response.read(),
            equal_to(image_file_content)
        )

    def test_file_upload_directly_to_files_collection(self):
        # Files saved straight into '_files' get metadata documents with an
        # empty document_id (no owning document).
        getattr(self, self.project_client_attr).documents.remove(
            collection='_files'
        )
        text_file = open(
            os.path.join(self.CONFIG['FILES_PATH'], '1.txt'), 'rt'
        )
        image_file = open(
            os.path.join(self.CONFIG['FILES_PATH'], '1.png'), 'rb'
        )

        # test upload text file
        response = getattr(self, self.project_client_attr).document.save(
            identity='text_file',
            collection='_files',
            data=text_file
        )
        text_file_result = response['result']
        text_file.seek(0)
        text_file_content = text_file.read()
        assert_that(
            text_file_result,
            has_entries({
                '_id': 'text_file',
                'collection_id': '_files',
                'content_type': 'text/plain',
                'document_id': '',
                'name': '1.txt',
                'md5': hashlib.md5(text_file_content).hexdigest()
            })
        )

        # test upload binary file
        response = getattr(self, self.project_client_attr).document.save(
            identity='image_file',
            collection='_files',
            data=image_file
        )
        text_file_result = response['result']
        image_file.seek(0)
        image_file_content = image_file.read()
        assert_that(
            text_file_result,
            has_entries({
                '_id': 'image_file',
                'collection_id': '_files',
                'content_type': 'image/png',
                'document_id': '',
                'name': '1.png',
                'md5': hashlib.md5(image_file_content).hexdigest()
            })
        )

        # test getting text file object
        text_file_content_stream_response = getattr(
            self,
            self.project_client_attr
        ).document.get_file(
            identity='text_file',
            collection='_files'
        )
        assert_that(
            text_file_content_stream_response.read(),
            equal_to(text_file_content)
        )

        # test getting image file object
        image_file_content_stream_response = getattr(
            self,
            self.project_client_attr
        ).document.get_file(
            identity='image_file',
            collection='_files'
        )
        assert_that(
            image_file_content_stream_response.read(),
            equal_to(image_file_content)
        )
class SessionProjectClientTest__document(
    ProjectClientTestBase__document,
    DeformSessionProjectClientTestCaseMixin,
    TestCase
):
    # Runs the shared document tests against a session-authenticated client.
    project_client_attr = 'deform_session_project_client'
class TokenProjectClientTest__document(
    ProjectClientTestBase__document,
    DeformTokenProjectClientTestCaseMixin,
    TestCase
):
    # Runs the shared document tests against a token-authenticated client.
    project_client_attr = 'deform_token_project_client'
|
|
__author__ = 'teemu kanstren'
import time
import os
import unittest
import shutil
import inspect
from elasticsearch import Elasticsearch
import pkg_resources
from pypro.local.loggers.es_network_logger import ESNetLogger
from pypro import utils
import pypro.tests.t_assert as t_assert
import pypro.local.config as config
#weird regex syntax, pattern required, weird errors...
#es = Elasticsearch()
#config.ES_INDEX = "pypro_tests"
#data = es.search(index=config.ES_INDEX)
#items = data["hits"]["hits"]
#print(str(items[0]["_source"]["bytes_sent"]))
# data = self.es.get(config.ES_INDEX, doc_type="system_cpu")
#print(str(data))
class TestESLogs(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Point the loggers at a dedicated test index and silence console
        # output for the whole test class.
        config.DB_NAME = "pypro_tests"
        config.PRINT_CONSOLE = False
    def setUp(self):
        # Remove any log files from previous runs and purge every document
        # from the Elasticsearch test index (404 ignored when the index is
        # missing) so each test starts from an empty index.
        if os.path.exists(utils.log_dir):
            shutil.rmtree(utils.log_dir, ignore_errors=True)
        self.es = Elasticsearch()
        self.es.delete_by_query(config.DB_NAME, body='{"query":{"match_all":{}}}', ignore=[404])
    def test_cpu_sys_es(self):
        # Log four cpu-sys samples, then read them back from Elasticsearch
        # sorted by time and verify each document field-by-field.
        log = ESNetLogger()
        log.cpu_sys(0, 1, 1, 1, 1)
        log.cpu_sys(1, 3, 2, 5, 6)
        log.cpu_sys(3, 22, 99, 11, 4)
        log.cpu_sys(5, 155, 122, 12, 22)
        log.close()
        # Give Elasticsearch a moment to index the documents before querying.
        time.sleep(1)
        data = self.es.search(index=config.DB_NAME, body='{"query":{"match_all":{}}, "sort": { "time": { "order": "asc" }}}')
        items = data["hits"]["hits"]
        self.assertEqual(4, len(items), "number of cpu sys items logged")
        self.assert_cpu_sys(items[0]["_source"], 0, 1, 1, 1, 1)
        self.assert_cpu_sys(items[1]["_source"], 1, 3, 2, 5, 6)
        self.assert_cpu_sys(items[2]["_source"], 3, 22, 99, 11, 4)
        self.assert_cpu_sys(items[3]["_source"], 5, 155, 122, 12, 22)
def assert_cpu_sys(self, item, time, user_count, system_count, idle_count, percent):
self.assertEqual(5, len(item), "number of properties for a cpu sys item")
self.assertEqual(item['time'], time)
self.assertEqual(item['percent'], percent)
self.assertEqual(item['idle_count'], idle_count)
self.assertEqual(item['system_count'], system_count)
self.assertEqual(item['user_count'], user_count)
    def test_cpu_proc_es(self):
        # Log eight per-process cpu samples across three processes, then read
        # them back sorted by time and verify each document field-by-field.
        log = ESNetLogger()
        log.cpu_proc(0, 1, 1, 1, 1, 1, 1, 1, "p1")
        log.cpu_proc(1, 2, 1, 3, 4, 2, 3, 1, "p2")
        log.cpu_proc(2, 3, 2, 122, 7, 5, 8, 11, "p3")
        log.cpu_proc(10, 1, 1, 1, 1, 1, 1, 1, "p1")
        log.cpu_proc(11, 2, 1, 3, 4, 2, 3, 1, "p2")
        log.cpu_proc(12, 3, 2, 122, 7, 5, 8, 11, "p3")
        log.cpu_proc(20, 1, 1, 5, 1, 4, 3, 2, "p1")
        log.cpu_proc(21, 3, 2, 555, 7, 11, 55, 32, "p3")
        log.close()
        # Give Elasticsearch a moment to index the documents before querying.
        time.sleep(1)
        data = self.es.search(index=config.DB_NAME, body='{"query":{"match_all":{}}, "sort": { "time": { "order": "asc" }}}')
        items = data["hits"]["hits"]
        self.assertEqual(8, len(items), "number of cpu proc items logged")
        # print(str(items[0]["_source"]))
        self.assert_cpu_proc(items[0]["_source"], 0, 1, 1, 1, 1, 1, 1, 1, "p1")
        self.assert_cpu_proc(items[1]["_source"], 1, 2, 1, 3, 4, 2, 3, 1, "p2")
        self.assert_cpu_proc(items[2]["_source"], 2, 3, 2, 122, 7, 5, 8, 11, "p3")
        self.assert_cpu_proc(items[3]["_source"], 10, 1, 1, 1, 1, 1, 1, 1, "p1")
        self.assert_cpu_proc(items[4]["_source"], 11, 2, 1, 3, 4, 2, 3, 1, "p2")
        self.assert_cpu_proc(items[5]["_source"], 12, 3, 2, 122, 7, 5, 8, 11, "p3")
        self.assert_cpu_proc(items[6]["_source"], 20, 1, 1, 5, 1, 4, 3, 2, "p1")
        self.assert_cpu_proc(items[7]["_source"], 21, 3, 2, 555, 7, 11, 55, 32, "p3")
def assert_cpu_proc(self, item, time, pid, priority, ctx_count, n_threads, cpu_user, cpu_system, percent, pname):
self.assertEqual(9, len(item), "number of properties for a cpu sys item")
self.assertEqual(item['time'], time)
self.assertEqual(item['pid'], pid)
self.assertEqual(item['priority'], priority)
self.assertEqual(item['context_switches'], ctx_count)
self.assertEqual(item['threads'], n_threads)
self.assertEqual(item['cpu_user'], cpu_user)
self.assertEqual(item['cpu_system'], cpu_system)
self.assertEqual(item['percent'], percent)
self.assertEqual(item['pname'], pname)
# args = locals().copy()
# del args["self"]
# del args["item"]
# expected_len = len(args) #reduce self and item
# actual_len = len(item)
# self.assertEqual(expected_len, actual_len, "number of properties for a cpu proc")
# for arg in args:
# value = args[arg]
# if arg == "time":
# value *= 1000
# self.assertEqual(item[arg], value)
    def test_mem_sys_es(self):
        # Log five mem-sys samples, then read them back from Elasticsearch
        # sorted by time and verify each document field-by-field.
        log = ESNetLogger()
        log.mem_sys(0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
        log.mem_sys(10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)
        log.mem_sys(12, 34, 654, 24, 33, 23, 442, 1, 13, 21, 44)
        log.mem_sys(15, 3445, 345, 345, 44, 745, 367, 32, 1111, 33, 55)
        log.mem_sys(33, 33, 453, 998, 347, 976, 8544, 45, 5555, 66, 33)
        log.close()
        # Give Elasticsearch a moment to index the documents before querying.
        time.sleep(1)
        data = self.es.search(index=config.DB_NAME, body='{"query":{"match_all":{}}, "sort": { "time": { "order": "asc" }}}')
        items = data["hits"]["hits"]
        self.assertEqual(5, len(items), "number of mem sys items logged")
        self.assert_mem_sys(items[0]["_source"], 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
        self.assert_mem_sys(items[1]["_source"], 10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)
        self.assert_mem_sys(items[2]["_source"], 12, 34, 654, 24, 33, 23, 442, 1, 13, 21, 44)
        self.assert_mem_sys(items[3]["_source"], 15, 3445, 345, 345, 44, 745, 367, 32, 1111, 33, 55)
        self.assert_mem_sys(items[4]["_source"], 33, 33, 453, 998, 347, 976, 8544, 45, 5555, 66, 33)
def assert_mem_sys(self, item, time, available, percent, used, free,
swap_total, swap_used, swap_free, swap_in, swap_out, swap_percent):
self.assertEqual(11, len(item), "number of properties for a mem sys item")
self.assertEqual(item['time'], time)
self.assertEqual(item['available'], available)
self.assertEqual(item['percent'], percent)
self.assertEqual(item['used'], used)
self.assertEqual(item['free'], free)
self.assertEqual(item['swap_total'], swap_total)
self.assertEqual(item['swap_used'], swap_used)
self.assertEqual(item['swap_free'], swap_free)
self.assertEqual(item['swap_in'], swap_in)
self.assertEqual(item['swap_out'], swap_out)
self.assertEqual(item['swap_percent'], swap_percent)
def test_mem_proc_es(self):
log = ESNetLogger()
log.mem_proc(0, 1, 11, 15, 5, "p1")
log.mem_proc(1, 2, 1, 3, 2, "p2")
log.mem_proc(2, 5432, 21, 33, 9, "p3")
log.mem_proc(5, 1, 22, 11, 3, "p1")
log.mem_proc(6, 5432, 7, 55, 7, "p3")
log.mem_proc(66, 1, 11, 15, 5, "p1")
log.mem_proc(67, 2, 11, 0, 22, "p2")
log.mem_proc(68, 5432, 212, 334, 44, "p3")
log.close()
time.sleep(1)
data = self.es.search(index=config.DB_NAME, body='{"query":{"match_all":{}}, "sort": { "time": { "order": "asc" }}}')
items = data["hits"]["hits"]
self.assertEqual(8, len(items), "number of mem proc items logged")
# print(str(items[0]["_source"]))
self.assert_mem_proc(items[0]["_source"], 0, 1, 11, 15, 5, "p1")
self.assert_mem_proc(items[1]["_source"], 1, 2, 1, 3, 2, "p2")
self.assert_mem_proc(items[2]["_source"], 2, 5432, 21, 33, 9, "p3")
self.assert_mem_proc(items[3]["_source"], 5, 1, 22, 11, 3, "p1")
self.assert_mem_proc(items[4]["_source"], 6, 5432, 7, 55, 7, "p3")
self.assert_mem_proc(items[5]["_source"], 66, 1, 11, 15, 5, "p1")
self.assert_mem_proc(items[6]["_source"], 67, 2, 11, 0, 22, "p2")
self.assert_mem_proc(items[7]["_source"], 68, 5432, 212, 334, 44, "p3")
def assert_mem_proc(self, item, time, pid, rss, vms, percent, pname):
self.assertEqual(6, len(item), "number of properties for a mem proc item")
self.assertEqual(item['time'], time)
self.assertEqual(item['pid'], pid)
self.assertEqual(item['rss'], rss)
self.assertEqual(item['vms'], vms)
self.assertEqual(item['percent'], percent)
self.assertEqual(item['pname'], pname)
def test_io_sys_es(self):
log = ESNetLogger()
log.io_sys(11111, 22, 22, 34, 43, 11, 11, 5, 3)
log.io_sys(22222, 55, 23, 44, 34, 23, 17, 15, 4)
log.io_sys(22233, 65, 23, 777, 44, 28, 18, 35, 5)
log.io_sys(25555, 78, 44, 1911, 53, 99434, 43, 43, 21)
log.close()
time.sleep(1)
data = self.es.search(index=config.DB_NAME, body='{"query":{"match_all":{}}, "sort": { "time": { "order": "asc" }}}')
items = data["hits"]["hits"]
self.assertEqual(4, len(items), "number of mem sys items logged")
# print(str(items[0]["_source"]))
self.assert_io_sys(items[0]["_source"], 11111, 22, 22, 34, 43, 11, 11, 5, 3)
self.assert_io_sys(items[1]["_source"], 22222, 55, 23, 44, 34, 23, 17, 15, 4)
self.assert_io_sys(items[2]["_source"], 22233, 65, 23, 777, 44, 28, 18, 35, 5)
self.assert_io_sys(items[3]["_source"], 25555, 78, 44, 1911, 53, 99434, 43, 43, 21)
def assert_io_sys(self, item, time, bytes_sent, bytes_recv, packets_sent, packets_recv, errin, errout, dropin, dropout):
self.assertEqual(9, len(item), "number of properties for a mem proc item")
self.assertEqual(item['time'], time)
self.assertEqual(item['bytes_sent'], bytes_sent)
self.assertEqual(item['bytes_recv'], bytes_recv)
self.assertEqual(item['packets_sent'], packets_sent)
self.assertEqual(item['packets_received'], packets_recv)
self.assertEqual(item['errors_in'], errin)
self.assertEqual(item['errors_out'], errout)
self.assertEqual(item['dropped_in'], dropin)
self.assertEqual(item['dropped_out'], dropout)
def test_proc_error_es(self):
log = ESNetLogger()
log.proc_error(11111, 22, "epic fail")
log.proc_error(11112, 9758, "fail")
log.proc_error(11113, 7364, "little fail")
log.close()
time.sleep(1)
data = self.es.search(index=config.DB_NAME, body='{"query":{"match_all":{}}, "sort": { "time": { "order": "asc" }}}')
items = data["hits"]["hits"]
self.assertEqual(3, len(items), "number of mem sys items logged")
# print(str(items[0]["_source"]))
self.assert_proc_error(items[0]["_source"], 11111, 22, "epic fail")
self.assert_proc_error(items[1]["_source"], 11112, 9758, "fail")
self.assert_proc_error(items[2]["_source"], 11113, 7364, "little fail")
def assert_proc_error(self, item, time, pid, name):
self.assertEqual(3, len(item), "number of properties for a proc error item")
self.assertEqual(item['time'], time)
self.assertEqual(item['pid'], pid)
self.assertEqual(item['name'], name)
def test_proc_info_es(self):
log = ESNetLogger()
log.proc_info(11111, 22, "proc1")
log.proc_info(11111, 9758, "proc2")
log.proc_info(11111, 7364, "proc4")
log.proc_info(11111, 3332, "proc3")
log.close()
time.sleep(1)
data = self.es.search(index=config.DB_NAME, body='{"query":{"match_all":{}}, "sort": { "pid": { "order": "asc" }}}')
items = data["hits"]["hits"]
self.assertEqual(4, len(items), "number of mem sys items logged")
# print(str(items[0]["_source"]))
self.assert_proc_error(items[0]["_source"], 11111, 22, "proc1")
self.assert_proc_error(items[1]["_source"], 11111, 3332, "proc3")
self.assert_proc_error(items[2]["_source"], 11111, 7364, "proc4")
self.assert_proc_error(items[3]["_source"], 11111, 9758, "proc2")
def assert_proc_info(self, item, time, pid, name):
self.assertEqual(3, len(item), "number of properties for a proc info item")
self.assertEqual(item['time'], time)
self.assertEqual(item['pid'], pid)
self.assertEqual(item['name'], name)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#============================================================================
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import random
import uuid
from xml.sax import saxutils
import pprint
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
# XenAPI object classes the fake in-memory database knows how to store.
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
            'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
# Fake database: maps class name -> {opaque ref -> record dict}.
_db_content = {}
LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
    """Dump the entire fake database to the debug log, prefixed by *msg*."""
    text = msg or ""
    content = pprint.pformat(_db_content)
    LOG.debug(_("%(text)s: _db_content => %(content)s")
              % {'text': text, 'content': content})
def reset():
    """Wipe the fake database and seed it with one host plus its control domain."""
    for c in _CLASSES:
        _db_content[c] = {}
    host = create_host('fake')
    # Every XenServer host has a privileged control domain (dom0) that is
    # always running.
    create_vm('fake',
              'Running',
              is_a_template=False,
              is_control_domain=True,
              resident_on=host)
def reset_table(table):
    """Clear a single table of the fake database; unknown names are ignored."""
    # Idiom fix: 'not table in' -> 'table not in'.
    if table not in _CLASSES:
        return
    _db_content[table] = {}
def create_pool(name_label):
    """Create a fake pool record with the given name label; return its ref."""
    return _create_object('pool',
                          {'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
    """Create a fake host record and return its opaque ref."""
    return _create_object('host',
                          {'name_label': name_label,
                           'hostname': hostname,
                           'address': address})
def create_network(name_label, bridge):
    """Create a fake network record attached to *bridge*; return its ref."""
    return _create_object('network',
                          {'name_label': name_label,
                           'bridge': bridge})
def create_vm(name_label, status, **kwargs):
    """Create a fake VM record in power state *status*; return its ref.

    Running VMs get a random positive domain id; halted VMs get -1.
    """
    # The old 'cond and a or b' ternary is fragile (breaks if the middle
    # operand is falsy); use the real conditional expression instead.
    domid = random.randrange(1, 1 << 16) if status == 'Running' else -1
    vm_rec = kwargs.copy()
    vm_rec.update({'name_label': name_label,
                   'domid': domid,
                   'power_state': status})
    vm_ref = _create_object('VM', vm_rec)
    after_VM_create(vm_ref, vm_rec)
    return vm_ref
def destroy_vm(vm_ref):
    """Delete a VM record, first destroying all VBDs attached to it."""
    vm_rec = _db_content['VM'][vm_ref]
    vbd_refs = vm_rec['VBDs']
    # NOTE(johannes): Shallow copy since destroy_vbd will remove itself
    # from the list
    for vbd_ref in vbd_refs[:]:
        destroy_vbd(vbd_ref)
    del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
    """Delete a VBD record and drop the backrefs held by its VM and VDI."""
    vbd_rec = _db_content['VBD'][vbd_ref]
    vm_ref = vbd_rec['VM']
    vm_rec = _db_content['VM'][vm_ref]
    vm_rec['VBDs'].remove(vbd_ref)
    vdi_ref = vbd_rec['VDI']
    vdi_rec = _db_content['VDI'][vdi_ref]
    vdi_rec['VBDs'].remove(vbd_ref)
    del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
    """Delete a VDI record, destroying any VBDs that reference it first."""
    vdi_rec = _db_content['VDI'][vdi_ref]
    vbd_refs = vdi_rec['VBDs']
    # NOTE(johannes): Shallow copy since destroy_vbd will remove itself
    # from the list
    for vbd_ref in vbd_refs[:]:
        destroy_vbd(vbd_ref)
    del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
    """Create a fake VDI record in SR *sr_ref*; extra fields come from kwargs."""
    vdi_rec = {
        'SR': sr_ref,
        'read_only': False,
        'type': '',
        'name_label': name_label,
        'name_description': '',
        'sharable': False,
        'other_config': {},
        'location': '',
        'xenstore_data': {},
        'sm_config': {},
        # XenAPI reports sizes as strings, hence the quoted number.
        'physical_utilisation': '123',
        'managed': True,
    }
    vdi_rec.update(kwargs)
    vdi_ref = _create_object('VDI', vdi_rec)
    after_VDI_create(vdi_ref, vdi_rec)
    return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
    """Initialize the read-only VBD backref list on a new VDI record."""
    vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0):
    """Create a fake VBD linking *vm_ref* to *vdi_ref*; return its ref."""
    vbd_rec = {'VM': vm_ref,
               'VDI': vdi_ref,
               'userdevice': str(userdevice),
               'currently_attached': False}
    vbd_ref = _create_object('VBD', vbd_rec)
    after_VBD_create(vbd_ref, vbd_rec)
    return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
    """Create read-only fields and backref from VM and VDI to VBD when VBD
    is created."""
    vbd_rec['currently_attached'] = False
    vbd_rec['device'] = ''
    vm_ref = vbd_rec['VM']
    vm_rec = _db_content['VM'][vm_ref]
    vm_rec['VBDs'].append(vbd_ref)
    vdi_ref = vbd_rec['VDI']
    vdi_rec = _db_content['VDI'][vdi_ref]
    vdi_rec['VBDs'].append(vbd_ref)
    # Denormalized convenience field so tests can look up VBDs by VM name.
    vm_name_label = _db_content['VM'][vm_ref]['name_label']
    vbd_rec['vm_name_label'] = vm_name_label
def after_VM_create(vm_ref, vm_rec):
    """Create read-only fields in the VM record."""
    vm_rec.setdefault('is_control_domain', False)
    # Defaults mimic a guest with 8GB RAM and 4 VCPUs; XenAPI stores these
    # numeric fields as strings.
    vm_rec.setdefault('memory_static_max', str(8 * 1024 * 1024 * 1024))
    vm_rec.setdefault('memory_dynamic_max', str(8 * 1024 * 1024 * 1024))
    vm_rec.setdefault('VCPUs_max', str(4))
    vm_rec.setdefault('VBDs', [])
def create_pbd(config, host_ref, sr_ref, attached):
    """Create a fake PBD connecting *host_ref* to *sr_ref*; return its ref."""
    return _create_object('PBD',
                          {'device-config': config,
                           'host': host_ref,
                           'SR': sr_ref,
                           'currently-attached': attached})
def create_task(name_label):
    """Create a fake task record in the 'pending' state; return its ref."""
    return _create_object('task',
                          {'name_label': name_label,
                           'status': 'pending'})
def create_local_pifs():
    """Add a fake physical PIF (VLAN=-1) for every host in the database.

    Do this once per host."""
    for host_ref in _db_content['host'].keys():
        _create_local_pif(host_ref)
def create_local_srs():
    """Create an SR that looks like the one created on the local disk by
    default by the XenServer installer. Do this one per host. Also, fake
    the installation of an ISO SR."""
    # One 'Local storage' (lvm) SR plus one ISO SR per known host.
    for host_ref in _db_content['host'].keys():
        create_sr(name_label='Local storage',
                  type='lvm',
                  other_config={'i18n-original-value-name_label':
                                'Local storage',
                                'i18n-key': 'local-storage'},
                  physical_utilisation=20000,
                  virtual_allocation=10000,
                  host_ref=host_ref)
        create_sr(name_label='Local storage ISO',
                  type='iso',
                  other_config={'i18n-original-value-name_label':
                                'Local storage ISO',
                                'i18n-key': 'local-storage-iso'},
                  physical_utilisation=40000,
                  virtual_allocation=80000,
                  host_ref=host_ref)
def create_sr(**kwargs):
    """Create a fake SR record plus its attached PBD; return the SR ref.

    Recognized kwargs: name_label, type, shared, physical_size,
    physical_utilisation, virtual_allocation, other_config, VDIs, host_ref.
    """
    sr_ref = _create_object(
        'SR',
        {'name_label': kwargs.get('name_label'),
         'type': kwargs.get('type'),
         'content_type': kwargs.get('type', 'user'),
         'shared': kwargs.get('shared', False),
         'physical_size': kwargs.get('physical_size', str(1 << 30)),
         'physical_utilisation': str(
             kwargs.get('physical_utilisation', 0)),
         'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
         'other_config': kwargs.get('other_config', {}),
         'VDIs': kwargs.get('VDIs', [])})
    pbd_ref = create_pbd('', kwargs.get('host_ref'), sr_ref, True)
    _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
    return sr_ref
def _create_local_pif(host_ref):
    """Create the fake physical PIF (VLAN -1) for *host_ref*; return its ref."""
    pif_ref = _create_object('PIF',
                             {'name-label': 'Fake PIF',
                              'MAC': '00:11:22:33:44:55',
                              'physical': True,
                              'VLAN': -1,
                              'device': 'fake0',
                              'host_uuid': host_ref})
    return pif_ref
def _create_object(table, obj):
    """Insert *obj* into the fake DB under *table* and return its new opaque ref."""
    obj['uuid'] = str(uuid.uuid4())
    ref = str(uuid.uuid4())
    _db_content[table][ref] = obj
    return ref
def _create_sr(table, obj):
    """Handle SR.create: *obj* is the raw XenAPI parameter tuple."""
    sr_type = obj[6]
    # Forces fake to support iscsi only
    if sr_type != 'iscsi' and sr_type != 'nfs':
        raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
    # NOTE(review): dict.keys()[0] is Python 2 only; under Python 3 this
    # would need list(_db_content['host'])[0].
    host_ref = _db_content['host'].keys()[0]
    sr_ref = _create_object(table, obj[2])
    if sr_type == 'iscsi':
        # iSCSI SRs get a single VDI and a PBD wired up immediately.
        vdi_ref = create_vdi('', sr_ref)
        pbd_ref = create_pbd('', host_ref, sr_ref, True)
        _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
        _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
        _db_content['VDI'][vdi_ref]['SR'] = sr_ref
        _db_content['PBD'][pbd_ref]['SR'] = sr_ref
    return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
    """Create a VLAN PIF on top of *pif_ref* plus the VLAN record linking them."""
    pif_rec = get_record('PIF', pif_ref)
    vlan_pif_ref = _create_object('PIF',
                                  {'name-label': 'Fake VLAN PIF',
                                   'MAC': '00:11:22:33:44:55',
                                   'physical': True,
                                   'VLAN': vlan_num,
                                   'device': pif_rec['device'],
                                   'host_uuid': pif_rec['host_uuid']})
    return _create_object('VLAN',
                          {'tagged-pif': pif_ref,
                           'untagged-pif': vlan_pif_ref,
                           'tag': vlan_num})
def get_all(table):
    """Return all opaque refs stored for *table*."""
    return _db_content[table].keys()
def get_all_records(table):
    """Return the full {ref: record} mapping for *table*."""
    return _db_content[table]
def get_record(table, ref):
    """Return the record for *ref* in *table*, raising HANDLE_INVALID if absent."""
    records = _db_content[table]
    if ref not in records:
        raise Failure(['HANDLE_INVALID', table, ref])
    return records.get(ref)
def check_for_session_leaks():
    """Raise if any fake sessions were left logged in by a test."""
    if len(_db_content['session']) > 0:
        raise exception.NovaException('Sessions have leaked: %s' %
                                      _db_content['session'])
def as_value(s):
    """Helper function for simulating XenAPI plugin responses. It
    escapes and wraps the given argument."""
    escaped = saxutils.escape(s)
    return '<value>{0}</value>'.format(escaped)
def as_json(*args, **kwargs):
    """Helper function for simulating XenAPI plugin responses for those
    that are returning JSON. Positional arguments render as a JSON list;
    keyword arguments render as a JSON dict."""
    payload = args if args else kwargs
    return jsonutils.dumps(payload)
class Failure(Exception):
    """Fake XenAPI failure carrying a structured list of error details."""

    def __init__(self, details):
        self.details = details

    def __str__(self):
        try:
            return str(self.details)
        except Exception:
            return "XenAPI Fake Failure: %s" % str(self.details)

    def _details_map(self):
        # XenAPI exposes details as a {stringified index: detail} mapping.
        return dict((str(i), d) for i, d in enumerate(self.details))
class SessionBase(object):
    """
    Base class for Fake Sessions.

    Attribute access is translated into fake XenAPI calls against the
    in-memory ``_db_content`` database (see ``__getattr__``).
    """
    def __init__(self, uri):
        self._session = None

    def pool_get_default_SR(self, _1, pool_ref):
        return 'FAKE DEFAULT SR'

    def VBD_plug(self, _1, ref):
        rec = get_record('VBD', ref)
        if rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
        rec['currently_attached'] = True
        rec['device'] = rec['userdevice']

    def VBD_unplug(self, _1, ref):
        rec = get_record('VBD', ref)
        if not rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_DETACHED', ref])
        rec['currently_attached'] = False
        rec['device'] = ''

    def PBD_create(self, _1, pbd_rec):
        pbd_ref = _create_object('PBD', pbd_rec)
        _db_content['PBD'][pbd_ref]['currently_attached'] = False
        return pbd_ref

    def PBD_plug(self, _1, pbd_ref):
        rec = get_record('PBD', pbd_ref)
        if rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
        rec['currently_attached'] = True
        sr_ref = rec['SR']
        _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]

    def PBD_unplug(self, _1, pbd_ref):
        rec = get_record('PBD', pbd_ref)
        if not rec['currently_attached']:
            raise Failure(['DEVICE_ALREADY_DETACHED', rec])
        rec['currently_attached'] = False
        # BUG FIX: previously indexed the opaque ref string (pbd_ref['SR'])
        # instead of the record, which raised TypeError on every unplug.
        sr_ref = rec['SR']
        _db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)

    def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
                     shared, sm_config):
        """Reintroduce a forgotten SR, or create a fresh record for it."""
        # BUG FIX: the old loop fell through with ref/rec still bound to the
        # *last* SR when no uuid matched, so an unrelated SR was returned.
        for ref, rec in _db_content['SR'].iteritems():
            if rec.get('uuid') == sr_uuid:
                # make forgotten = 0 and return ref
                _db_content['SR'][ref]['forgotten'] = 0
                return ref
        # SR not found in db, so we create one.  Build the record from the
        # call parameters explicitly (the old locals() dump also leaked
        # loop variables into the record).
        params = {'_1': _1, 'sr_uuid': sr_uuid, 'label': label,
                  'desc': desc, 'type': type, 'content_type': content_type,
                  'shared': shared, 'sm_config': sm_config}
        sr_ref = _create_object('SR', params)
        _db_content['SR'][sr_ref]['uuid'] = sr_uuid
        _db_content['SR'][sr_ref]['forgotten'] = 0
        # BUG FIX: ('iscsi') is a plain string, so the old membership test
        # was a substring check; a one-element tuple was intended.
        if type in ('iscsi',):
            # Just to be clear
            vdi_per_lun = True
            if vdi_per_lun:
                # we need to create a vdi because this introduce
                # is likely meant for a single vdi
                vdi_ref = create_vdi('', sr_ref)
                _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
                _db_content['VDI'][vdi_ref]['SR'] = sr_ref
        return sr_ref

    def SR_forget(self, _1, sr_ref):
        _db_content['SR'][sr_ref]['forgotten'] = 1

    def SR_scan(self, _1, sr_ref):
        return

    def PIF_get_all_records_where(self, _1, _2):
        # TODO(salvatore-orlando): filter table on _2
        return _db_content['PIF']

    def VM_get_xenstore_data(self, _1, vm_ref):
        return _db_content['VM'][vm_ref].get('xenstore_data', {})

    def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
        db_ref = _db_content['VM'][vm_ref]
        if 'xenstore_data' not in db_ref:
            return
        if key in db_ref['xenstore_data']:
            del db_ref['xenstore_data'][key]

    def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
        db_ref = _db_content['VM'][vm_ref]
        if 'xenstore_data' not in db_ref:
            db_ref['xenstore_data'] = {}
        db_ref['xenstore_data'][key] = value

    def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
        pass

    def VDI_remove_from_other_config(self, _1, vdi_ref, key):
        db_ref = _db_content['VDI'][vdi_ref]
        if 'other_config' not in db_ref:
            return
        if key in db_ref['other_config']:
            del db_ref['other_config'][key]

    def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
        db_ref = _db_content['VDI'][vdi_ref]
        if 'other_config' not in db_ref:
            db_ref['other_config'] = {}
        if key in db_ref['other_config']:
            raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
                           vdi_ref, key])
        db_ref['other_config'][key] = value

    def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
        db_ref = _db_content['VDI'][vdi_to_copy_ref]
        name_label = db_ref['name_label']
        read_only = db_ref['read_only']
        sharable = db_ref['sharable']
        other_config = db_ref['other_config'].copy()
        return create_vdi(name_label, sr_ref, sharable=sharable,
                          read_only=read_only, other_config=other_config)

    def VDI_clone(self, _1, vdi_to_clone_ref):
        db_ref = _db_content['VDI'][vdi_to_clone_ref]
        sr_ref = db_ref['SR']
        return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)

    def host_compute_free_memory(self, _1, ref):
        # Always return 12GB available
        return 12 * 1024 * 1024 * 1024

    def host_call_plugin(self, _1, _2, plugin, method, _5):
        """Return a canned response for the given (plugin, method) pair."""
        if (plugin, method) == ('agent', 'version'):
            return as_json(returncode='0', message='1.0')
        elif (plugin, method) == ('agent', 'key_init'):
            return as_json(returncode='D0', message='1')
        elif (plugin, method) == ('agent', 'password'):
            return as_json(returncode='0', message='success')
        elif (plugin, method) == ('agent', 'resetnetwork'):
            return as_json(returncode='0', message='success')
        elif (plugin, method) == ('glance', 'upload_vhd'):
            return ''
        elif (plugin, method) == ('kernel', 'copy_vdi'):
            return ''
        elif (plugin, method) == ('kernel', 'create_kernel_ramdisk'):
            return ''
        elif (plugin, method) == ('kernel', 'remove_kernel_ramdisk'):
            return ''
        elif (plugin, method) == ('migration', 'move_vhds_into_sr'):
            return ''
        elif (plugin, method) == ('migration', 'transfer_vhd'):
            return ''
        elif (plugin, method) == ('xenhost', 'host_data'):
            return jsonutils.dumps({'host_memory': {'total': 10,
                                                    'overhead': 20,
                                                    'free': 30,
                                                    'free-computed': 40}, })
        elif (plugin == 'xenhost' and method in ['host_reboot',
                                                 'host_startup',
                                                 'host_shutdown']):
            # method[5:] strips the 'host_' prefix.
            return jsonutils.dumps({"power_action": method[5:]})
        elif (plugin, method) == ('xenhost', 'set_host_enabled'):
            enabled = 'enabled' if _5.get('enabled') == 'true' else 'disabled'
            return jsonutils.dumps({"status": enabled})
        elif (plugin, method) == ('xenhost', 'host_uptime'):
            return jsonutils.dumps({"uptime": "fake uptime"})
        else:
            raise Exception('No simulation in host_call_plugin for %s,%s' %
                            (plugin, method))

    def VDI_get_virtual_size(self, *args):
        return 1 * 1024 * 1024 * 1024

    def VDI_resize_online(self, *args):
        return 'derp'

    VDI_resize = VDI_resize_online

    def _VM_reboot(self, session, vm_ref):
        db_ref = _db_content['VM'][vm_ref]
        if db_ref['power_state'] != 'Running':
            raise Failure(['VM_BAD_POWER_STATE',
                'fake-opaque-ref', db_ref['power_state'].lower(), 'halted'])
        db_ref['power_state'] = 'Running'

    def VM_clean_reboot(self, session, vm_ref):
        return self._VM_reboot(session, vm_ref)

    def VM_hard_reboot(self, session, vm_ref):
        return self._VM_reboot(session, vm_ref)

    def VM_hard_shutdown(self, session, vm_ref):
        db_ref = _db_content['VM'][vm_ref]
        db_ref['power_state'] = 'Halted'
    VM_clean_shutdown = VM_hard_shutdown

    def pool_eject(self, session, host_ref):
        pass

    def pool_join(self, session, hostname, username, password):
        pass

    def pool_set_name_label(self, session, pool_ref, name):
        pass

    def network_get_all_records_where(self, _1, filter):
        return self.xenapi.network.get_all_records()

    def xenapi_request(self, methodname, params):
        """Entry point used by _Dispatcher: route login/logout/other calls."""
        if methodname.startswith('login'):
            self._login(methodname, params)
            return None
        elif methodname == 'logout' or methodname == 'session.logout':
            self._logout()
            return None
        else:
            full_params = (self._session,) + params
            meth = getattr(self, methodname, None)
            if meth is None:
                LOG.debug(_('Raising NotImplemented'))
                raise NotImplementedError(
                    _('xenapi.fake does not have an implementation for %s') %
                    methodname)
            return meth(*full_params)

    def _login(self, method, params):
        self._session = str(uuid.uuid4())
        # NOTE(review): dict.keys()[0] is Python 2 only.
        _session_info = {'uuid': str(uuid.uuid4()),
                         'this_host': _db_content['host'].keys()[0]}
        _db_content['session'][self._session] = _session_info

    def _logout(self):
        s = self._session
        self._session = None
        if s not in _db_content['session']:
            raise exception.NovaException(
                "Logging out a session that is invalid or already logged "
                "out: %s" % s)
        del _db_content['session'][s]

    def __getattr__(self, name):
        """Resolve XenAPI-style names ('VM.get_record', 'Async.*', ...)."""
        if name == 'handle':
            return self._session
        elif name == 'xenapi':
            return _Dispatcher(self.xenapi_request, None)
        elif name.startswith('login') or name.startswith('slave_local'):
            return lambda *params: self._login(name, params)
        elif name.startswith('Async'):
            return lambda *params: self._async(name, params)
        elif '.' in name:
            impl = getattr(self, name.replace('.', '_'))
            if impl is not None:
                def callit(*params):
                    localname = name
                    LOG.debug(_('Calling %(localname)s %(impl)s') % locals())
                    self._check_session(params)
                    return impl(*params)
                return callit
        if self._is_gettersetter(name, True):
            LOG.debug(_('Calling getter %s'), name)
            return lambda *params: self._getter(name, params)
        elif self._is_gettersetter(name, False):
            LOG.debug(_('Calling setter %s'), name)
            return lambda *params: self._setter(name, params)
        elif self._is_create(name):
            return lambda *params: self._create(name, params)
        elif self._is_destroy(name):
            return lambda *params: self._destroy(name, params)
        else:
            # NOTE(review): unknown attributes resolve to None instead of
            # raising AttributeError; xenapi_request relies on this via
            # getattr(self, methodname, None).
            return None

    def _is_gettersetter(self, name, getter):
        bits = name.split('.')
        return (len(bits) == 2 and
                bits[0] in _CLASSES and
                bits[1].startswith(getter and 'get_' or 'set_'))

    def _is_create(self, name):
        return self._is_method(name, 'create')

    def _is_destroy(self, name):
        return self._is_method(name, 'destroy')

    def _is_method(self, name, meth):
        bits = name.split('.')
        return (len(bits) == 2 and
                bits[0] in _CLASSES and
                bits[1] == meth)

    def _getter(self, name, params):
        self._check_session(params)
        (cls, func) = name.split('.')
        if func == 'get_all':
            self._check_arg_count(params, 1)
            return get_all(cls)
        if func == 'get_all_records':
            self._check_arg_count(params, 1)
            return get_all_records(cls)
        if func == 'get_record':
            self._check_arg_count(params, 2)
            return get_record(cls, params[1])
        if func in ('get_by_name_label', 'get_by_uuid'):
            self._check_arg_count(params, 2)
            return_singleton = (func == 'get_by_uuid')
            return self._get_by_field(
                _db_content[cls], func[len('get_by_'):], params[1],
                return_singleton=return_singleton)
        # Generic single-field getter: cls.get_<field>(ref)
        if len(params) == 2:
            field = func[len('get_'):]
            ref = params[1]
            if (ref in _db_content[cls]):
                if (field in _db_content[cls][ref]):
                    return _db_content[cls][ref][field]
            else:
                raise Failure(['HANDLE_INVALID', cls, ref])
        LOG.debug(_('Raising NotImplemented'))
        raise NotImplementedError(
            _('xenapi.fake does not have an implementation for %s or it has '
              'been called with the wrong number of arguments') % name)

    def _setter(self, name, params):
        self._check_session(params)
        (cls, func) = name.split('.')
        if len(params) == 3:
            field = func[len('set_'):]
            ref = params[1]
            val = params[2]
            if (ref in _db_content[cls] and
                    field in _db_content[cls][ref]):
                _db_content[cls][ref][field] = val
                return
        LOG.debug(_('Raising NotImplemented'))
        raise NotImplementedError(
            'xenapi.fake does not have an implementation for %s or it has '
            'been called with the wrong number of arguments or the database '
            'is missing that field' % name)

    def _create(self, name, params):
        self._check_session(params)
        is_sr_create = name == 'SR.create'
        is_vlan_create = name == 'VLAN.create'
        # Storage Repositories have a different API ('and/or' chains
        # replaced with explicit branches for readability).
        if is_sr_create:
            expected = 10
        elif is_vlan_create:
            expected = 4
        else:
            expected = 2
        self._check_arg_count(params, expected)
        (cls, _) = name.split('.')
        if is_sr_create:
            ref = _create_sr(cls, params)
        elif is_vlan_create:
            ref = _create_vlan(params[1], params[2], params[3])
        else:
            ref = _create_object(cls, params[1])

        # Call hook to provide any fixups needed (ex. creating backrefs)
        after_hook = 'after_%s_create' % cls
        if after_hook in globals():
            globals()[after_hook](ref, params[1])

        obj = get_record(cls, ref)

        # Add RO fields
        if cls == 'VM':
            obj['power_state'] = 'Halted'
        return ref

    def _destroy(self, name, params):
        self._check_session(params)
        self._check_arg_count(params, 2)
        table = name.split('.')[0]
        ref = params[1]
        if ref not in _db_content[table]:
            raise Failure(['HANDLE_INVALID', table, ref])

        # Call destroy function (if exists)
        destroy_func = globals().get('destroy_%s' % table.lower())
        if destroy_func:
            destroy_func(ref)
        else:
            del _db_content[table][ref]

    def _async(self, name, params):
        """Run an Async.* call synchronously, recording result in a task."""
        task_ref = create_task(name)
        task = _db_content['task'][task_ref]
        func = name[len('Async.'):]
        try:
            result = self.xenapi_request(func, params[1:])
            if result:
                result = as_value(result)
            task['result'] = result
            task['status'] = 'success'
        # FIX: 'except E as e' works on Python 2.6+ and is forward
        # compatible with Python 3 (the old 'except E, e' syntax is not).
        except Failure as exc:
            task['error_info'] = exc.details
            task['status'] = 'failed'
        task['finished'] = timeutils.utcnow()
        return task_ref

    def _check_session(self, params):
        if (self._session is None or
                self._session not in _db_content['session']):
            raise Failure(['HANDLE_INVALID', 'session', self._session])
        if len(params) == 0 or params[0] != self._session:
            LOG.debug(_('Raising NotImplemented'))
            raise NotImplementedError('Call to XenAPI without using .xenapi')

    def _check_arg_count(self, params, expected):
        actual = len(params)
        if actual != expected:
            raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
                           expected, actual])

    def _get_by_field(self, recs, k, v, return_singleton):
        result = []
        for ref, rec in recs.iteritems():
            if rec.get(k) == v:
                result.append(ref)
        if return_singleton:
            try:
                return result[0]
            except IndexError:
                raise Failure(['UUID_INVALID', v, result, recs, k])
        return result
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
|
|
import numpy as np
from numpy import array, sqrt
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_almost_equal, assert_allclose)
from pytest import raises as assert_raises
from scipy import integrate
import scipy.special as sc
from scipy.special import gamma
import scipy.special.orthogonal as orth
class TestCheby(object):
    """Check low-order Chebyshev polynomials against known coefficients."""

    def test_chebyc(self):
        # chebyc emits harmless floating-point warnings for n >= 2.
        polys = [orth.chebyc(0), orth.chebyc(1)]
        olderr = np.seterr(all='ignore')
        try:
            polys += [orth.chebyc(n) for n in range(2, 6)]
        finally:
            np.seterr(**olderr)
        expected = [[2], [1, 0], [1, 0, -2], [1, 0, -3, 0],
                    [1, 0, -4, 0, 2], [1, 0, -5, 0, 5, 0]]
        for poly, coeffs in zip(polys, expected):
            assert_array_almost_equal(poly.c, coeffs, 13)

    def test_chebys(self):
        expected = [[1], [1, 0], [1, 0, -1], [1, 0, -2, 0],
                    [1, 0, -3, 0, 1], [1, 0, -4, 0, 3, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebys(n).c, coeffs, 13)

    def test_chebyt(self):
        expected = [[1], [1, 0], [2, 0, -1], [4, 0, -3, 0],
                    [8, 0, -8, 0, 1], [16, 0, -20, 0, 5, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebyt(n).c, coeffs, 13)

    def test_chebyu(self):
        expected = [[1], [2, 0], [4, 0, -1], [8, 0, -4, 0],
                    [16, 0, -12, 0, 1], [32, 0, -32, 0, 6, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebyu(n).c, coeffs, 13)
class TestGegenbauer(object):
    """Check low-order Gegenbauer polynomials for a random parameter a."""

    def test_gegenbauer(self):
        # Random a in (-0.5, 4.5), nudged away from the degenerate a == 0.
        a = 5 * np.random.random() - 0.5
        if np.any(a == 0):
            a = -0.2
        polys = [orth.gegenbauer(n, a) for n in range(6)]
        expected = [
            array([1]),
            array([2 * a, 0]),
            array([2 * a * (a + 1), 0, -a]),
            array([4 * sc.poch(a, 3), 0, -6 * a * (a + 1), 0]) / 3.0,
            array([4 * sc.poch(a, 4), 0, -12 * sc.poch(a, 3),
                   0, 3 * a * (a + 1)]) / 6.0,
            array([4 * sc.poch(a, 5), 0, -20 * sc.poch(a, 4),
                   0, 15 * sc.poch(a, 3), 0]) / 15.0,
        ]
        decimals = [13, 13, 13, 11, 11, 11]
        for poly, coeffs, dec in zip(polys, expected, decimals):
            assert_array_almost_equal(poly.c, coeffs, dec)
class TestHermite(object):
    """Check physicists' and probabilists' Hermite polynomial coefficients."""

    def test_hermite(self):
        expected = [[1], [2, 0], [4, 0, -2], [8, 0, -12, 0],
                    [16, 0, -48, 0, 12], [32, 0, -160, 0, 120, 0]]
        decimals = [13, 13, 13, 13, 12, 12]
        for n in range(6):
            assert_array_almost_equal(orth.hermite(n).c,
                                      expected[n], decimals[n])

    def test_hermitenorm(self):
        # He_n(x) = 2**(-n/2) H_n(x/sqrt(2))
        psub = np.poly1d([1.0 / sqrt(2), 0])
        scales = [1.0, sqrt(2), 2.0, 2 * sqrt(2), 4.0, 4.0 * sqrt(2)]
        for n in range(6):
            reference = orth.hermite(n)(psub) / scales[n]
            assert_array_almost_equal(orth.hermitenorm(n).c,
                                      reference.c, 13)
class _test_sh_legendre(object):
def test_sh_legendre(self):
# P*_n(x) = P_n(2x-1)
psub = np.poly1d([2,-1])
Ps0 = orth.sh_legendre(0)
Ps1 = orth.sh_legendre(1)
Ps2 = orth.sh_legendre(2)
Ps3 = orth.sh_legendre(3)
Ps4 = orth.sh_legendre(4)
Ps5 = orth.sh_legendre(5)
pse0 = orth.legendre(0)(psub)
pse1 = orth.legendre(1)(psub)
pse2 = orth.legendre(2)(psub)
pse3 = orth.legendre(3)(psub)
pse4 = orth.legendre(4)(psub)
pse5 = orth.legendre(5)(psub)
assert_array_almost_equal(Ps0.c,pse0.c,13)
assert_array_almost_equal(Ps1.c,pse1.c,13)
assert_array_almost_equal(Ps2.c,pse2.c,13)
assert_array_almost_equal(Ps3.c,pse3.c,13)
assert_array_almost_equal(Ps4.c,pse4.c,12)
assert_array_almost_equal(Ps5.c,pse5.c,12)
class _test_sh_chebyt(object):
def test_sh_chebyt(self):
# T*_n(x) = T_n(2x-1)
psub = np.poly1d([2,-1])
Ts0 = orth.sh_chebyt(0)
Ts1 = orth.sh_chebyt(1)
Ts2 = orth.sh_chebyt(2)
Ts3 = orth.sh_chebyt(3)
Ts4 = orth.sh_chebyt(4)
Ts5 = orth.sh_chebyt(5)
tse0 = orth.chebyt(0)(psub)
tse1 = orth.chebyt(1)(psub)
tse2 = orth.chebyt(2)(psub)
tse3 = orth.chebyt(3)(psub)
tse4 = orth.chebyt(4)(psub)
tse5 = orth.chebyt(5)(psub)
assert_array_almost_equal(Ts0.c,tse0.c,13)
assert_array_almost_equal(Ts1.c,tse1.c,13)
assert_array_almost_equal(Ts2.c,tse2.c,13)
assert_array_almost_equal(Ts3.c,tse3.c,13)
assert_array_almost_equal(Ts4.c,tse4.c,12)
assert_array_almost_equal(Ts5.c,tse5.c,12)
class _test_sh_chebyu(object):
def test_sh_chebyu(self):
# U*_n(x) = U_n(2x-1)
psub = np.poly1d([2,-1])
Us0 = orth.sh_chebyu(0)
Us1 = orth.sh_chebyu(1)
Us2 = orth.sh_chebyu(2)
Us3 = orth.sh_chebyu(3)
Us4 = orth.sh_chebyu(4)
Us5 = orth.sh_chebyu(5)
use0 = orth.chebyu(0)(psub)
use1 = orth.chebyu(1)(psub)
use2 = orth.chebyu(2)(psub)
use3 = orth.chebyu(3)(psub)
use4 = orth.chebyu(4)(psub)
use5 = orth.chebyu(5)(psub)
assert_array_almost_equal(Us0.c,use0.c,13)
assert_array_almost_equal(Us1.c,use1.c,13)
assert_array_almost_equal(Us2.c,use2.c,13)
assert_array_almost_equal(Us3.c,use3.c,13)
assert_array_almost_equal(Us4.c,use4.c,12)
assert_array_almost_equal(Us5.c,use5.c,11)
class _test_sh_jacobi(object):
    # NOTE: the leading underscore keeps this class out of test collection.

    def test_sh_jacobi(self):
        """Shifted Jacobi G^(p,q)_n versus rescaled Jacobi P^(p-q,q-1)_n."""
        # G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1)
        conv = lambda n,p: gamma(n+1)*gamma(n+p)/gamma(2*n+p)
        psub = np.poly1d([2,-1])
        # Random orders; constructed so that p > q - 1 and q > 0 hold.
        q = 4 * np.random.random()
        p = q-1 + 2*np.random.random()
        # print("shifted jacobi p,q = ", p, q)
        G0 = orth.sh_jacobi(0,p,q)
        G1 = orth.sh_jacobi(1,p,q)
        G2 = orth.sh_jacobi(2,p,q)
        G3 = orth.sh_jacobi(3,p,q)
        G4 = orth.sh_jacobi(4,p,q)
        G5 = orth.sh_jacobi(5,p,q)
        # Reference values: plain Jacobi composed with 2x - 1, rescaled by conv.
        ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p)
        ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p)
        ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p)
        ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p)
        ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p)
        ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p)
        assert_array_almost_equal(G0.c,ge0.c,13)
        assert_array_almost_equal(G1.c,ge1.c,13)
        assert_array_almost_equal(G2.c,ge2.c,13)
        assert_array_almost_equal(G3.c,ge3.c,13)
        assert_array_almost_equal(G4.c,ge4.c,13)
        assert_array_almost_equal(G5.c,ge5.c,13)
class TestCall(object):

    def test_call(self):
        """Calling each polynomial object agrees with evaluating np.poly1d on
        its own coefficients, for every family and degree 0..4."""
        poly = []
        for n in range(5):
            # Build constructor expressions as strings, one per family/degree.
            poly.extend([x.strip() for x in
                ("""
orth.jacobi(%(n)d,0.3,0.9)
orth.sh_jacobi(%(n)d,0.3,0.9)
orth.genlaguerre(%(n)d,0.3)
orth.laguerre(%(n)d)
orth.hermite(%(n)d)
orth.hermitenorm(%(n)d)
orth.gegenbauer(%(n)d,0.3)
orth.chebyt(%(n)d)
orth.chebyu(%(n)d)
orth.chebyc(%(n)d)
orth.chebys(%(n)d)
orth.sh_chebyt(%(n)d)
orth.sh_chebyu(%(n)d)
orth.legendre(%(n)d)
orth.sh_legendre(%(n)d)
""" % dict(n=n)).split()
                ])
        # Silence numeric warnings while evaluating; restore state afterwards.
        olderr = np.seterr(all='ignore')
        try:
            for pstr in poly:
                # eval is safe here: inputs are the fixed literals above.
                p = eval(pstr)
                assert_almost_equal(p(0.315), np.poly1d(p.coef)(0.315),
                                    err_msg=pstr)
        finally:
            np.seterr(**olderr)
class TestGenlaguerre(object):
    """Regression checks for generalized Laguerre polynomials."""

    def test_regression(self):
        """L_1^{(1)} has the expected value at 0 and the expected
        coefficients, in both monic and non-monic form."""
        standard = orth.genlaguerre(1, 1, monic=False)
        monic = orth.genlaguerre(1, 1, monic=True)
        assert_equal(standard(0), 2.)
        assert_equal(monic(0), -2.)
        assert_equal(standard, np.poly1d([-1, 2]))
        assert_equal(monic, np.poly1d([1, -2]))
def verify_gauss_quad(root_func, eval_func, weight_func, a, b, N,
                      rtol=1e-15, atol=1e-14):
    """Check an N-point Gaussian quadrature rule.

    Verifies (1) that the evaluated polynomials are orthonormal under the
    returned weights, (2) that the weights sum to the weight integral mu,
    and (3) that integrating a cubic with the rule matches scipy.integrate.quad.

    This check is adapted from numpy's TestGauss in test_hermite.py.
    """
    nodes, weights, mu = root_func(N, True)
    degrees = np.arange(N)
    vander = eval_func(degrees[:, np.newaxis], nodes)
    gram = np.dot(vander * weights, vander.T)
    # Normalize the Gram matrix; it must then be the identity.
    scale = 1 / np.sqrt(gram.diagonal())
    gram = scale[:, np.newaxis] * gram * scale
    assert_allclose(gram, np.eye(N), rtol, atol)
    # The weights must integrate the weight function itself.
    assert_allclose(weights.sum(), mu, rtol, atol)

    # Compare quadrature of a fixed cubic against adaptive integration.
    def f(x):
        return x**3 - 3*x**2 + x - 2

    ref, ref_err = integrate.quad(lambda x: f(x) * weight_func(x), a, b)
    approx = np.vdot(f(nodes), weights)
    # Loosen the tolerance when quad's own error estimate dominates.
    rtol = 1e-6 if 1e-6 < ref_err else ref_err * 10
    assert_allclose(ref, approx, rtol=rtol)
def test_roots_jacobi():
    """Gauss-Jacobi quadrature across a spread of (a, b) orders, plus
    reductions to Legendre/Gegenbauer and argument validation."""
    rf = lambda a, b: lambda n, mu: sc.roots_jacobi(n, a, b, mu)
    ef = lambda a, b: lambda n, x: sc.eval_jacobi(n, a, b, x)
    wf = lambda a, b: lambda x: (1 - x)**a * (1 + x)**b
    vgq = verify_gauss_quad
    # Tolerances per case are empirically tuned; larger rules get looser atol.
    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., 5)
    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
        25, atol=1e-12)
    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
        100, atol=1e-11)
    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 5)
    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 25, atol=1.5e-13)
    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 100, atol=2e-12)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 5, atol=2e-13)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 25, atol=2e-13)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 100, atol=1e-12)
    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 5)
    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 25, atol=1e-13)
    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 100, atol=3e-13)
    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 5)
    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 25,
        atol=1.1e-14)
    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1.,
        100, atol=1e-13)
    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 5, atol=1e-13)
    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 25, atol=2e-13)
    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1.,
        100, atol=1e-11)
    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 5)
    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 25, atol=1e-13)
    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1.,
        100, atol=1e-13)

    # when alpha == beta == 0, P_n^{a,b}(x) == P_n(x)
    xj, wj = sc.roots_jacobi(6, 0.0, 0.0)
    xl, wl = sc.roots_legendre(6)
    assert_allclose(xj, xl, 1e-14, 1e-14)
    assert_allclose(wj, wl, 1e-14, 1e-14)

    # when alpha == beta != 0, P_n^{a,b}(x) == C_n^{alpha+0.5}(x)
    xj, wj = sc.roots_jacobi(6, 4.0, 4.0)
    xc, wc = sc.roots_gegenbauer(6, 4.5)
    assert_allclose(xj, xc, 1e-14, 1e-14)
    assert_allclose(wj, wc, 1e-14, 1e-14)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_jacobi(5, 2, 3, False)
    y, v, m = sc.roots_jacobi(5, 2, 3, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(wf(2,3), -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid degree or out-of-domain orders must be rejected.
    assert_raises(ValueError, sc.roots_jacobi, 0, 1, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3.3, 1, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3, -2, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3, 1, -2)
    assert_raises(ValueError, sc.roots_jacobi, 3, -2, -2)
def test_roots_sh_jacobi():
    """Gauss quadrature for shifted Jacobi polynomials on [0, 1], plus
    argument validation."""
    rf = lambda a, b: lambda n, mu: sc.roots_sh_jacobi(n, a, b, mu)
    ef = lambda a, b: lambda n, x: sc.eval_sh_jacobi(n, a, b, x)
    wf = lambda a, b: lambda x: (1. - x)**(a - b) * (x)**(b - 1.)
    vgq = verify_gauss_quad
    # Tolerances per case are empirically tuned; larger rules get looser atol.
    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., 5)
    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
        25, atol=1e-12)
    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
        100, atol=1e-11)
    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 5)
    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 25, atol=1e-13)
    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 100, atol=1e-12)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 5)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 25, atol=1.5e-13)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 100, atol=2e-12)
    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 5)
    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 25, atol=1e-13)
    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 100, atol=1e-12)
    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 5)
    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 25)
    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1.,
        100, atol=1e-13)
    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 5, atol=1e-12)
    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 25, atol=1e-11)
    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 100, atol=1e-10)
    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 5, atol=3.5e-14)
    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 25, atol=2e-13)
    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1.,
        100, atol=1e-12)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_sh_jacobi(5, 3, 2, False)
    y, v, m = sc.roots_sh_jacobi(5, 3, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(wf(3,2), 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_jacobi, 0, 1, 1)
    assert_raises(ValueError, sc.roots_sh_jacobi, 3.3, 1, 1)
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 1, 2)    # p - q <= -1
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 2, -1)   # q <= 0
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, -2, -1)  # both
def test_roots_hermite():
    """Gauss-Hermite quadrature, exercising both the Golub-Welsch and the
    asymptotic (large-n) code paths."""
    rootf = sc.roots_hermite
    evalf = sc.eval_hermite
    weightf = orth.hermite(5).weight_func

    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)

    # Golub-Welsch branch
    x, w = sc.roots_hermite(5, False)
    y, v, m = sc.roots_hermite(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    # Asymptotic branch (switch over at n >= 150)
    x, w = sc.roots_hermite(200, False)
    y, v, m = sc.roots_hermite(200, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    assert_allclose(sum(v), m, 1e-14, 1e-14)

    assert_raises(ValueError, sc.roots_hermite, 0)
    assert_raises(ValueError, sc.roots_hermite, 3.3)
def test_roots_hermite_asy():
    """Nodes from the asymptotic Gauss-Hermite algorithm must be zeros of the
    Hermite function, and the weights must sum to sqrt(pi)."""
    # Recursion for Hermite functions
    def hermite_recursion(n, nodes):
        H = np.zeros((n, nodes.size))
        H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2)
        if n > 1:
            H[1,:] = sqrt(2.0) * nodes * H[0,:]
            for k in range(2, n):
                H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:]
        return H

    # This tests only the nodes
    def test(N, rtol=1e-15, atol=1e-14):
        # NOTE: uses the private asymptotic implementation directly.
        x, w = orth._roots_hermite_asy(N)
        H = hermite_recursion(N+1, x)
        # Nodes are the zeros of H_N, so the last row must vanish at them.
        assert_allclose(H[-1,:], np.zeros(N), rtol, atol)
        assert_allclose(sum(w), sqrt(np.pi), rtol, atol)

    # Exercise sizes on both sides of internal switch-over points.
    test(150, atol=1e-12)
    test(151, atol=1e-12)
    test(300, atol=1e-12)
    test(301, atol=1e-12)
    test(500, atol=1e-12)
    test(501, atol=1e-12)
    test(999, atol=1e-12)
    test(1000, atol=1e-12)
    test(2000, atol=1e-12)
    test(5000, atol=1e-12)
def test_roots_hermitenorm():
    """Gauss quadrature for the probabilists' Hermite polynomials."""
    rootf = sc.roots_hermitenorm
    evalf = sc.eval_hermitenorm
    weightf = orth.hermitenorm(5).weight_func

    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_hermitenorm(5, False)
    y, v, m = sc.roots_hermitenorm(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_hermitenorm, 0)
    assert_raises(ValueError, sc.roots_hermitenorm, 3.3)
def test_roots_gegenbauer():
    """Gauss-Gegenbauer quadrature across a range of orders, including the
    alpha == 0 special case and argument validation."""
    rootf = lambda a: lambda n, mu: sc.roots_gegenbauer(n, a, mu)
    evalf = lambda a: lambda n, x: sc.eval_gegenbauer(n, a, x)
    weightf = lambda a: lambda x: (1 - x**2)**(a - 0.5)
    vgq = verify_gauss_quad
    # Tolerances per case are empirically tuned; larger rules get looser atol.
    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 5)
    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 25, atol=1e-12)
    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 100, atol=1e-11)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 5)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 25, atol=1e-13)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 100, atol=1e-12)
    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 5)
    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 25, atol=1e-13)
    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 100, atol=1e-12)
    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 5)
    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 25, atol=1e-13)
    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 100, atol=1e-12)
    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 5, atol=1e-13)
    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 25, atol=1e-12)
    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 100, atol=1e-11)

    # Special case kept from the old implementation: at alpha == 0 the
    # Gegenbauer polynomials degenerate, and the quadrature is checked against
    # a (scaled) Chebyshev T_n rule instead.
    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 5)
    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 25)
    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 100, atol=1e-12)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_gegenbauer(5, 2, False)
    y, v, m = sc.roots_gegenbauer(5, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf(2), -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_gegenbauer, 0, 2)
    assert_raises(ValueError, sc.roots_gegenbauer, 3.3, 2)
    assert_raises(ValueError, sc.roots_gegenbauer, 3, -.75)
def test_roots_chebyt():
    """Gauss-Chebyshev (first kind) quadrature rules."""
    weight = orth.chebyt(5).weight_func
    # Orthonormality / integration checks at increasing rule sizes.
    for npts, kwargs in [(5, {}), (25, {}), (100, {'atol': 1e-12})]:
        verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weight,
                          -1., 1., npts, **kwargs)
    # mu=True returns the same nodes/weights plus the weight integral.
    nodes, weights = sc.roots_chebyt(5, False)
    nodes_mu, weights_mu, mu = sc.roots_chebyt(5, True)
    assert_allclose(nodes, nodes_mu, 1e-14, 1e-14)
    assert_allclose(weights, weights_mu, 1e-14, 1e-14)
    mu_ref, mu_err = integrate.quad(weight, -1, 1)
    assert_allclose(mu, mu_ref, rtol=mu_err)
    # Invalid orders are rejected.
    assert_raises(ValueError, sc.roots_chebyt, 0)
    assert_raises(ValueError, sc.roots_chebyt, 3.3)
def test_chebyt_symmetry():
    """Chebyshev nodes of an odd-size rule are symmetric about zero."""
    nodes, _ = sc.roots_chebyt(21)
    left_half = nodes[:10]
    right_half = nodes[11:]
    # Mirrored halves must match exactly and the middle node must be 0.
    assert_equal(right_half, -left_half[::-1])
    assert_equal(nodes[10], 0)
def test_roots_chebyu():
    """Gauss-Chebyshev (second kind) quadrature rules."""
    weight = orth.chebyu(5).weight_func
    # Orthonormality / integration checks at increasing rule sizes.
    for npts in (5, 25, 100):
        verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weight,
                          -1., 1., npts)
    # mu=True returns the same nodes/weights plus the weight integral.
    nodes, weights = sc.roots_chebyu(5, False)
    nodes_mu, weights_mu, mu = sc.roots_chebyu(5, True)
    assert_allclose(nodes, nodes_mu, 1e-14, 1e-14)
    assert_allclose(weights, weights_mu, 1e-14, 1e-14)
    mu_ref, mu_err = integrate.quad(weight, -1, 1)
    assert_allclose(mu, mu_ref, rtol=mu_err)
    # Invalid orders are rejected.
    assert_raises(ValueError, sc.roots_chebyu, 0)
    assert_raises(ValueError, sc.roots_chebyu, 3.3)
def test_roots_chebyc():
    """Gauss quadrature for Chebyshev C polynomials on [-2, 2]."""
    weightf = orth.chebyc(5).weight_func
    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 5)
    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 25)
    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 100, atol=1e-12)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_chebyc(5, False)
    y, v, m = sc.roots_chebyc(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -2, 2)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_chebyc, 0)
    assert_raises(ValueError, sc.roots_chebyc, 3.3)
def test_roots_chebys():
    """Gauss quadrature for Chebyshev S polynomials on [-2, 2]."""
    weightf = orth.chebys(5).weight_func
    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 5)
    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 25)
    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 100)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_chebys(5, False)
    y, v, m = sc.roots_chebys(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -2, 2)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_chebys, 0)
    assert_raises(ValueError, sc.roots_chebys, 3.3)
def test_roots_sh_chebyt():
    """Gauss quadrature for shifted Chebyshev T polynomials on [0, 1]."""
    weightf = orth.sh_chebyt(5).weight_func
    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 5)
    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 25)
    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1.,
                      100, atol=1e-13)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_sh_chebyt(5, False)
    y, v, m = sc.roots_sh_chebyt(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_chebyt, 0)
    assert_raises(ValueError, sc.roots_sh_chebyt, 3.3)
def test_roots_sh_chebyu():
    """Gauss quadrature for shifted Chebyshev U polynomials on [0, 1]."""
    weightf = orth.sh_chebyu(5).weight_func
    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 5)
    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 25)
    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1.,
                      100, atol=1e-13)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_sh_chebyu(5, False)
    y, v, m = sc.roots_sh_chebyu(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_chebyu, 0)
    assert_raises(ValueError, sc.roots_sh_chebyu, 3.3)
def test_roots_legendre():
    """Gauss-Legendre quadrature rules."""
    weight = orth.legendre(5).weight_func
    # Orthonormality / integration checks at increasing rule sizes.
    for npts, kwargs in [(5, {}), (25, {'atol': 1e-13}), (100, {'atol': 1e-12})]:
        verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weight,
                          -1., 1., npts, **kwargs)
    # mu=True returns the same nodes/weights plus the weight integral.
    nodes, weights = sc.roots_legendre(5, False)
    nodes_mu, weights_mu, mu = sc.roots_legendre(5, True)
    assert_allclose(nodes, nodes_mu, 1e-14, 1e-14)
    assert_allclose(weights, weights_mu, 1e-14, 1e-14)
    mu_ref, mu_err = integrate.quad(weight, -1, 1)
    assert_allclose(mu, mu_ref, rtol=mu_err)
    # Invalid orders are rejected.
    assert_raises(ValueError, sc.roots_legendre, 0)
    assert_raises(ValueError, sc.roots_legendre, 3.3)
def test_roots_sh_legendre():
    """Gauss quadrature for shifted Legendre polynomials on [0, 1]."""
    weightf = orth.sh_legendre(5).weight_func
    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1., 5)
    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1.,
                      25, atol=1e-13)
    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1.,
                      100, atol=1e-12)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_sh_legendre(5, False)
    y, v, m = sc.roots_sh_legendre(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_legendre, 0)
    assert_raises(ValueError, sc.roots_sh_legendre, 3.3)
def test_roots_laguerre():
    """Gauss-Laguerre quadrature rules on [0, inf)."""
    weight = orth.laguerre(5).weight_func
    # Orthonormality / integration checks at increasing rule sizes.
    for npts, kwargs in [(5, {}), (25, {'atol': 1e-13}), (100, {'atol': 1e-12})]:
        verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weight,
                          0., np.inf, npts, **kwargs)
    # mu=True returns the same nodes/weights plus the weight integral.
    nodes, weights = sc.roots_laguerre(5, False)
    nodes_mu, weights_mu, mu = sc.roots_laguerre(5, True)
    assert_allclose(nodes, nodes_mu, 1e-14, 1e-14)
    assert_allclose(weights, weights_mu, 1e-14, 1e-14)
    mu_ref, mu_err = integrate.quad(weight, 0, np.inf)
    assert_allclose(mu, mu_ref, rtol=mu_err)
    # Invalid orders are rejected.
    assert_raises(ValueError, sc.roots_laguerre, 0)
    assert_raises(ValueError, sc.roots_laguerre, 3.3)
def test_roots_genlaguerre():
    """Gauss quadrature for generalized Laguerre polynomials across a range
    of orders, plus argument validation."""
    rootf = lambda a: lambda n, mu: sc.roots_genlaguerre(n, a, mu)
    evalf = lambda a: lambda n, x: sc.eval_genlaguerre(n, a, x)
    weightf = lambda a: lambda x: x**a * np.exp(-x)
    vgq = verify_gauss_quad
    # Tolerances per case are empirically tuned; larger rules get looser atol.
    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 5)
    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 100, atol=1e-12)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 5)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 100, atol=1.6e-13)
    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 5)
    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 100, atol=1.03e-13)
    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 5)
    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 100, atol=1e-12)
    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 5)
    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 100, rtol=1e-14, atol=2e-13)

    # mu=True additionally returns the weight integral; values must agree.
    x, w = sc.roots_genlaguerre(5, 2, False)
    y, v, m = sc.roots_genlaguerre(5, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf(2.), 0., np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_genlaguerre, 0, 2)
    assert_raises(ValueError, sc.roots_genlaguerre, 3.3, 2)
    assert_raises(ValueError, sc.roots_genlaguerre, 3, -1.1)
def test_gh_6721():
    # Regression test for gh-6721: evaluating a high-degree Chebyshev
    # polynomial should not raise.
    sc.chebyt(65)(0.2)
|
|
import unittest
from os import path
import pysam
from cigar import Cigar
from mock import Mock
from pyfasta import Fasta
from clrsvsim.simulator import (
make_split_read,
modify_read,
modify_read_for_insertion,
invert_read,
unpack_cigar,
get_max_clip_len,
get_inverse_sequence,
overlap
)
# Directory holding the BAM/FASTA fixtures used by these tests.
TEST_DATA_DIR = path.join(path.dirname(path.realpath(__file__)), 'test_data')
class SplitReadTest(unittest.TestCase):
    """Tests for clrsvsim's read-manipulation primitives (split, modify,
    insert, invert, and helpers)."""

    def test_make_split_read(self):
        """Splitting a read clips the expected side at the breakpoint and
        honors the hard-clip threshold."""
        read = Mock()
        read.seq = 'A' * 20
        read.qual = '*' * len(read.seq)
        read.rlen = len(read.seq)
        read.qname = read.query_name = 'name'
        read.reference_start = 100
        read.cigarstring = '20M'
        alternate_seq = 'C' * 10 + 'T' * 10
        # Left split: the prefix is soft-clipped.
        split_read = make_split_read(read, 5, True, sequence=alternate_seq)
        self.assertEqual(split_read.seq, 'T' * 5 + 'A' * 15)
        self.assertEqual(split_read.cigarstring, '5S15M')
        # Right split: the suffix is soft-clipped.
        split_read = make_split_read(read, 5, False, sequence=alternate_seq)
        self.assertEqual(split_read.seq, 'A' * 5 + 'C' * 10 + 'T' * 5)
        self.assertEqual(split_read.cigarstring, '5M15S')
        # A low hard_clip_threshold converts the soft clip to a hard clip.
        split_read = make_split_read(read, 5, False, hard_clip_threshold=0.1, sequence=alternate_seq)
        self.assertEqual(split_read.seq, 'A' * 5 + 'C' * 10 + 'T' * 5)
        self.assertEqual(split_read.cigarstring, '5M15H')
        # A high threshold keeps the soft clip.
        split_read = make_split_read(read, 5, False, hard_clip_threshold=0.9, sequence=alternate_seq)
        self.assertEqual(split_read.seq, 'A' * 5 + 'C' * 10 + 'T' * 5)
        self.assertEqual(split_read.cigarstring, '5M15S')

    def test_make_split_read_bam_file(self):
        """Splitting real reads from a BAM fixture yields a clip at least as
        long as the split-off segment, on the expected side."""
        sorted_bam = path.join(TEST_DATA_DIR, 'sorted.bam')
        with pysam.Samfile(sorted_bam, 'rb') as samfile:
            for read in samfile:
                if not read.cigarstring:
                    continue
                for breakpoint in (10, 50, 100):
                    if breakpoint >= read.rlen:
                        continue
                    for is_left_split in (True, False):
                        split_read = make_split_read(read, breakpoint, is_left_split)
                        cigar_items = list(Cigar(split_read.cigarstring).items())
                        clipped_item = cigar_items[0] if is_left_split else cigar_items[-1]
                        min_clip_len = breakpoint if is_left_split else read.rlen - breakpoint  # Can be longer if adjacent to another clip.
                        self.assertGreaterEqual(clipped_item[0], min_clip_len)
                        self.assertIn(clipped_item[1], ('S', 'H'))  # Will be soft-clipped unless already hard-clipped.

    def test_modify_read(self):
        """Probability-1 SNP/insertion/deletion rates change every base."""
        read = Mock()
        read.seq = 'AAAAA'
        read.qname = 'test'
        # SNPs
        modified, changes = modify_read(read, 1, 0, 0)
        self.assertEqual(changes, len(modified.seq))
        self.assertEqual(len(modified.seq), len(read.seq))
        self.assertTrue(all([read.seq[i] != modified.seq[i] for i in range(len(read.seq))]))
        # Insertions
        modified, changes = modify_read(read, 0, 1, 0)
        self.assertEqual(changes, len(read.seq))
        self.assertEqual(len(modified.seq), len(read.seq) * 2)
        # Deletions
        modified, changes = modify_read(read, 0, 0, 1)
        self.assertEqual(changes, len(read.seq))
        self.assertEqual(len(modified.seq), 0)

    def test_modify_read_for_insertion(self):
        """Reads spanning an insertion are clipped on one side and filled
        from the inserted sequence; positions outside the read are no-ops."""
        read = Mock()
        read.seq = 'AAAAAA'
        read.qual = '*' * len(read.seq)
        read.qname = 'test'
        read.rlen = len(read.seq)
        read.reference_start = 100
        read.cigarstring = '{}M'.format(read.rlen)
        ins_position = 103
        ins_seq = 'CCCCCCCC'
        modified, changes = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0)
        self.assertEqual(changes, 0)
        # Read can either be modified to be on the left or right of the insertion
        self.assertIn(modified.seq, ('AAACCC', 'CCCAAA'))
        self.assertIn(modified.cigarstring, ('3M3S', '3S3M'))
        # Test padding
        modified, _ = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0, padding=2)
        self.assertIn(modified.seq, ('AAAACC', 'CCAAAA'))
        self.assertIn(modified.cigarstring, ('4M2S', '2S4M'))
        # Insertion positions beyond the read boundaries should not modify it
        for position in (0, 1000):
            modified, _ = modify_read_for_insertion(read, position, ins_seq, 0, 0, )
            self.assertEqual(read, modified)
        # Test limiting the maximum clip length
        modified, _ = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0, max_clip_len=3)
        self.assertIn(modified.cigarstring, ('3M3S', '3S3M'))  # clip len below max - allowed
        modified, _ = modify_read_for_insertion(read, ins_position, ins_seq, 0, 0, max_clip_len=2)
        self.assertEqual(read, modified)  # clip len above max - insertion should not happen

    def test_unpack_cigar(self):
        """unpack_cigar expands run-length CIGARs and rejects bad input."""
        for bad_cigar_string in (None, '', 'ok', '1', '1s2m', '1S2'):
            self.assertRaises(ValueError, unpack_cigar, bad_cigar_string)
        for cigar, unpacked in [
            ('1M', ['1M']),
            ('2M', ['1M', '1M']),
            ('2M1S', ['1M', '1M', '1S']),
            ('100S', ['1S'] * 100)
        ]:
            self.assertEqual(unpack_cigar(cigar), unpacked)

    def test_get_max_clip_len(self):
        """get_max_clip_len returns the longest clip run and rejects reads
        with no CIGAR."""
        read = Mock()
        read.cigarstring = None
        self.assertRaises(ValueError, get_max_clip_len, read)
        for cigar, max_len in [
            ('4M', 0),
            ('1S2M', 1),
            ('1S1M2S', 2),
            ('2S1M1S', 2),
            ('1M1S', 1)
        ]:
            read.cigarstring = cigar
            self.assertEqual(get_max_clip_len(read), max_len)

    def test_invert_read(self):
        """invert_read reverses the overlapped portion of the read and
        soft-clips it; non-overlapping inversions leave the read intact."""
        read = Mock()
        read.seq = '123456'
        read.qual = '*' * len(read.seq)
        read.qname = 'test'
        read.rlen = len(read.seq)
        read.reference_start = 100
        read.reference_end = read.reference_start + read.rlen
        read.cigarstring = '{}M'.format(read.rlen)

        def assert_inversion(read, start, end, sequence, expected_seq, expected_cigar):
            # Helper: run invert_read and compare sequence and CIGAR.
            inv, _ = invert_read(read, start, end, sequence, 0, 0)
            msg_prefix = 'invert({}, {}--{})'.format(read.seq, start, end)
            self.assertEqual(inv.seq, expected_seq, '{}: {} != {}'.format(msg_prefix, inv.seq, expected_seq))
            self.assertEqual(inv.cigarstring, expected_cigar, '{}: {} != {}'.format(msg_prefix, inv.cigarstring, expected_cigar))

        # Inversions that are fully within the read
        for start, end, expected_seq, expected_cigar in [
            # fully contained, away from borders
            (102, 104, '124356', '2M4S'),
            (101, 104, '143256', '4S2M'),
            # fully contained, touching borders
            (100, 104, '432156', '4S2M'),
            (102, 106, '126543', '2M4S'),
            # spanning exactly the read
            (100, 106, '654321', '6S'),
            # edge cases
            (102, 102, '123456', '6M'),
            (102, 103, '123456', '6M'),
        ]:
            assert_inversion(read, start, end, '', expected_seq, expected_cigar)

        # The sequence that's inverted in the entire genome; only a subset will appear in each read.
        sequence = '9876543210'
        for start, expected_seq, expected_cigar in [
            # inversion ends before the read
            (80, '123456', '6M'),
            (90, '123456', '6M'),
            # inversion starts before the read, and extends into it
            (91, '023456', '1S5M'),
            (92, '103456', '2S4M'),
            (93, '210456', '3S3M'),
            (94, '321056', '4S2M'),
            (95, '432106', '5S1M'),
            # read is fully contained in the inversion
            (96, '543210', '6S'),
            (97, '654321', '6S'),
            (98, '765432', '6S'),
            (99, '876543', '6S'),
            (100, '987654', '6S'),
            # inversion starts mid-read
            (101, '198765', '1M5S'),
            (102, '129876', '2M4S'),
            (103, '123987', '3M3S'),
            (104, '123498', '4M2S'),
            (105, '123459', '5M1S'),
            # inversion starts past the read
            (106, '123456', '6M'),
            (110, '123456', '6M'),
        ]:
            assert_inversion(read, start, start + len(sequence), sequence, expected_seq, expected_cigar)

    def test_get_inverse_sequence(self):
        """get_inverse_sequence builds the reversed sequence from BAM reads,
        padding uncovered positions with 'N' or the reference genome."""
        # Reads represented in this file (start position = 100):
        #
        #  ACGTACGTAC
        #   ACGTCCGTAC
        #    CGTCCGTACT
        #     CGTCCGAACT
        #      GTCCGAACTT
        #       TCCGAACTTC
        #        CCGAACTTAA
        #         CCGAACTTAA
        #          CCGAACTTAG
        #           CGAACTTAGC
        #
        bam = path.join(TEST_DATA_DIR, 'sv_sim.bam')
        self.assertEqual(get_inverse_sequence(bam, '1', 100, 102), 'GT')
        self.assertEqual(get_inverse_sequence(bam, '1', 108, 111), 'AGT')
        # Inversion of an area with no reads, no ref genome provided
        self.assertEqual(get_inverse_sequence(bam, '1', 98, 102), 'GTNN')
        self.assertEqual(get_inverse_sequence(bam, '1', 0, 100), 'N' * 100)
        # Inversion of an area with no reads, ref genome provided
        ref_genome_fa = Fasta(path.join(TEST_DATA_DIR, 'sv_sim.fa'))
        self.assertEqual(get_inverse_sequence(bam, '1', 0, 4, ref_genome_fa), 'AAAA')
        self.assertEqual(get_inverse_sequence(bam, '1', 98, 102, ref_genome_fa), 'GTAA')

    def test_overlap(self):
        """overlap returns the length of the intersection of two half-open
        intervals (0 when disjoint or touching)."""
        self.assertEqual(overlap((0, 0), (0, 0)), 0)
        self.assertEqual(overlap((0, 1), (0, 1)), 1)
        self.assertEqual(overlap((0, 1), (1, 1)), 0)
        self.assertEqual(overlap((0, 1), (0, 2)), 1)
        self.assertEqual(overlap((0, 1), (1, 2)), 0)
        self.assertEqual(overlap((0, 2), (1, 2)), 1)
        self.assertEqual(overlap((0, 2), (1, 3)), 1)
        self.assertEqual(overlap((0, 2), (0, 3)), 2)
        self.assertEqual(overlap((0, 2), (2, 4)), 0)
        self.assertEqual(overlap((0, 3), (1, 2)), 1)
        self.assertEqual(overlap((0, 4), (1, 3)), 2)
        self.assertEqual(overlap((0, 4), (2, 4)), 2)
        self.assertEqual(overlap((0, 4), (0, 2)), 2)

    # TODO: add tests for:
    #   inversion directly from BAM
    #   inversion of an area that has no reads in the BAM
    #   max clip len
|
|
chemdner_sample_base = "corpora/CHEMDNER/CHEMDNER_SAMPLE_JUNE25/"
cpatents_sample_base = "corpora/CHEMDNER-patents/chemdner_cemp_sample_v02/"
pubmed_test_base = "corpora/pubmed-test/"
transmir_base = "corpora/transmir/"
chemdner2017_base = "corpora/CHEMDNER2017/"
chemdner2017_1k = "corpora/CHEMDNER2017_1k/"
mirnacorpus_base = "corpora/miRNACorpus/"
mirtex_base = "corpora/miRTex/"
jnlpba_base = "corpora/JNLPBA/"
# The distant-supervision miRNA corpus is split into 10 numbered pickle shards.
# Register one ``paths`` entry per shard, plus its annotated counterpart.
paths = {}
for shard in range(1, 11):
    paths["mirna_ds{}".format(shard)] = {
        'corpus': "corpora/mirna-ds/abstracts_11k.txt_{}.pickle".format(shard),
        'format': "mirna",
        'annotations': "",
    }
    paths["mirna_ds_annotated{}".format(shard)] = {
        'corpus': "data/mirna_ds_annotated_{}.pickle".format(shard),
        'format': "mirna",
        'annotations': "",
    }
# Registry of corpora: each key maps to the source text, the gold annotations,
# the location of the pickled corpus, and the parser format used to load it.
# Directory prefixes come from the *_base constants defined above.
paths.update({
    ### miRNA corpus (Bagewadi 2013)
    'miRNACorpus_train': {
        'text': mirnacorpus_base + "miRNA-Train-Corpus.xml",
        'annotations': mirnacorpus_base + "miRNA-Train-Corpus.xml",
        'corpus': "data/miRNA-Train-Corpus.xml.pickle",
        'format': "ddi-mirna"
    },
    'miRNACorpus_test': {
        'text': mirnacorpus_base + "miRNA-Test-Corpus.xml",
        'annotations': mirnacorpus_base + "miRNA-Test-Corpus.xml",
        'corpus': "data/miRNA-Test-Corpus.xml.pickle",
        'format': "ddi-mirna"
    },
    ### miRTex corpus (Li 2015)
    'miRTex_dev': {
        'text': mirtex_base + "development/",
        'annotations': mirtex_base + "development/",
        'corpus': "data/miRTex-development.txt.pickle",
        'format': "mirtex"
    },
    'miRTex_test': {
        'text': mirtex_base + "test/",
        'annotations': mirtex_base + "test/",
        'corpus': "data/miRTex-test.txt.pickle",
        'format': "mirtex"
    },
    ### Lurie Children's corpus (brat standoff annotations)
    'lurie_train': {
        'text': "corpora/luriechildren/train/texts/",
        'annotations': "corpora/luriechildren/train/annotations/",
        'corpus': "data/luriechildren_train.txt.pickle",
        'format': "brat"
    },
    'lurie_test': {
        'text': "corpora/luriechildren/test/texts/",
        'annotations': "corpora/luriechildren/test/annotations/",
        'corpus': "data/luriechildren_test.txt.pickle",
        'format': "brat"
    },
    ### miRNA corpora without gold annotations (annotations left empty)
    'mirna_cf': {
        'corpus': "corpora/cf_corpus/abstracts.txt.pickle",
        'format': "mirna",
        'annotations': ""
    },
    'mirna_cf_annotated': {
        'corpus': "data/mirna_cf_annotated.pickle",
        'format': "mirna",
        'annotations': ""
    },
    'mirna_ds': {
        'corpus': "corpora/mirna-ds/abstracts_11k.txt.pickle",
        'format': "mirna",
        'annotations': ""
    },
    # NOTE(review): unlike the numbered mirna_ds_annotated{i} entries (which
    # live under data/), this one points into corpora/ -- confirm intentional.
    'mirna_ds_annotated': {
        'corpus': "corpora/mirna-ds/mirna_ds_annotated.pickle",
        'format': "mirna",
        'annotations': ""
    },
    ### BioNLP/NLPBA 2004 (GENIA version 3.02)
    'jnlpba_train': {
        'text': jnlpba_base + "train/Genia4ERtask2.iob2",
        'annotations': jnlpba_base + "train/Genia4ERtask2.iob2",
        'corpus': "data/Genia4ERtask1.raw.pickle",
        'format': "jnlpba"
    },
    'jnlpba_test': {
        'text': jnlpba_base + "test/Genia4EReval2.iob2",
        'annotations': jnlpba_base + "test/Genia4EReval2.iob2",
        'corpus': "data/Genia4EReval1.raw.pickle",
        'format': "jnlpba"
    },
    ### TransmiR corpus
    'transmir': {
        'text': "data/transmir_v1.2.tsv",
        'annotations': "data/transmir_v1.2.tsv",
        'corpus': "data/transmir_v1.2.tsv.pickle",
        'format': "transmir"
    },
    'transmir_annotated': {
        'text': "data/transmir_v1.2.tsv",
        'annotations': "data/transmir_v1.2.tsv",
        'corpus': "data/transmir_annotated.pickle",
        'format': "transmir"
    },
    ### PubMed abstracts fetched by PMID (no gold annotations)
    'pubmed_test': {
        'text': pubmed_test_base + "pmids_test.txt",
        'annotations': "",
        'corpus': "data/pmids_test.txt.pickle",
        'format': "pubmed"
    },
    ### BioCreative '15 CHEMDNER subtask
    'cemp_sample':{
        'text': cpatents_sample_base + "chemdner_patents_sample_text.txt",
        'annotations': cpatents_sample_base + "chemdner_cemp_gold_standard_sample.tsv",
        'cem': cpatents_sample_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_patents_sample_text.txt.pickle",
        'format': "chemdner",
    },
    ### BioCreative V.5 CHEMDNER 2017 (train/dev/eval/test splits, plus
    ### "ensemble" variants that only differ in the pickled corpus location)
    'chemdner2017':{
        'text': chemdner2017_base + "BioCreative V.5 training set.txt",
        'annotations': chemdner2017_base + "CEMP_BioCreative V.5 training set annot.tsv",
        #'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_v5_text.txt.pickle",
        'format': "chemdner",
    },
    'chemdner2017_train':{
        'text': chemdner2017_base + "chemdner2017_training_set.tsv",
        'annotations': chemdner2017_base + "train_annotations.tsv",
        #'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_train_text.txt.pickle",
        'format': "chemdner",
    },
    'chemdner2017_dev': {
        'text': chemdner2017_base + "chemdner2017_development_set.tsv",
        'annotations': chemdner2017_base + "dev_annotations.tsv",
        # 'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_dev_text.txt.pickle",
        'format': "chemdner",
    },
    'ensemble_chemdner_dev': {
        'text': chemdner2017_base + "chemdner2017_development_set.tsv",
        'annotations': chemdner2017_base + "dev_annotations.tsv",
        # 'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_dev_ensemble.pickle",
        'format': "chemdner",
    },
    'ensemble_chemdner_dev_ssm': {
        'text': chemdner2017_base + "chemdner2017_development_set.tsv",
        'annotations': chemdner2017_base + "dev_annotations.tsv",
        # 'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_dev_ensemble_ssm.pickle",
        'format': "chemdner",
    },
    'chemdner2017_eval': {
        'text': chemdner2017_base + "chemdner2017_eval_set.tsv",
        'annotations': chemdner2017_base + "eval_annotations.tsv",
        'cem': chemdner2017_base + "eval_annotations.tsv",
        'corpus': "data/chemdner_eval_text.txt.pickle",
        'format': "chemdner",
    },
    'ensemble_chemdner_eval': {
        'text': chemdner2017_base + "chemdner2017_eval_set.tsv",
        'annotations': chemdner2017_base + "eval_annotations.tsv",
        'cem': chemdner2017_base + "eval_annotations.tsv",
        'corpus': "data/chemdner_eval_ensemble.pickle",
        'format': "chemdner",
    },
    'ensemble_chemdner_eval_ssm': {
        'text': chemdner2017_base + "chemdner2017_eval_set.tsv",
        'annotations': chemdner2017_base + "eval_annotations.tsv",
        'cem': chemdner2017_base + "eval_annotations.tsv",
        'corpus': "data/chemdner_eval_ensemble_ssm.pickle",
        'format': "chemdner",
    },
    # Test-set entries have no gold annotations available.
    'chemdner2017_test':{
        'text': chemdner2017_base + "test_set_patents_BioCreative_V.5.tsv",
        'annotations': "",
        #'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_test_text.txt.pickle",
        'format': "chemdner",
    },
    'ensemble_chemdner_test': {
        'text': chemdner2017_base + "test_set_patents_BioCreative_V.5.tsv",
        'annotations': "",
        # 'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_test_ensemble.pickle",
        'format': "chemdner",
    },
    'ensemble_chemdner_test_ssm': {
        'text': chemdner2017_base + "test_set_patents_BioCreative_V.5.tsv",
        'annotations': "",
        # 'cem': chemdner2017_base + "chemdner_cemp_gold_standard_sample_eval.tsv",
        'corpus': "data/chemdner_test_ensemble_ssm.pickle",
        'format': "chemdner",
    },
})
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the 'website_userpageview' table.

        Each row records a URL view with an optional timestamp, optionally
        tied to an authenticated ``auth.User``.
        """
        # Adding model 'UserPageView'
        db.create_table('website_userpageview', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=210, null=True, blank=True)),
            ('view_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        # Notify South that the model's table now exists so post-create
        # signals (e.g. content-type creation) fire.
        db.send_create_signal('website', ['UserPageView'])
    def backwards(self, orm):
        """Reverse the migration: drop the 'website_userpageview' table."""
        # Deleting model 'UserPageView'
        db.delete_table('website_userpageview')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '210', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
|
##############################################################
# Shared constants used across the various module files.
# If you need to define a value that will be used in multiple
# files, define it here rather than copying it into each file,
# so that it is easy to change in one place if you need to.
##############################################################
########################################################
## PLAYER SLOTS #############################
########################################################
# Per-player data slot indices.
slot_player_faction_id = 0
slot_player_spawn_state = 1 # listed below, starting with player_spawn_state_
slot_player_spawn_invulnerable_time = 2 # mission time when the player spawned with temporary invulnerability
slot_player_spawn_health_percent = 3 # saved health percentage to be applied when next spawning
slot_player_spawn_entry_point = 4 # entry point used at last spawn
# Values stored in slot_player_spawn_state:
player_spawn_state_dead = 0
player_spawn_state_invulnerable = 1 # while invulnerable soon after spawning
player_spawn_state_at_marker = 2 # set before spawning to indicate that the agent should be shifted to the player's marker scene prop
player_spawn_state_alive = 3
slot_player_inactive_index = 5 # index in the inactive players array, if stored
slot_player_next_chat_event_type = 6 # next chat event number that the server expects this player's client to use
slot_player_list_button_id = 7 # overlay id in the player list presentation
slot_player_outlaw_rating = 8
slot_player_is_lord = 9
slot_player_non_lord_troop_id = 10 # the last troop used before changing to a lord only troop, to revert after respawning if someone else is voted lord
slot_player_poll_faction_id = 11 # marks whether the player can vote in the current poll
slot_player_requested_spawn_point = 12 # the spawn point requested by the player after dying, if any; -1 to indicate a newly connected player that hasn't yet requested to spawn
slot_player_has_faction_door_key = 13
slot_player_has_faction_money_key = 14
slot_player_has_faction_item_key = 15
slot_player_teleport_to_ship_no = 16 # instance no of the last ship teleported to with the admin tool
slot_player_last_faction_kicked_from = 17 # stores when kicked from a faction, so subsequent kicks can be free of cost
slot_player_accessing_instance_id = 18 # stores the instance id of the inventory currently being accessed by the player, for updates if anyone else changes it
slot_player_last_action_time = 19 # mission time of the last action that should be prevented from quick repetition
slot_player_equip_item_0 = 20 # module equipment slots corresponding to the hard coded ones in header_items starting with ek_
slot_player_equip_item_1 = 21
slot_player_equip_item_2 = 22
slot_player_equip_item_3 = 23
slot_player_equip_head = 24
slot_player_equip_body = 25
slot_player_equip_foot = 26
slot_player_equip_gloves = 27
slot_player_equip_horse = 28
slot_player_equip_end = 29
slot_player_equip_item_0_ammo = 30
slot_player_equip_item_1_ammo = 31
slot_player_equip_item_2_ammo = 32
slot_player_equip_item_3_ammo = 33
slot_player_spawn_food_amount = 34 # saved food for next spawn
slot_player_faction_chat_muted = 35
slot_player_kick_at_time = 36 # time to kick a player after the name server has rejected them, to allow time to receive the message
slot_player_can_faction_announce = 37
slot_player_next_spawn_health_percent = 38 # spawn health percentage for the troop applied after death, if that server option is enabled
slot_player_accessing_unique_id = 39 # a unique number identifying an inventory scene prop being accessed that could despawn and the instance id be reused, like corpses
slot_player_admin_no_panel = 40 # admin permission slots: the default value 0 is permissive so everything works when a name server is not connected
slot_player_admin_no_gold = 41
slot_player_admin_no_kick = 42
slot_player_admin_no_temporary_ban = 43
slot_player_admin_no_permanent_ban = 44
slot_player_admin_no_kill_fade = 45
slot_player_admin_no_freeze = 46
slot_player_admin_no_teleport_self = 47
slot_player_admin_no_admin_items = 48
slot_player_admin_no_heal_self = 49
slot_player_admin_no_godlike_troop = 50
slot_player_admin_no_ships = 51
slot_player_admin_no_announce = 52
slot_player_admin_no_override_poll = 53
slot_player_admin_no_all_items = 54
slot_player_admin_no_mute = 55
slot_player_admin_no_animals = 56
slot_player_admin_no_factions = 57
slot_player_admin_end = 58
########################################################
## AGENT SLOTS #############################
########################################################
# Per-agent data slot indices (agents include horses and animals).
slot_agent_horse_last_rider = 0 # if a horse, the agent id of the last (or current) rider, or if stray, negative numbers counting down to when the horse will be removed
slot_agent_drowning_count = 1 # counts upwards each time an agent is found to be drowning underwater
slot_agent_poison_amount = 2 # increases each time the agent is attacked with poison, reduced when healed
slot_agent_poisoner_agent_id = 3 # agent id that last poisoned the agent
slot_agent_poisoner_player_uid = 4 # player unique id of the poisoner when applicable, to give correct death messages
slot_agent_freeze_instance_id = 5 # instance id of the invisible scene prop being used to freeze
slot_agent_is_targeted = 6 # mark that the stored target agent id is correct
slot_agent_food_amount = 7
slot_agent_fishing_last_school = 8 # last school fished from, to speed up repetitive check
slot_agent_last_horse_ridden = 9
slot_agent_money_bag_1_value = 10 # the values of the money bags picked up, in order
slot_agent_money_bag_2_value = 11
slot_agent_money_bag_3_value = 12
slot_agent_money_bag_4_value = 13
slot_agent_hunting_last_carcass = 14 # last animal carcass processed, to speed up repetitive checks
slot_agent_died_normally = 15
slot_agent_animation_end_time_ms = 16 # mission time in milliseconds
slot_agent_last_animation_string_id = 17
slot_agent_recent_animations_delay_ms = 18 # interval in milliseconds
slot_agent_storage_corpse_instance_id = 19 # saved when discarding armor
slot_agent_animal_herd_manager = 20 # instance id of the herd manager item attached to
slot_agent_animal_birth_time = 21 # mission time when the animal was spawned as a child, or extrapolated if spawned as an adult
slot_agent_animal_grow_time = 22 # mission time after which the animal will grow to an adult or birth a child
slot_agent_animal_move_time = 23 # mission time after which to move
slot_agent_animal_last_damage_time = 24
slot_agent_animal_food = 25
slot_agent_animal_carcass_instance_id = 26
slot_agent_animal_times_stuck = 27
slot_agent_animal_end = 28
# NOTE: indices 29-39 are left unused here (gap before the modifier factor slots).
slot_agent_head_damage_factor = 40 # agent modifier factors for armor slots
slot_agent_head_speed_factor = 41
slot_agent_head_accuracy_factor = 42
slot_agent_head_reload_factor = 43
slot_agent_body_damage_factor = 44
slot_agent_body_speed_factor = 45
slot_agent_body_accuracy_factor = 46
slot_agent_body_reload_factor = 47
slot_agent_foot_damage_factor = 48
slot_agent_foot_speed_factor = 49
slot_agent_foot_accuracy_factor = 50
slot_agent_foot_reload_factor = 51
slot_agent_hand_damage_factor = 52
slot_agent_hand_speed_factor = 53
slot_agent_hand_accuracy_factor = 54
slot_agent_hand_reload_factor = 55
slot_agent_armor_damage_factor = 56 # total agent modifier factors for armor
slot_agent_armor_speed_factor = 57
slot_agent_armor_accuracy_factor = 58
slot_agent_armor_reload_factor = 59
slot_agent_weapon_damage_factor = 60 # agent modifier factors for the wielded weapon
slot_agent_weapon_speed_factor = 61
slot_agent_weapon_accuracy_factor = 62
slot_agent_weapon_reload_factor = 63
slot_agent_cannot_attack = 64 # marks that any attack should be canceled
slot_agent_armor_damage_through = 65 # factor of letting damage received bleed through the armor
slot_agent_last_apply_factors_item_id = 66 # last item id that modifier factors were last checked for, to avoid duplicating calculations due to trigger activation quirks
########################################################
## SCENE PROP SLOTS #############################
########################################################
# Per-scene-prop data slot indices.
slot_scene_prop_item_id = 0 # main associated item id, for stockpiles and similar
slot_scene_prop_gold_value = 1 # preset gold value, or cached value of the associated item
slot_scene_prop_gold_multiplier = 2 # cached price multiplier of the associated item
slot_scene_prop_use_string = 3 # string id displayed when players look at the scene prop
slot_scene_prop_troop_id = 4 # for troop training stations
slot_scene_prop_full_hit_points = 5
slot_scene_prop_is_mercenary = 6 # 1 = stay associated with the faction that owned the castle at mission start, rather than changing with capture
slot_scene_prop_required_horse = 7 # horse item required for attaching to a cart
slot_scene_prop_average_craft_skill = 8
slot_scene_prop_is_resource_stockpile = 9 # marks stockpiles for raw resources, which have different sell price calculations
slot_scene_prop_linked_scene_prop = 10 # instance ids of linked scene props (base slot; deliberately the same index as _1 below)
slot_scene_prop_linked_scene_prop_1 = 10
slot_scene_prop_linked_scene_prop_2 = 11
slot_scene_prop_linked_scene_prop_3 = 12
slot_scene_prop_linked_scene_prop_4 = 13
linked_scene_prop_slot_count = 4
# Role-specific aliases for the generic linked scene prop slots above:
slot_scene_prop_linked_sail = slot_scene_prop_linked_scene_prop_1
slot_scene_prop_linked_sail_off = slot_scene_prop_linked_scene_prop_2
slot_scene_prop_linked_ramp = slot_scene_prop_linked_scene_prop_3
slot_scene_prop_linked_hold = slot_scene_prop_linked_scene_prop_4
slot_scene_prop_linked_platform_1 = slot_scene_prop_linked_scene_prop_1
slot_scene_prop_linked_platform_2 = slot_scene_prop_linked_scene_prop_2
slot_scene_prop_linked_ferry_winch = slot_scene_prop_linked_scene_prop_3
slot_scene_prop_position = 15 # multiple meanings, mostly ships and carts - use with care
slot_scene_prop_target_position = 16 # used for ships
slot_scene_prop_rotation = 17 # multiple meanings, for ships, carts, and doors - use with care
slot_scene_prop_target_rotation = 18 # used for ships
slot_scene_prop_max_position = slot_scene_prop_rotation
slot_scene_prop_max_distance = slot_scene_prop_target_rotation
slot_scene_prop_attached_to_agent = 19 # store agent id attached to
slot_scene_prop_controlling_agent = 20 # agent id steering the ship
slot_scene_prop_length = 21 # multiple meanings - use with care
slot_scene_prop_width = 22 # multiple meanings - use with care
slot_scene_prop_height = 23 # multiple meanings - use with care
slot_scene_prop_collision_kind = 24 # collision testing scene prop kind for ships; set to -1 for scene props that should never be checked for collision with ships
slot_scene_prop_speed_limit = 25 # used for ships
slot_scene_prop_no_move_physics = 26 # whether to disable physics when moving, so agents can't ride on the prop
slot_scene_prop_capture_faction_id = 27 # faction that has captured this prop individually, rather than the castle it belongs to
slot_scene_prop_next_resource_hp = 30 # hit points when the next resource item should be produced
slot_scene_prop_state = 31 # constants below starting with scene_prop_state_
slot_scene_prop_state_time = 32 # mission time involved with changing state, if appropriate
slot_scene_prop_stock_count = 33
slot_scene_prop_stock_count_update_time = 34 # on clients, time of the last stock count update, to prevent quickly repeated requests
slot_scene_prop_unlocked = 35
slot_scene_prop_regrow_script = 36 # script id to call when finished regrowing
slot_scene_prop_resource_item_id = 37
slot_scene_prop_prune_time = 38 # mission time when a spawned item scene prop will be pruned
slot_scene_prop_resources_default_cost = 39
slot_scene_prop_water = 40
slot_scene_prop_seeds = 41
slot_scene_prop_show_linked_hit_points = 45
scene_prop_state_active = 0
scene_prop_state_destroyed = 1
scene_prop_state_hidden = 2
scene_prop_state_regenerating = 3
slot_scene_prop_inventory_targeted = 196 # instance id of targeted inventory, used with item scene props
slot_scene_prop_inventory_unique_id = 197 # unique number identifying spawned item scene props
slot_scene_prop_inventory_max_length = 198 # maximum length of items inside the container
slot_scene_prop_inventory_count = 199 # number of inventory slots for this container
slot_scene_prop_inventory_begin = 200 # item ids in container
slot_scene_prop_inventory_item_0 = 290 # item ids in player equipment
slot_scene_prop_inventory_ammo_begin = 300 # ammo counts in container
slot_scene_prop_inventory_mod_begin = 400 # item changes in container needing presentation updates
slot_scene_prop_inventory_mod_item_0 = 490 # item changes in player equipment needing presentation updates
slot_scene_prop_inventory_obj_begin = 500 # container slot overlay ids
slot_scene_prop_inventory_obj_item_0 = 590 # player equipment overlay ids
slot_scene_prop_inventory_mesh_begin = 600 # container item mesh overlay ids
slot_scene_prop_inventory_mesh_item_0 = 690 # player equipment item mesh overlay ids
slot_scene_prop_inventory_end = slot_scene_prop_inventory_ammo_begin
slot_scene_prop_inventory_mod_end = slot_scene_prop_inventory_obj_begin
slot_scene_prop_inventory_obj_end = slot_scene_prop_inventory_mesh_begin
inventory_count_maximum = slot_scene_prop_inventory_item_0 - slot_scene_prop_inventory_begin
corpse_inventory_slots = 5 # coded into the module so values are the same on the server and clients
corpse_inventory_max_length = 100
slot_animal_herd_manager_adult_item_id= 100
slot_animal_herd_manager_starving = 101
slot_animal_carcass_meat_count = 100
slot_animal_carcass_hide_count = 101
########################################################
## ITEM SLOTS #############################
########################################################
slot_item_difficulty = 0
slot_item_length = 1
slot_item_class = 2 # listed below, starting with item_class_
slot_item_resource_amount = 3 # resource amount of the item class
slot_item_gender = 4 # 0 = male or anyone, 1 = female only
slot_item_max_ammo = 5
slot_item_bonus_against_wood = 6
slot_item_couchable = 7
slot_item_has_attack_requirements = 8
slot_item_max_raw_damage = 9 # maximum out of swing and thrust damage
item_class_none = 0
item_class_repair = 1
item_class_wood_cutting = 2
item_class_wood = 3
item_class_mining = 4
item_class_iron = 5
item_class_lock_pick = 6
item_class_heraldic = 7 # marks the item to be redrawn when the banner changes
item_class_precious = 8
item_class_food = 9
item_class_grain_harvesting = 10
item_class_knife = 11
item_class_cloth = 12
item_class_leather = 13
item_class_herding_calm = 14
item_class_herding_rouse = 15
slot_item_animal_adult_item_id = 20
slot_item_animal_child_item_id = 21
slot_item_animal_grow_time = 22
slot_item_animal_max_in_herd = 23
slot_item_animal_attack_reaction = 24 # listed below, starting with animal_reaction_
slot_item_animal_death_sound = 25
slot_item_animal_meat_count = 26
slot_item_animal_hide_count = 27
slot_item_animal_wildness = 28 # higher values have greater unpredictability when herded or attacked
animal_reaction_flee = 0
animal_reaction_charge = 1
########################################################
## FACTION SLOTS #############################
########################################################
slot_faction_banner_mesh = 0
slot_faction_name_is_custom = 1 # 1 if the name has been changed from the default
slot_faction_is_active = 2 # 1 if the faction has at least one capture point associated with their castle at mission start
slot_faction_lord_player_uid = 3 # player unique id of the faction lord
slot_faction_lord_last_seen_time = 4
slot_faction_castle_banner_variant = 5 # work around an unwanted engine optimization: change tableau id used when changing faction banners to force them to update
slot_faction_list_button_id = 6 # overlay id in the faction list presentation
slot_faction_is_locked = 7 # 1 if an administrator locked the faction to prevent lord polls
slot_faction_poll_end_time = 20
slot_faction_poll_voter_count = 21
slot_faction_poll_yes_votes = 22
slot_faction_poll_no_votes = 23
slot_faction_poll_type = 24 # listed below, starting with poll_type_
slot_faction_poll_value_1 = 25
slot_faction_poll_value_2 = 26
slot_faction_poll_target_unique_id = 27 # when targeting a player, store their unique id to prevent accidentally harming another player reusing their id after they quit
poll_type_change_scene = 0
poll_type_kick_player = 1
poll_type_ban_player = 2
poll_type_faction_lord = 10
poll_cost_change_scene = 1000
poll_cost_kick_player = 500
poll_cost_ban_player = 700
poll_cost_faction_lord = 1000
poll_vote_no = 0
poll_vote_yes = 1
poll_vote_admin_no = 2
poll_vote_admin_yes = 3
poll_vote_abstain = 4
poll_result_no = -1
poll_result_yes = -2
poll_result_admin_no = -3
poll_result_admin_yes = -4
poll_result_existing = -5
poll_result_invalid = -6
poll_result_color = 0xFF0000
slot_faction_relations_begin = 30
faction_cost_change_banner = 500
faction_cost_change_name = 500
faction_cost_kick_player = 500
faction_cost_outlaw_player = 1000
########################################################
## SCENE SLOTS #############################
########################################################
########################################################
## TROOP SLOTS #############################
########################################################
troop_slot_count_per_equipment_type = 5
slot_troop_equipment_one_hand_begin = 0
slot_troop_equipment_two_hand_begin = 1 * troop_slot_count_per_equipment_type
slot_troop_equipment_ranged_begin = 2 * troop_slot_count_per_equipment_type
slot_troop_equipment_ammo_begin = 3 * troop_slot_count_per_equipment_type
slot_troop_equipment_shield_begin = 4 * troop_slot_count_per_equipment_type
slot_troop_equipment_head_begin = 5 * troop_slot_count_per_equipment_type
slot_troop_equipment_body_begin = 6 * troop_slot_count_per_equipment_type
slot_troop_equipment_foot_begin = 7 * troop_slot_count_per_equipment_type
slot_troop_equipment_hand_begin = 8 * troop_slot_count_per_equipment_type
slot_troop_equipment_horse_begin = 9 * troop_slot_count_per_equipment_type
slot_troop_ranking = 50 # used for sorting troop types in the player stats chart
slot_troop_spawn_health_percent = 51 # respawn health percentage when dying as this troop
slot_player_array_size = 0
slot_player_array_begin = 1
player_array_unique_id = 0
player_array_troop_id = 1
player_array_faction_id = 2
player_array_gold_value = 3
player_array_outlaw_rating = 4
player_array_entry_size = 5 # number of values stored in the disconnected players array
max_castle_count = 8
slot_mission_data_castle_owner_faction_begin = 0 # owner factions of all castles
slot_mission_data_castle_owner_faction_end = 8
slot_mission_data_castle_is_active_begin = 10 # flags of which castles are active, with at least 1 capture point
slot_mission_data_castle_is_active_end = 18
slot_mission_data_castle_name_string_begin = 20 # string ids for castle names
slot_mission_data_castle_name_string_end = 28
slot_mission_data_castle_money_chest_begin = 30 # instance ids of the main money chest linked to each castle
slot_mission_data_castle_money_chest_end = 38
slot_mission_data_castle_allows_training_begin = 40 # flags of which active castles have at least one linked training station
slot_mission_data_castle_allows_training_end = 48
slot_mission_data_faction_to_change_name_of = 100 # store the faction id for the next faction name change message
slot_last_chat_message_event_type = 0 # for the last chat message sent: network event number, combined with a type from the list below starting with chat_event_type_
slot_last_chat_message_not_recieved = 1 # mark that the server has not notified of receiving the last chat message
chat_event_type_local = 0 # for each chat type, holding shift while pressing enter will add 1 to the type
chat_event_type_local_shout = 1
chat_event_type_set_faction_name = 2
chat_event_type_faction = 4
chat_event_type_faction_announce = 5
chat_event_type_admin = 6
chat_event_type_admin_announce = 7
slot_chat_overlay_local_color = 0
slot_chat_overlay_faction_color = 1
slot_ship_array_count = 0 # count of ship instance ids in the scene
slot_ship_array_begin = 1 # array of ship instance ids
slot_ship_array_collision_props_count = 100 # stored instance ids of scene props near water level, for checking collision with ships
slot_ship_array_collision_props_begin = 101
slot_array_count = 0
slot_array_begin = 1
########################################################
## TEAM SLOTS #############################
########################################################
########################################################
spawn_invulnerable_time = 10 # time agents are invulnerable after freshly spawning
loop_player_check_outlaw_interval = 60
loop_agent_check_interval = 2
loop_horse_check_interval = 30
loop_health_check_interval = 29
stock_count_check_interval = 5 # don't request stock count updates of the scene prop aimed at more often than this
repeat_action_min_interval = 5 # prevent players from repeating certain potentially expensive actions more often than this
carcass_search_min_interval = 5 # only search for a different animal carcass to process after this interval from the last
poll_time_duration = 60
name_server_kick_delay_interval = 5 # delay before kicking from the server to allow the rejection message to be received
def sq(distance):
    # get_sq_distance_between_positions always uses fixed point multiplier 100,
    # so square the raw distance and scale back down by that multiplier.
    squared = distance * distance
    return squared / 100
max_distance_to_play_sound = 10000
max_distance_to_see_labels = 1500
max_distance_horse_rider = 5000
max_distance_local_chat = 3000
max_distance_local_chat_shout = 5000
ambient_distance_local_chat = 1000
ambient_distance_local_chat_shout = 2000
max_distance_local_animation = 2500
z_position_to_hide_object = -4999 # lower values might cause the position to "wrap around" up into the sky
z_position_water_level = -30 # approximate visible water level based on tests
max_distance_to_use = 300
max_distance_to_loot = 100
max_distance_admin_cart = 2000 # allow admins in their armor to attach carts from greater distances
max_distance_to_catch_fish = 2000
fish_school_max_move_distance = 500
fish_school_min_move_distance = 200
fish_school_minimum_depth = 200 # minimum water depth that a fish school will move into
fish_spawn_time = 300 # time before pruning fish items spawned
max_distance_to_include_in_herd = 5000 # when searching for a herd for an animal
castle_tax_gold_percentage = 20 # percentage of item price subtracted for selling price and added to the linked castle chest when bought
castle_training_gold_percentage = 50 # percentage of training cost added to the linked castle chest
craft_price_gold_reward_percentage = 20 # percentage of item price given to the crafter proportional to difference from target stock count
craft_skill_gold_reward_multiplier = 300 # multiplier of crafting skill required given to the crafter proportional to difference from target stock count
base_export_percentage = 100 # default percentage of item price for export stations
reduction_factor_base = 90
armor_damage_reduction_factor = 10
head_armor_speed_reduction_factor = 10
head_armor_accuracy_reduction_factor = 50
head_armor_reload_reduction_factor = 20
body_armor_speed_reduction_factor = 20
body_armor_accuracy_reduction_factor = 30
body_armor_reload_reduction_factor = 10
foot_armor_speed_reduction_factor = 30
foot_armor_accuracy_reduction_factor = 5
foot_armor_reload_reduction_factor = 5
hand_armor_speed_reduction_factor = 5
hand_armor_accuracy_reduction_factor = 30
hand_armor_reload_reduction_factor = 10
melee_damage_reduction_factor = 25
melee_speed_reduction_factor = 5
crossbow_damage_reduction_factor = 15
crossbow_speed_reduction_factor = 5
crossbow_accuracy_reduction_factor = 30
crossbow_reload_reduction_factor = 30
bow_thrown_damage_reduction_factor = 30
bow_thrown_speed_reduction_factor = 5
bow_thrown_accuracy_reduction_factor = 20
melee_max_level_difference = 3 # max strength difference to be able to swing a melee weapon
crossbow_max_level_difference = 4 # max strength difference to be able to shoot a crossbow
bow_ranged_max_level_difference = 3 # max power draw or power throw difference to be able to shoot a bow or throw a weapon
winch_type_drawbridge = 0
winch_type_portcullis = 1
winch_type_platform = 2
winch_type_sliding_door = 3
repairable_hit = 0
repairable_destroyed = 1
repairable_hit_destroyed = 2
repairable_repairing = 3
repairable_resource_required = 4
repairable_repaired = 5
ship_station_not_on_ship = 0
ship_station_none = 1
ship_station_mast = 2
ship_station_rudder = 3
ship_forwards_maximum = 9 # maximum forwards speed - also limited by ship type and agent skill
ship_rotation_maximum = 5 # maximum turning speed
ship_forwards_multiplier = 100
ship_rotation_multiplier = 3
player_list_item_height = 20
escape_menu_item_height = 35
admin_panel_item_height = 40
action_menu_item_height = 23
faction_menu_item_height = 120
animation_menu_item_height = 32
chat_overlay_item_height = 17
chat_overlay_ring_buffer_begin = "trp_chat_overlay_ring_buffer_0"
chat_overlay_ring_buffer_end = "trp_chat_overlay_ring_buffer_end"
chat_overlay_ring_buffer_size = 11
local_chat_color = 0xFFFFDD8A
local_chat_shout_color = 0xFFFF8C27
local_animation_color = 0xFFFFBBAA
admin_chat_color = 0xFFFF00FF
invalid_faction_color = 0xFF888888
outlaw_rating_for_kill = 2
outlaw_rating_for_team_kill = 5
outlaw_rating_for_lord_outlawed = 4
outlaw_rating_outlawed = 15 # outlaw players when they get this rating
outlaw_rating_maximum = 30 # don't increase the rating beyond this
change_faction_type_respawn = 0 # changing faction when training
change_faction_type_no_respawn = 1 # changing faction by clicking the use control, to the same troop type or one that allows it
change_faction_type_outlawed = 2 # being forced to change when outlawed, without respawning
capture_point_type_primary = 0 # after the required secondary points are captured, take over the castle
capture_point_type_secondary_all = 1 # require taking all secondary capture points of this type
capture_point_type_secondary_one = 2 # require taking at least one secondary capture point of this type
redraw_all_banners = 0 # at mission start on the server
redraw_castle_banners = 1 # when a castle is captured
redraw_faction_banners = 2 # when a faction lord changes their banner
redraw_client_banner_positions = 3 # at mission start on a client, to work around engine quirks with spawned items
redraw_single_capture_point_banner = 4 # when a secondary point is captured
inventory_slots_per_row = 6
inventory_slot_spacing = 100
inventory_mesh_offset = 50
inventory_container_x_offset = 190
inventory_container_y_offset = 175
scene_prop_hit_points_bar_scale_x = 6230
scene_prop_hit_points_bar_scale_y = 15000
select_agent_max_x = 300
select_agent_max_y = 200
presentation_max_x = 1000 # at fixed point multiplier 1000
presentation_max_y = 750 # at fixed point multiplier 1000
animation_menu_end_offset = 11
max_scene_prop_instance_id = 10000 # when trying to loop over all props in a scene, stop at this limit
max_food_amount = 100
max_hit_points_percent = 200
all_items_begin = "itm_tattered_headcloth"
all_items_end = "itm_all_items_end"
wielded_items_begin = "itm_club"
wielded_items_end = "itm_all_items_end"
scripted_items_begin = "itm_surgeon_scalpel" # items outside this range are not checked from the ti_on_agent_hit trigger
scripted_items_end = "itm_money_bag"
herd_animal_items_begin = "itm_deer" # item range used for herd animal spawners
herd_animal_items_end = "itm_stick"
playable_troops_begin = "trp_peasant" # troops outside this range are treated as storage objects unusable by players
playable_troops_end = "trp_playable_troops_end"
factions_begin = "fac_commoners"
castle_factions_begin = "fac_1"
factions_end = "fac_factions_end"
castle_names_begin = "str_castle_name_0"
castle_names_end = "str_castle_names_end"
scenes_begin = "scn_scene_1"
scenes_end = "scn_scenes_end"
scene_names_begin = "str_scene_name_1" # this range of strings must correspond to the available scene slots
scene_names_end = "str_scene_names_end"
game_type_mission_templates_begin = "mt_conquest"
game_type_mission_templates_end = "mt_edit_scene"
game_type_names_begin = "str_game_type_1"
game_type_names_end = "str_game_types_end"
game_type_info_strings_begin = "str_game_type_1_info"
banner_meshes_begin = "mesh_banner_a01"
banner_meshes_end = "mesh_banners_default_a"
banner_items_begin = "itm_pw_banner_pole_a01" # range of items associated with banner mesh ids
banner_items_end = "itm_admin_horse"
commands_module_system_names_begin = "str_bot_count" # range of strings associated with hard coded server commands
commands_napoleonic_wars_names_begin = "str_use_class_limits"
admin_action_log_strings_begin = "str_log_admin_kick" # range of strings associated with admin actions, for the server log
ambient_sounds_begin = "snd_fire_loop" # for ambient sound emitter scene props
ambient_sounds_end = "snd_sounds_end"
action_menu_strings_begin = "str_toggle_name_labels" # range of strings associated with the action menu
action_menu_strings_end = "str_action_menu_end"
animation_strings_begin = "str_anim_cheer" # range of strings associated with the animation menu
animation_strings_end = "str_log_animation"
profile_option_strings_begin = "str_display_name_labels" # range of strings for options stored in a player profile
from header_common import *

# Global flag variables for options stored in a player profile.
profile_options = [
    "$g_display_agent_labels",
    "$g_hide_faction_in_name_labels",
    "$g_display_chat_overlay",
    "$g_chat_overlay_type_selected",
    "$g_disable_automatic_shadow_recalculation",
    "$g_animation_menu_no_mouse_grab",
    "$g_mute_global_chat",
]

# Fail fast at build time if the option list no longer fits in the bit range
# reserved for it (the bounds are imported from header_common above).
if len(profile_options) >= profile_banner_id_option_bits_end - profile_banner_id_option_bits_begin:
    raise Exception("Too many profile options: %d, maximum %d" % (len(profile_options), profile_banner_id_option_bits_end - profile_banner_id_option_bits_begin))
|
|
#!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import yaml
import json
import os
import sys
import time
import logging
from argparse import ArgumentParser
from slackclient import SlackClient
def dbg(debug_string):
    """Log *debug_string* via logging.info, but only when the module-level
    ``debug`` flag is truthy."""
    if not debug:
        return
    logging.info(debug_string)
USER_DICT = {}  # NOTE(review): declared here but not populated anywhere visible in this file
class RtmBot(object):
    """Slack RTM bot: connects, dispatches RTM events to plugins, relays output.

    NOTE(review): written against the legacy first-generation ``slackclient``
    API (``rtm_connect``/``rtm_read``/``.server``); newer versions of that
    library changed these entry points -- confirm the pinned version.
    """
    def __init__(self, token):
        # token: Slack API token used for the RTM connection.
        self.last_ping = 0
        self.token = token
        self.bot_plugins = []
        self.slack_client = None
        self.channel = None # only want bot in one channel
    def connect(self):
        """Convenience method that creates Server instance"""
        self.slack_client = SlackClient(self.token)
        self.slack_client.rtm_connect()
    def start(self):
        # Main loop: read RTM events, run plugin crons/output, keep-alive ping.
        self.connect()
        self.load_plugins()
        while True:
            for reply in self.slack_client.rtm_read():
                self.input(reply)
            self.crons()
            self.output()
            self.autoping()
            time.sleep(.1)  # avoid busy-spinning between RTM reads
    def get_users_in_channel(self):
        # Fetch the member ids of the configured channel.
        # NOTE(review): calls self.create_user_dict(), which is not defined on
        # this class in this file -- confirm it exists elsewhere before use.
        # NOTE(review): json.loads() assumes api_call returns a string here;
        # some slackclient versions return an already-parsed dict.
        print(self.channel)
        channel_info = self.slack_client.api_call("channels.info", channel=self.channel)
        info = json.loads(channel_info)
        members = info['channel']['members']
        print(members)
        self.create_user_dict(members)
    def autoping(self):
        #hardcode the interval to 3 seconds
        now = int(time.time())
        if now > self.last_ping + 3:
            self.slack_client.server.ping()
            self.last_ping = now
    def input(self, data):
        # Dispatch one RTM event to every plugin as process_<event type>.
        if "type" in data:
            function_name = "process_" + data["type"]
            dbg("got {}".format(function_name))
            for plugin in self.bot_plugins:
                plugin.register_jobs()
                plugin.do(function_name, data)
    def output(self):
        # Drain each plugin's queued (channel, message) outputs; sleep briefly
        # between back-to-back sends as a crude rate limit.
        for plugin in self.bot_plugins:
            limiter = False
            for output in plugin.do_output():
                channel = self.slack_client.server.channels.find(output[0])
                if channel != None and output[1] != None:
                    if limiter == True:
                        time.sleep(.1)
                        limiter = False
                    message = output[1].encode('ascii','ignore')
                    channel.send_message("{}".format(message))
                    limiter = True
    def crons(self):
        # Give every plugin a chance to run its scheduled jobs.
        for plugin in self.bot_plugins:
            plugin.do_jobs()
    def load_plugins(self):
        # Import every .py file under <directory>/plugins (one level deep)
        # by prepending the plugin paths to sys.path.
        for plugin in glob.glob(directory+'/plugins/*'):
            sys.path.insert(0, plugin)
        sys.path.insert(0, directory+'/plugins/')
        for plugin in glob.glob(directory+'/plugins/*.py') + glob.glob(directory+'/plugins/*/*.py'):
            logging.info(plugin)
            name = plugin.split('/')[-1][:-3]  # strip path and ".py" suffix
            # try:
            self.bot_plugins.append(Plugin(name))
            # except:
            #     print "error loading plugin %s" % name
class Plugin(object):
    """Wrapper around one plugin module: cron jobs, event dispatch, output queue."""

    def __init__(self, name, plugin_config=None):
        """Import plugin module *name*, register its jobs and run its setup.

        plugin_config is accepted for API compatibility but is unused; its
        mutable default ({}) was replaced with None to avoid the shared
        mutable default argument pitfall.
        """
        self.name = name
        self.jobs = []
        self.module = __import__(name)
        self.register_jobs()
        self.outputs = []
        if name in config:
            logging.info("config found for: " + name)
            self.module.config = config[name]
        if 'setup' in dir(self.module):
            self.module.setup()

    def register_jobs(self):
        """Convert the module's crontable entries into Job objects, then clear it."""
        if 'crontable' in dir(self.module):
            for interval, function in self.module.crontable:
                # getattr() replaces the previous eval() on a built string --
                # same lookup, without evaluating arbitrary code.
                self.jobs.append(Job(interval, getattr(self.module, function)))
            logging.info(self.module.crontable)
            self.module.crontable = []
        else:
            self.module.crontable = []

    def do(self, function_name, data):
        """Call the module's handler for this event type, if present."""
        if function_name in dir(self.module):
            handler = getattr(self.module, function_name)
            # In debug mode let the plugin fail with a full stack trace;
            # otherwise swallow plugin errors so one plugin can't kill the bot.
            if not debug:
                try:
                    handler(data)
                except Exception:
                    dbg("problem in module {} {}".format(function_name, data))
            else:
                handler(data)
        if "catch_all" in dir(self.module):
            try:
                self.module.catch_all(data)
            except Exception:
                dbg("problem in catch all")

    def do_jobs(self):
        """Run every registered job that is due."""
        for job in self.jobs:
            job.check()

    def do_output(self):
        """Drain and return the module's queued (channel, message) outputs."""
        output = []
        while True:
            if 'outputs' in dir(self.module):
                if len(self.module.outputs) > 0:
                    logging.info("output from {}".format(self.module))
                    output.append(self.module.outputs.pop(0))
                else:
                    break
            else:
                self.module.outputs = []
        return output
class Job(object):
    """A plugin function scheduled to run every *interval* seconds."""

    def __init__(self, interval, function):
        self.function = function
        self.interval = interval
        self.lastrun = 0

    def __str__(self):
        return "{} {} {}".format(self.function, self.interval, self.lastrun)

    def __repr__(self):
        return self.__str__()

    def check(self):
        """Run the job if its interval has elapsed since the last run."""
        if self.lastrun + self.interval >= time.time():
            return
        if debug:
            # Debug mode: let failures propagate with a full stack trace.
            self.function()
        else:
            try:
                self.function()
            except:
                dbg("problem")
        self.lastrun = time.time()
class UnknownChannel(Exception):
    """Raised when a referenced Slack channel cannot be found.

    NOTE(review): defined but not raised anywhere visible in this file.
    """
    pass
def main_loop():
    """Run the bot forever; exit cleanly on Ctrl-C, log any other crash.

    Relies on the module-level ``config``, ``bot`` and ``directory`` globals
    set up in the ``__main__`` block.
    """
    if "LOGFILE" in config:
        logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO, format='%(asctime)s %(message)s')
    logging.info(directory)
    try:
        bot.start()
    except KeyboardInterrupt:
        sys.exit(0)
    except:
        # Catch-all so a crash is recorded in the log before the process dies.
        logging.exception('OOPS')
def parse_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: optional list of argument strings; None (the default) keeps the
            original behaviour of reading sys.argv[1:], while an explicit
            list makes the function testable.

    Returns:
        argparse.Namespace with a ``config`` attribute (path string or None).
    """
    parser = ArgumentParser()
    parser.add_argument(
        '-c',
        '--config',
        help='Full path to config file.',
        metavar='path'
    )
    return parser.parse_args(argv)
if __name__ == "__main__":
    args = parse_args()
    # Resolve the bot's own directory to an absolute path so plugin globbing
    # works regardless of the current working directory.
    directory = os.path.dirname(sys.argv[0])
    if not directory.startswith('/'):
        directory = os.path.abspath("{}/{}".format(os.getcwd(),
            directory
            ))
    # open() + safe_load replace the Python-2-only file() builtin and the
    # unsafe yaml.load() (the config is plain data, not trusted objects);
    # the context manager also closes the previously-leaked file handle.
    with open(args.config or 'rtmbot.conf', 'r') as config_file:
        config = yaml.safe_load(config_file)
    debug = config["DEBUG"]
    bot = RtmBot(config["SLACK_TOKEN"])
    bot.channel = config["CHANNEL"]
    site_plugins = []
    files_currently_downloading = []
    job_hash = {}
    # dict.has_key() was removed in Python 3; use the "in" operator instead.
    if "DAEMON" in config:
        if config["DAEMON"]:
            import daemon
            with daemon.DaemonContext():
                main_loop()
    main_loop()
|
|
import json
import logging
import re
import urllib.parse
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
from django.conf import settings
DOMAIN = settings.MATRIX_DOMAIN
URL = settings.MATRIX_URL
class NoSuchUser(Exception):
    """Raised when the Matrix server answers 404 for the requested user."""
    pass
def _auth_header():
    """Build the Authorization header for Matrix admin API requests."""
    token = settings.MATRIX_ADMIN_TOKEN
    return {'Authorization': 'Bearer {}'.format(token)}
def _matrix_request(url, *args, **kargs):
logger = logging.getLogger('matrix')
method = kargs.get('method', 'GET')
logger.debug('%s %s', method, url)
# convert data from dict to json
if len(args) > 0:
data = args[0]
data = json.dumps(data).encode('utf-8')
args[0] = data
if 'data' in kargs:
data = kargs['data']
data = json.dumps(data).encode('utf-8')
kargs['data'] = data
req = Request(url, *args, **kargs)
try:
response = urlopen(req)
except HTTPError as e:
logger.error(
'The server couldn\'t fulfill the request. Error code: %s',
e.code)
raise e
except URLError as e:
logger.error(
'We failed to reach a server. Reason: %s',
e.reason)
raise e
result_bytes = response.read()
result_str = str(result_bytes, encoding='utf-8')
result_json = json.loads(result_str)
logger.debug(json.dumps(result_json, indent=4))
return result_json
# ##########
# Non-Admin commands
# ##########
def generate_eventid():
    """Return a random 43-character id of the form '$' + 42 id-safe chars."""
    import random
    alphabet = '_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    suffix = ''.join(random.choice(alphabet) for _ in range(42))
    return '$' + suffix
def get_event(eventid):
    '''
    Warning: you need to be in the room, or 403
    '''
    quoted = urllib.parse.quote(eventid)
    url = f'{URL}_matrix/client/r0/events/{quoted}'
    return _matrix_request(url, headers=_auth_header())
def get_room_event(roomid, eventid):
    """Fetch a single event from a room via the client API."""
    room_q = urllib.parse.quote(roomid)
    event_q = urllib.parse.quote(eventid)
    url = f'{URL}_matrix/client/r0/rooms/{room_q}/event/{event_q}'
    return _matrix_request(url, headers=_auth_header())
def redact_event(roomid, eventid, txnid=None, reason='test'):
    """Redact (strip the content of) an event in a room.

    Args:
        roomid, eventid: room and event identifiers.
        txnid: client transaction id; a random one is generated when None.
        reason: redaction reason sent to the server. Previously hard-coded
            to 'test'; now a parameter with the same default, so existing
            callers are unaffected.
    """
    if txnid is None:
        txnid = generate_eventid()
    roomid = urllib.parse.quote(roomid)
    eventid = urllib.parse.quote(eventid)
    txnid = urllib.parse.quote(txnid)
    data = {'reason': reason}
    return _matrix_request(
        f'{URL}_matrix/client/r0/rooms/{roomid}/redact/{eventid}/{txnid}',
        method='PUT',
        headers=_auth_header(),
        data=data,
    )
# ##########
# Admin commands
# ##########
def get_version():
    """Return the Synapse server version information (no auth required)."""
    url = f'{URL}_synapse/admin/v1/server_version'
    return _matrix_request(url)
def get_users_quick():
    '''
    Yields all users
    '''
    # Page through /_synapse/admin/v2/users; next_token is absent from the
    # last page, which ends the loop. deactivated=true includes deactivated
    # accounts in the listing.
    next_token = '0'
    limit = '10'
    while next_token:
        result = _matrix_request(
            f'{URL}_synapse/admin/v2/users'
            f'?deactivated=true&limit={limit}&from={next_token}',
            headers=_auth_header(),
        )
        for user in result['users']:
            yield user
        next_token = result.get('next_token', None)
def localpart(user_id):
    """Return the local part of a Matrix user id ('@login:DOMAIN' -> 'login').

    re.escape() is applied to DOMAIN so dots in the domain only match literal
    dots (previously '.' matched any character, e.g. 'exampleXorg').

    Raises:
        AttributeError: if user_id does not match '@...:DOMAIN' (search()
            returns None) -- unchanged from the original behaviour.
    """
    pattern = re.compile(f'@(.*):{re.escape(DOMAIN)}')
    return pattern.search(user_id).groups()[0]
def get_users(include_deleted=False):
    '''
    Yields all users, detailed version
    '''
    # Same pagination as get_users_quick(), but each user is re-fetched with
    # get_user_info() to yield the detailed record (one extra request per user).
    next_token = '0'
    limit = '10'
    while next_token:
        result = _matrix_request(
            f'{URL}_synapse/admin/v2/users'
            f'?deactivated=true&limit={limit}&from={next_token}',
            headers=_auth_header(),
        )
        for user in result['users']:
            user_id = user['name']
            user = get_user_info(user_id)
            if not include_deleted:
                # An account with no password hash and no third-party ids
                # (emails/phones) is treated as disabled and skipped.
                if (( not 'password_hash' in user or not user['password_hash'] )
                        and not user['threepids']):
                    # logger.debug(f'{login} is disabled')
                    continue
            yield user
        next_token = result.get('next_token', None)
def get_user_info(user_id):
    """Return the admin-API account record for *user_id*.

    Raises NoSuchUser when the server answers 404.
    """
    assert user_id.endswith(f':{DOMAIN}')
    url = f'{URL}_synapse/admin/v2/users/{user_id}'
    try:
        return _matrix_request(url, headers=_auth_header())
    except HTTPError as e:
        if e.code != 404:
            raise e
        raise NoSuchUser
def get_user_rooms(user_id):
    '''
    List the rooms that user is in
    '''
    assert user_id.endswith(f':{DOMAIN}')
    url = f'{URL}_synapse/admin/v1/users/{user_id}/joined_rooms'
    try:
        return _matrix_request(url, headers=_auth_header())
    except HTTPError as e:
        if e.code != 404:
            raise e
        raise NoSuchUser
def put_user(user_id, data):
    '''
    Low level interface to /_synapse/admin/v2/users/<USER>.
    '''
    # data is sent as the JSON body of a PUT; see set_user_info() for the
    # high-level wrapper that computes a minimal delta first.
    assert user_id.endswith(f':{DOMAIN}')
    return _matrix_request(
        f'{URL}_synapse/admin/v2/users/{user_id}',
        headers=_auth_header(),
        data=data,
        method='PUT',
    )
def set_user_info(user_id, name=None, emails=None, admin=None, create=False):
    '''
    High level interface to create/modify account.
    @returns server new information if changed, None otherwise
    '''
    logger = logging.getLogger('matrix')
    assert user_id.endswith(f':{DOMAIN}')
    try:
        olddata = get_user_info(user_id)
    except NoSuchUser as e:
        # Unknown account: either create it from scratch or bail out.
        if not create:
            logger.error(f"User {user_id} doesn't exists and create=False")
            raise e
        olddata = {}
    # Build a minimal delta against the current server record; the PUT is
    # only issued when at least one field actually changes.
    data = {}
    if name is not None:
        if name != olddata.get('displayname', None):
            data['displayname'] = name
    if emails is not None:
        old_emails = [
            threepid['address']
            for threepid in olddata.get('threepids', [])
            if threepid['medium'] == 'email'
        ]
        old_emails = set(old_emails)
        emails = set(emails)
        # preserve the emails added by the user:
        emails = old_emails | emails
        if old_emails != emails:
            data['threepids'] = [
                {'medium': 'email', 'address': email} for email in emails
            ]
    if admin is not None:
        # NOTE(review): admin is always written when given, even when it
        # equals the current value, so a PUT is issued in that case.
        data['admin'] = admin
    if data:
        return put_user(user_id, data)
    else:
        logger.debug(f'{user_id}: No change.')
        return None
def deactivate_account(user_id, erase=True):
    """Deactivate (and by default erase) an account; raise NoSuchUser on 404."""
    assert user_id.endswith(f':{DOMAIN}')
    try:
        return _matrix_request(
            url=f'{URL}_synapse/admin/v1/deactivate/{user_id}',
            headers=_auth_header(),
            data={'erase': erase},
        )
    except HTTPError as e:
        if e.code != 404:
            raise e
        raise NoSuchUser
def reset_password(user_id, password):
    """Set a new password for *user_id*, logging out all of its devices."""
    assert user_id.endswith(f':{DOMAIN}')
    return _matrix_request(
        f'{URL}_synapse/admin/v1/reset_password/{user_id}',
        headers=_auth_header(),
        data={'new_password': password, 'logout_devices': True},
    )
def room_join(user_id, room):
    '''
    room is either a id (starting with '!') or an alias (starting with '#')
    admin must be in the room...
    '''
    assert user_id.endswith(f':{DOMAIN}')
    # Example: !636q39766251:server.com, #niceroom:server.com
    quoted_room = urllib.parse.quote(room)
    return _matrix_request(
        f'{URL}_synapse/admin/v1/join/{quoted_room}',
        headers=_auth_header(),
        data={'user_id': user_id},
    )
def get_rooms_quick():
    '''
    Yields all rooms
    '''
    limit = '10'
    next_batch = '0'
    # page through the room list until the server stops returning next_batch
    while next_batch:
        page = _matrix_request(
            f'{URL}_synapse/admin/v1/rooms'
            f'?limit={limit}&from={next_batch}',
            headers=_auth_header(),
        )
        yield from page['rooms']
        next_batch = page.get('next_batch', None)
def get_rooms(show_empty=False, show_private=False):
    '''
    Yields all rooms
    '''
    limit = '10'
    next_batch = '0'
    while next_batch:
        page = _matrix_request(
            f'{URL}_synapse/admin/v1/rooms'
            f'?limit={limit}&from={next_batch}',
            headers=_auth_header(),
        )
        for summary in page['rooms']:
            nb_members = summary.get('joined_members', 0)
            # skip empty rooms and small (presumed private) rooms unless asked
            if nb_members == 0 and not show_empty:
                continue
            if 0 < nb_members <= 2 and not show_private:
                continue
            room_id = summary['room_id']
            room = get_room_info(room_id)
            state = get_room_state(room_id)['state']
            room['state'] = _room_state_clean(state)
            yield room
        next_batch = page.get('next_batch', None)
def get_room_info(roomid):
    """Return the admin-API details of a single room."""
    url = f'{URL}_synapse/admin/v1/rooms/{roomid}'
    return _matrix_request(url, headers=_auth_header())
def get_room_state(roomid):
    '''
    consider using _room_state_clean on the return value
    '''
    url = f'{URL}_synapse/admin/v1/rooms/{roomid}/state'
    return _matrix_request(url, headers=_auth_header())
def _room_state_clean(states):
result = {}
for state in states:
statetype = state['type']
content = state['content']
content_length = len(content)
if content_length == 0:
# occurs for redacted events and
# some im.vector.modular.widgets events
continue
if statetype == 'm.room.create':
for key in content:
assert key in ('room_version', 'creator', 'm.federate',
'type'), (
f'Unsupported key {key} in "m.room.create" event state:'
f' {content}')
result.update(content)
elif statetype == 'm.room.member':
if 'members' not in result:
result['members'] = []
member = {
'user_id': state['state_key']
}
member.update(content)
result['members'].append(member)
else:
result[statetype] = content
# elif statetype == 'm.room.power_levels':
# result['m.room.power_levels'] = content
# elif statetype in ('m.room.topic', 'm.room.name',
# 'm.room.history_visibility',
# 'm.room.guest_access'):
# assert content_length == 1, \
# f"Unexpected keys {content.keys()} in content"
# f" for event type {statetype}"
# key = statetype.split('.')[-1]
# result[statetype] = content[key]
# elif statetype == 'm.room.join_rules':
# content_length = len(content)
# assert content_length == 1, f"Unexpected keys {content.keys()}
# in content for event type {statetype}"
# result[statetype] = content['join_rule'] # not join_rules
# elif statetype == 'm.room.canonical_alias':
# for key in content.keys():
# if key not in ('alias', 'alt_aliases'):
# logger.warning(
# f"Unexpected keys {content.keys()} in content"
# f"for event type {statetype}")
# result.update(content)
# elif statetype == 'm.room.encryption':
# assert content_length == 1, f"Unexpected keys
# {content.keys()} in content for event type {statetype}"
# result[statetype] = content['algorithm'] # not encryption
# elif statetype == 'im.vector.modular.widgets':
# logger.warning(f'event type {statetype}: {content}')
# else:
# logger.warning(
# f'Unsupported state type {statetype} in room states.')
return result
def room_get_members(roomid):
    '''
    Return members (invited are NOT included)
    '''
    url = f'{URL}_synapse/admin/v1/rooms/{roomid}/members'
    return _matrix_request(url, headers=_auth_header())
def room_delete(room):
    '''
    room is either a id (starting with '!') or an alias (starting with '#')
    '''
    # Example: !636q39766251:server.com, #niceroom:server.com
    quoted_room = urllib.parse.quote(room)
    return _matrix_request(
        f'{URL}_synapse/admin/v1/rooms/{quoted_room}',
        method='DELETE',
        headers=_auth_header(),
        data={},
    )
def room_makeadmin(room, user_id=None):
    '''
    room is either a id (starting with '!') or an alias (starting with '#')
    '''
    data = {}
    if user_id:
        assert user_id.endswith(f':{DOMAIN}')
        data['user_id'] = user_id
    # Example: !636q39766251:server.com, #niceroom:server.com
    quoted_room = urllib.parse.quote(room)
    return _matrix_request(
        f'{URL}_synapse/admin/v1/rooms/{quoted_room}/make_room_admin',
        method='POST',
        headers=_auth_header(),
        data=data,
    )
# #####################
# Regular user commands
# #####################
def room_invite(room, user_id):
    """Invite *user_id* into *room* through the regular client API."""
    quoted_room = urllib.parse.quote(room)
    return _matrix_request(
        f'{URL}_matrix/client/r0/rooms/{quoted_room}/invite',
        method='POST',
        headers=_auth_header(),
        data={"user_id": user_id},
    )
def room_kick(room, user_id):
    """Kick *user_id* out of *room* through the regular client API."""
    quoted_room = urllib.parse.quote(room)
    return _matrix_request(
        f'{URL}_matrix/client/r0/rooms/{quoted_room}/kick',
        method='POST',
        headers=_auth_header(),
        data={"user_id": user_id},
    )
|
|
"""
Objective-C runtime wrapper for use by LLDB Python formatters
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""
import lldb
import lldb.formatters.cache
import lldb.formatters.attrib_fromdict
import functools
import lldb.formatters.Logger
class Utilities:
    """Static helpers shared by the Objective-C runtime inspection classes."""

    @staticmethod
    def read_ascii(process, pointer, max_len=128):
        """Read a C string (up to max_len bytes) from process memory.

        Returns None on any read failure or when the string is empty.
        """
        logger = lldb.formatters.Logger.Logger()
        error = lldb.SBError()
        content = None
        try:
            content = process.ReadCStringFromMemory(pointer, max_len, error)
        except:
            pass
        if content is None or len(content) == 0 or error.fail:
            return None
        return content

    @staticmethod
    def is_valid_pointer(pointer, pointer_size, allow_tagged=0, allow_NULL=0):
        """Check pointer alignment; odd (tagged) pointers pass only if allowed."""
        logger = lldb.formatters.Logger.Logger()
        if pointer is None:
            return 0
        if pointer == 0:
            return allow_NULL
        if allow_tagged and (pointer % 2) == 1:
            return 1
        # a real object pointer must be aligned to the pointer size
        return ((pointer % pointer_size) == 0)

    # Objective-C runtime has a rule that pointers in a class_t will only have bits 0 thru 46 set
    # so if any pointer has bits 47 thru 63 high we know that this is not a
    # valid isa
    @staticmethod
    def is_allowed_pointer(pointer):
        """Reject pointers with any of bits 47-63 set (cannot be a class_t field)."""
        logger = lldb.formatters.Logger.Logger()
        if pointer is None:
            return 0
        return ((pointer & 0xFFFF800000000000) == 0)

    @staticmethod
    def read_child_of(valobj, offset, type):
        """Return the unsigned value of the child of *valobj* at *offset*."""
        logger = lldb.formatters.Logger.Logger()
        # fast path: reading the whole value at offset 0 needs no child object
        if offset == 0 and type.GetByteSize() == valobj.GetByteSize():
            return valobj.GetValueAsUnsigned()
        child = valobj.CreateChildAtOffset("childUNK", offset, type)
        if child is None or child.IsValid() == 0:
            return None
        return child.GetValueAsUnsigned()

    @staticmethod
    def is_valid_identifier(name):
        """Heuristically validate a class name (letters, digits, $%_.- only)."""
        logger = lldb.formatters.Logger.Logger()
        if name is None:
            return None
        if len(name) == 0:
            return None
        # technically, the ObjC runtime does not enforce any rules about what name a class can have
        # in practice, the commonly used byte values for a class name are the letters, digits and some
        # symbols: $, %, -, _, .
        # WARNING: this means that you cannot use this runtime implementation if you need to deal
        # with class names that use anything but what is allowed here
        ok_values = dict.fromkeys(
            "$%_.-ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890")
        return all(c in ok_values for c in name)

    @staticmethod
    def check_is_osx_lion(target):
        """Guess whether *target* runs OSX Lion; None when undeterminable."""
        logger = lldb.formatters.Logger.Logger()
        # assume the only thing that has a Foundation.framework is a Mac
        # assume anything < Lion does not even exist
        try:
            mod = target.module['Foundation']
        except:
            mod = None
        if mod is None or mod.IsValid() == 0:
            return None
        ver = mod.GetVersion()
        if ver is None or ver == []:
            return None
        # Foundation major version below 900 predates Lion
        return (ver[0] < 900)

    # a utility method that factors out code common to almost all the formatters
    # takes in an SBValue and a metrics object
    # returns a class_data and a wrapper (or None, if the runtime alone can't
    # decide on a wrapper)
    @staticmethod
    def prepare_class_detection(valobj, statistics):
        logger = lldb.formatters.Logger.Logger()
        class_data = ObjCRuntime(valobj)
        if class_data.is_valid() == 0:
            statistics.metric_hit('invalid_pointer', valobj)
            wrapper = InvalidPointer_Description(
                valobj.GetValueAsUnsigned(0) == 0)
            return class_data, wrapper
        class_data = class_data.read_class_data()
        if class_data.is_valid() == 0:
            statistics.metric_hit('invalid_isa', valobj)
            wrapper = InvalidISA_Description()
            return class_data, wrapper
        # KVO is implemented by transparent subclassing: report the real class
        if class_data.is_kvo():
            class_data = class_data.get_superclass()
        if class_data.class_name() == '_NSZombie_OriginalClass':
            wrapper = ThisIsZombie_Description()
            return class_data, wrapper
        return class_data, None
class RoT_Data:
    """Reader for the ObjC v2 runtime's class_ro_t (class name, instance size)."""

    def __init__(self, rot_pointer, params):
        logger = lldb.formatters.Logger.Logger()
        if (Utilities.is_valid_pointer(rot_pointer.GetValueAsUnsigned(),
                                       params.pointer_size, allow_tagged=0)):
            self.sys_params = params
            self.valobj = rot_pointer
            #self.flags = Utilities.read_child_of(self.valobj,0,self.sys_params.uint32_t)
            #self.instanceStart = Utilities.read_child_of(self.valobj,4,self.sys_params.uint32_t)
            self.instanceSize = None  # lazy fetching
            # offset of the name pointer inside class_ro_t (64-bit: 24, 32-bit: 16)
            offset = 24 if self.sys_params.is_64_bit else 16
            #self.ivarLayoutPtr = Utilities.read_child_of(self.valobj,offset,self.sys_params.addr_ptr_type)
            self.namePointer = Utilities.read_child_of(
                self.valobj, offset, self.sys_params.types_cache.addr_ptr_type)
            self.valid = 1  # self.check_valid()
        else:
            logger >> "Marking as invalid - rot is invalid"
            self.valid = 0
        if self.valid:
            self.name = Utilities.read_ascii(
                self.valobj.GetTarget().GetProcess(), self.namePointer)
            if not(Utilities.is_valid_identifier(self.name)):
                logger >> "Marking as invalid - name is invalid"
                self.valid = 0

    # perform sanity checks on the contents of this class_ro_t
    def check_valid(self):
        self.valid = 1
        # misaligned pointers seem to be possible for this field
        # if not(Utilities.is_valid_pointer(self.namePointer,self.sys_params.pointer_size,allow_tagged=0)):
        # self.valid = 0
        # pass

    def __str__(self):
        logger = lldb.formatters.Logger.Logger()
        return \
            "instanceSize = " + hex(self.instance_size()) + "\n" + \
            "namePointer = " + hex(self.namePointer) + " --> " + self.name

    def is_valid(self):
        return self.valid

    def instance_size(self, align=0):
        """Instance size in bytes (lazily read); pointer-aligned when *align*."""
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid() == 0:
            return None
        if self.instanceSize is None:
            self.instanceSize = Utilities.read_child_of(
                self.valobj, 8, self.sys_params.types_cache.uint32_t)
        if align:
            unalign = self.instance_size(0)
            # round up to 8 (64-bit) or 4 (32-bit), truncated to 32 bits
            if self.sys_params.is_64_bit:
                return ((unalign + 7) & ~7) % 0x100000000
            else:
                return ((unalign + 3) & ~3) % 0x100000000
        else:
            return self.instanceSize
class RwT_Data:
    """Reader for the ObjC v2 runtime's class_rw_t; chains into RoT_Data."""

    def __init__(self, rwt_pointer, params):
        logger = lldb.formatters.Logger.Logger()
        if (Utilities.is_valid_pointer(rwt_pointer.GetValueAsUnsigned(),
                                       params.pointer_size, allow_tagged=0)):
            self.sys_params = params
            self.valobj = rwt_pointer
            #self.flags = Utilities.read_child_of(self.valobj,0,self.sys_params.uint32_t)
            #self.version = Utilities.read_child_of(self.valobj,4,self.sys_params.uint32_t)
            # pointer to the class_ro_t, stored 8 bytes into class_rw_t
            self.roPointer = Utilities.read_child_of(
                self.valobj, 8, self.sys_params.types_cache.addr_ptr_type)
            self.check_valid()
        else:
            logger >> "Marking as invalid - rwt is invald"
            self.valid = 0
        if self.valid:
            # wrap the raw ro pointer in an SBValue so RoT_Data can read through it
            self.rot = self.valobj.CreateValueFromData(
                "rot", lldb.SBData.CreateDataFromUInt64Array(
                    self.sys_params.endianness, self.sys_params.pointer_size, [
                        self.roPointer]), self.sys_params.types_cache.addr_ptr_type)
            # self.rot = self.valobj.CreateValueFromAddress("rot",self.roPointer,self.sys_params.types_cache.addr_ptr_type).AddressOf()
            self.data = RoT_Data(self.rot, self.sys_params)

    # perform sanity checks on the contents of this class_rw_t
    def check_valid(self):
        logger = lldb.formatters.Logger.Logger()
        self.valid = 1
        if not(
            Utilities.is_valid_pointer(
                self.roPointer,
                self.sys_params.pointer_size,
                allow_tagged=0)):
            logger >> "Marking as invalid - ropointer is invalid"
            self.valid = 0

    def __str__(self):
        logger = lldb.formatters.Logger.Logger()
        return \
            "roPointer = " + hex(self.roPointer)

    def is_valid(self):
        # only valid when the nested class_ro_t reader is also valid
        logger = lldb.formatters.Logger.Logger()
        if self.valid:
            return self.data.is_valid()
        return 0
class Class_Data_V2:
    """Reader for an ObjC v2 runtime class_t.

    Walks class_t -> class_rw_t (RwT_Data) -> class_ro_t (RoT_Data),
    validating every pointer on the way; self.valid records the outcome.
    """

    def __init__(self, isa_pointer, params):
        logger = lldb.formatters.Logger.Logger()
        if (isa_pointer is not None) and (Utilities.is_valid_pointer(
                isa_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0)):
            self.sys_params = params
            self.valobj = isa_pointer
            self.check_valid()
        else:
            logger >> "Marking as invalid - isa is invalid or None"
            self.valid = 0
        if self.valid:
            self.rwt = self.valobj.CreateValueFromData(
                "rwt", lldb.SBData.CreateDataFromUInt64Array(
                    self.sys_params.endianness, self.sys_params.pointer_size, [
                        self.dataPointer]), self.sys_params.types_cache.addr_ptr_type)
            # self.rwt = self.valobj.CreateValueFromAddress("rwt",self.dataPointer,self.sys_params.types_cache.addr_ptr_type).AddressOf()
            self.data = RwT_Data(self.rwt, self.sys_params)

    # perform sanity checks on the contents of this class_t
    # this call tries to minimize the amount of data fetched- as soon as we have "proven"
    # that we have an invalid object, we stop reading
    def check_valid(self):
        logger = lldb.formatters.Logger.Logger()
        self.valid = 1
        self.isaPointer = Utilities.read_child_of(
            self.valobj, 0, self.sys_params.types_cache.addr_ptr_type)
        if not(
            Utilities.is_valid_pointer(
                self.isaPointer,
                self.sys_params.pointer_size,
                allow_tagged=0)):
            logger >> "Marking as invalid - isaPointer is invalid"
            self.valid = 0
            return
        if not(Utilities.is_allowed_pointer(self.isaPointer)):
            logger >> "Marking as invalid - isaPointer is not allowed"
            self.valid = 0
            return
        self.cachePointer = Utilities.read_child_of(
            self.valobj,
            2 * self.sys_params.pointer_size,
            self.sys_params.types_cache.addr_ptr_type)
        if not(
            Utilities.is_valid_pointer(
                self.cachePointer,
                self.sys_params.pointer_size,
                allow_tagged=0)):
            logger >> "Marking as invalid - cachePointer is invalid"
            self.valid = 0
            return
        if not(Utilities.is_allowed_pointer(self.cachePointer)):
            logger >> "Marking as invalid - cachePointer is not allowed"
            self.valid = 0
            return
        self.dataPointer = Utilities.read_child_of(
            self.valobj,
            4 * self.sys_params.pointer_size,
            self.sys_params.types_cache.addr_ptr_type)
        if not(
            Utilities.is_valid_pointer(
                self.dataPointer,
                self.sys_params.pointer_size,
                allow_tagged=0)):
            logger >> "Marking as invalid - dataPointer is invalid"
            self.valid = 0
            return
        if not(Utilities.is_allowed_pointer(self.dataPointer)):
            logger >> "Marking as invalid - dataPointer is not allowed"
            self.valid = 0
            return
        self.superclassIsaPointer = Utilities.read_child_of(
            self.valobj,
            1 * self.sys_params.pointer_size,
            self.sys_params.types_cache.addr_ptr_type)
        if not(
            Utilities.is_valid_pointer(
                self.superclassIsaPointer,
                self.sys_params.pointer_size,
                allow_tagged=0,
                allow_NULL=1)):
            logger >> "Marking as invalid - superclassIsa is invalid"
            self.valid = 0
            return
        if not(Utilities.is_allowed_pointer(self.superclassIsaPointer)):
            logger >> "Marking as invalid - superclassIsa is not allowed"
            self.valid = 0
            return

    # in general, KVO is implemented by transparently subclassing
    # however, there could be exceptions where a class does something else
    # internally to implement the feature - this method will have no clue that a class
    # has been KVO'ed unless the standard implementation technique is used
    def is_kvo(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            if self.class_name().startswith("NSKVONotifying_"):
                return 1
        return 0

    # some CF classes have a valid ObjC isa in their CFRuntimeBase
    # but instead of being class-specific this isa points to a match-'em-all class
    # which is __NSCFType (the versions without __ also exists and we are matching to it
    # just to be on the safe side)
    def is_cftype(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            return self.class_name() == '__NSCFType' or self.class_name() == 'NSCFType'

    def get_superclass(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            parent_isa_pointer = self.valobj.CreateChildAtOffset(
                "parent_isa", self.sys_params.pointer_size,
                # bugfix: addr_ptr_type lives on sys_params.types_cache;
                # SystemParameters itself has no addr_ptr_type attribute
                self.sys_params.types_cache.addr_ptr_type)
            return Class_Data_V2(parent_isa_pointer, self.sys_params)
        else:
            return None

    def class_name(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            return self.data.data.name
        else:
            return None

    def is_valid(self):
        logger = lldb.formatters.Logger.Logger()
        if self.valid:
            return self.data.is_valid()
        return 0

    def __str__(self):
        logger = lldb.formatters.Logger.Logger()
        return 'isaPointer = ' + hex(self.isaPointer) + "\n" + \
            "superclassIsaPointer = " + hex(self.superclassIsaPointer) + "\n" + \
            "cachePointer = " + hex(self.cachePointer) + "\n" + \
            "data = " + hex(self.dataPointer)

    def is_tagged(self):
        return 0

    def instance_size(self, align=0):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid() == 0:
            return None
        # bugfix: self.rwt is a raw SBValue and has no .rot attribute; the
        # parsed class_rw_t/class_ro_t readers live in self.data/self.data.data
        return self.data.data.instance_size(align)
# runtime v1 is much less intricate than v2 and stores relevant
# information directly in the class_t object
class Class_Data_V1:
    """Reader for an ObjC v1 runtime class_t (name and size stored inline)."""

    def __init__(self, isa_pointer, params):
        logger = lldb.formatters.Logger.Logger()
        if (isa_pointer is not None) and (Utilities.is_valid_pointer(
                isa_pointer.GetValueAsUnsigned(), params.pointer_size, allow_tagged=0)):
            self.valid = 1
            self.sys_params = params
            self.valobj = isa_pointer
            # bugfix: instance_size() tests this attribute, but it was never
            # initialized, so the first call raised AttributeError
            self.instanceSize = None  # lazy fetching
            self.check_valid()
        else:
            logger >> "Marking as invalid - isaPointer is invalid or None"
            self.valid = 0
        if self.valid:
            self.name = Utilities.read_ascii(
                self.valobj.GetTarget().GetProcess(), self.namePointer)
            if not(Utilities.is_valid_identifier(self.name)):
                logger >> "Marking as invalid - name is not valid"
                self.valid = 0

    # perform sanity checks on the contents of this class_t
    def check_valid(self):
        logger = lldb.formatters.Logger.Logger()
        self.valid = 1
        self.isaPointer = Utilities.read_child_of(
            self.valobj, 0, self.sys_params.types_cache.addr_ptr_type)
        if not(
            Utilities.is_valid_pointer(
                self.isaPointer,
                self.sys_params.pointer_size,
                allow_tagged=0)):
            logger >> "Marking as invalid - isaPointer is invalid"
            self.valid = 0
            return
        self.superclassIsaPointer = Utilities.read_child_of(
            self.valobj,
            1 * self.sys_params.pointer_size,
            self.sys_params.types_cache.addr_ptr_type)
        if not(
            Utilities.is_valid_pointer(
                self.superclassIsaPointer,
                self.sys_params.pointer_size,
                allow_tagged=0,
                allow_NULL=1)):
            logger >> "Marking as invalid - superclassIsa is invalid"
            self.valid = 0
            return
        self.namePointer = Utilities.read_child_of(
            self.valobj,
            2 * self.sys_params.pointer_size,
            self.sys_params.types_cache.addr_ptr_type)
        # if not(Utilities.is_valid_pointer(self.namePointer,self.sys_params.pointer_size,allow_tagged=0,allow_NULL=0)):
        # self.valid = 0
        # return

    # in general, KVO is implemented by transparently subclassing
    # however, there could be exceptions where a class does something else
    # internally to implement the feature - this method will have no clue that a class
    # has been KVO'ed unless the standard implementation technique is used
    def is_kvo(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            if self.class_name().startswith("NSKVONotifying_"):
                return 1
        return 0

    # some CF classes have a valid ObjC isa in their CFRuntimeBase
    # but instead of being class-specific this isa points to a match-'em-all class
    # which is __NSCFType (the versions without __ also exists and we are matching to it
    # just to be on the safe side)
    def is_cftype(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            return self.class_name() == '__NSCFType' or self.class_name() == 'NSCFType'

    def get_superclass(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            parent_isa_pointer = self.valobj.CreateChildAtOffset(
                "parent_isa", self.sys_params.pointer_size,
                # bugfix: addr_ptr_type lives on sys_params.types_cache;
                # SystemParameters itself has no addr_ptr_type attribute
                self.sys_params.types_cache.addr_ptr_type)
            return Class_Data_V1(parent_isa_pointer, self.sys_params)
        else:
            return None

    def class_name(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            return self.name
        else:
            return None

    def is_valid(self):
        return self.valid

    def __str__(self):
        logger = lldb.formatters.Logger.Logger()
        # bugfix: instanceSize is an attribute, not a callable - use
        # instance_size(); also separate the name and size with a newline
        return 'isaPointer = ' + hex(self.isaPointer) + "\n" + \
            "superclassIsaPointer = " + hex(self.superclassIsaPointer) + "\n" + \
            "namePointer = " + hex(self.namePointer) + " --> " + self.name + "\n" + \
            "instanceSize = " + hex(self.instance_size()) + "\n"

    def is_tagged(self):
        return 0

    def instance_size(self, align=0):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid() == 0:
            return None
        if self.instanceSize is None:
            self.instanceSize = Utilities.read_child_of(
                self.valobj,
                5 * self.sys_params.pointer_size,
                self.sys_params.types_cache.addr_ptr_type)
        if align:
            unalign = self.instance_size(0)
            if self.sys_params.is_64_bit:
                return ((unalign + 7) & ~7) % 0x100000000
            else:
                return ((unalign + 3) & ~3) % 0x100000000
        else:
            return self.instanceSize
# these are the only tagged pointers values for current versions
# of OSX - they might change in future OS releases, and no-one is
# advised to rely on these values, or any of the bitmasking formulas
# in TaggedClass_Data. doing otherwise is at your own risk
# maps tagged-pointer class bits (TaggedClass_Data.class_bits) to class names
TaggedClass_Values_Lion = {1: 'NSNumber',
                           5: 'NSManagedObject',
                           6: 'NSDate',
                           7: 'NSDateTS'}
TaggedClass_Values_NMOS = {0: 'NSAtom',
                           3: 'NSNumber',
                           4: 'NSDateTS',
                           5: 'NSManagedObject',
                           6: 'NSDate'}
class TaggedClass_Data:
    """Decoder for a tagged-pointer ObjC value (payload lives in the pointer)."""

    def __init__(self, pointer, params):
        logger = lldb.formatters.Logger.Logger()
        global TaggedClass_Values_Lion, TaggedClass_Values_NMOS
        self.valid = 1
        self.name = None
        self.sys_params = params
        self.valobj = pointer
        # payload: everything above the low byte (the mask is just ~0xFF)
        self.val = (pointer & ~0x0000000000000000FF) >> 8
        # class identifier from bits 1-3, info bits from bits 4-7
        self.class_bits = (pointer & 0xE) >> 1
        self.i_bits = (pointer & 0xF0) >> 4
        if self.sys_params.is_lion:
            if self.class_bits in TaggedClass_Values_Lion:
                self.name = TaggedClass_Values_Lion[self.class_bits]
            else:
                logger >> "Marking as invalid - not a good tagged pointer for Lion"
                self.valid = 0
        else:
            if self.class_bits in TaggedClass_Values_NMOS:
                self.name = TaggedClass_Values_NMOS[self.class_bits]
            else:
                logger >> "Marking as invalid - not a good tagged pointer for NMOS"
                self.valid = 0

    def is_valid(self):
        return self.valid

    def class_name(self):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid():
            return self.name
        else:
            return 0

    def value(self):
        # the numeric payload, or None when this is not a valid tagged pointer
        return self.val if self.is_valid() else None

    def info_bits(self):
        return self.i_bits if self.is_valid() else None

    def is_kvo(self):
        return 0

    def is_cftype(self):
        return 0

    # we would need to go around looking for the superclass or ask the runtime
    # for now, we seem not to require support for this operation so we will merrily
    # pretend to be at a root point in the hierarchy
    def get_superclass(self):
        return None

    # anything that is handled here is tagged
    def is_tagged(self):
        return 1

    # it seems reasonable to say that a tagged pointer is the size of a pointer
    def instance_size(self, align=0):
        logger = lldb.formatters.Logger.Logger()
        if self.is_valid() == 0:
            return None
        return self.sys_params.pointer_size
class InvalidClass_Data:
    """Sentinel class-data object: always reports itself as invalid."""

    def is_valid(self):
        return 0
class Version:
    """Immutable (major, minor, release, build_string) version quadruple.

    Supports ordering; build strings take part in equality only, since they
    are heavily platform-dependent and might not always be available.
    """

    def __init__(self, major, minor, release, build_string):
        self._major = major
        self._minor = minor
        self._release = release
        self._build_string = build_string

    def get_major(self):
        return self._major

    def get_minor(self):
        return self._minor

    def get_release(self):
        return self._release

    def get_build_string(self):
        return self._build_string

    major = property(get_major, None)
    minor = property(get_minor, None)
    release = property(get_release, None)
    build_string = property(get_build_string, None)

    def __lt__(self, other):
        # bugfix: the original compared each component independently, so e.g.
        # 2.0.0 < 1.5.0 was (wrongly) true because minor 0 < 5; compare
        # lexicographically instead
        if self.major != other.major:
            return self.major < other.major
        if self.minor != other.minor:
            return self.minor < other.minor
        # build strings are not compared since they are heavily platform-dependent and might not always
        # be available
        return self.release < other.release

    def __eq__(self, other):
        return (self.major == other.major) and \
            (self.minor == other.minor) and \
            (self.release == other.release) and \
            (self.build_string == other.build_string)

    # Python 2.6 doesn't have functools.total_ordering, so we have to implement
    # other comparators
    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return not other < self

    def __ge__(self, other):
        return not self < other
# process-wide caches, keyed by the process unique ID (see SystemParameters)
runtime_version = lldb.formatters.cache.Cache()
os_version = lldb.formatters.cache.Cache()
types_caches = lldb.formatters.cache.Cache()
isa_caches = lldb.formatters.cache.Cache()
class SystemParameters:
    """Per-process constants: pointer size, endianness, runtime version, caches."""

    def __init__(self, valobj):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture(valobj)
        self.adjust_for_process(valobj)

    def adjust_for_process(self, valobj):
        """Populate per-process state, reusing the module-level caches."""
        logger = lldb.formatters.Logger.Logger()
        global runtime_version
        global os_version
        global types_caches
        global isa_caches
        process = valobj.GetTarget().GetProcess()
        # using the unique ID for added guarantees (see svn revision 172628 for
        # further details)
        self.pid = process.GetUniqueID()
        if runtime_version.look_for_key(self.pid):
            self.runtime_version = runtime_version.get_value(self.pid)
        else:
            self.runtime_version = ObjCRuntime.runtime_version(process)
            runtime_version.add_item(self.pid, self.runtime_version)
        if os_version.look_for_key(self.pid):
            self.is_lion = os_version.get_value(self.pid)
        else:
            self.is_lion = Utilities.check_is_osx_lion(valobj.GetTarget())
            os_version.add_item(self.pid, self.is_lion)
        if types_caches.look_for_key(self.pid):
            self.types_cache = types_caches.get_value(self.pid)
        else:
            # cache the SBType objects used for raw memory reads
            self.types_cache = lldb.formatters.attrib_fromdict.AttributesDictionary(
                allow_reset=0)
            self.types_cache.addr_type = valobj.GetType(
            ).GetBasicType(lldb.eBasicTypeUnsignedLong)
            self.types_cache.addr_ptr_type = self.types_cache.addr_type.GetPointerType()
            self.types_cache.uint32_t = valobj.GetType(
            ).GetBasicType(lldb.eBasicTypeUnsignedInt)
            types_caches.add_item(self.pid, self.types_cache)
        if isa_caches.look_for_key(self.pid):
            self.isa_cache = isa_caches.get_value(self.pid)
        else:
            self.isa_cache = lldb.formatters.cache.Cache()
            isa_caches.add_item(self.pid, self.isa_cache)

    def adjust_for_architecture(self, valobj):
        """Derive pointer size, 64-bitness and endianness from the process."""
        process = valobj.GetTarget().GetProcess()
        self.pointer_size = process.GetAddressByteSize()
        self.is_64_bit = (self.pointer_size == 8)
        self.endianness = process.GetByteOrder()
        self.is_little = (self.endianness == lldb.eByteOrderLittle)
        self.cfruntime_size = 16 if self.is_64_bit else 8

    # a simple helper function that makes it more explicit that one is calculating
    # an offset that is made up of X pointers and Y bytes of additional data
    # taking into account pointer size - if you know there is going to be some padding
    # you can pass that in and it will be taken into account (since padding may be different between
    # 32 and 64 bit versions, you can pass padding value for both, the right
    # one will be used)
    def calculate_offset(
            self,
            num_pointers=0,
            bytes_count=0,
            padding32=0,
            padding64=0):
        value = bytes_count + num_pointers * self.pointer_size
        return value + padding64 if self.is_64_bit else value + padding32
class ObjCRuntime:
    """Entry point: wraps an SBValue and resolves its ObjC class information."""

    # the ObjC runtime has no explicit "version" field that we can use
    # instead, we discriminate v1 from v2 by looking for the presence
    # of a well-known section only present in v1
    @staticmethod
    def runtime_version(process):
        """Return 1 or 2 for the process's ObjC runtime, or None on failure."""
        logger = lldb.formatters.Logger.Logger()
        if process.IsValid() == 0:
            logger >> "No process - bailing out"
            return None
        target = process.GetTarget()
        num_modules = target.GetNumModules()
        module_objc = None
        for idx in range(num_modules):
            module = target.GetModuleAtIndex(idx)
            if module.GetFileSpec().GetFilename() == 'libobjc.A.dylib':
                module_objc = module
                break
        if module_objc is None or module_objc.IsValid() == 0:
            logger >> "no libobjc - bailing out"
            return None
        # consistency fix: scan the module we found rather than relying on the
        # leftover loop variable
        num_sections = module_objc.GetNumSections()
        section_objc = None
        for idx in range(num_sections):
            section = module_objc.GetSectionAtIndex(idx)
            if section.GetName() == '__OBJC':
                section_objc = section
                break
        if section_objc is not None and section_objc.IsValid():
            logger >> "found __OBJC: v1"
            return 1
        logger >> "no __OBJC: v2"
        return 2

    @staticmethod
    def runtime_from_isa(isa):
        """Build an ObjCRuntime directly from an isa SBValue."""
        logger = lldb.formatters.Logger.Logger()
        runtime = ObjCRuntime(isa)
        runtime.isa = isa
        return runtime

    def __init__(self, valobj):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.adjust_for_architecture()
        self.sys_params = SystemParameters(self.valobj)
        self.unsigned_value = self.valobj.GetValueAsUnsigned()
        self.isa_value = None  # filled lazily by read_isa()

    def adjust_for_architecture(self):
        pass

    # an ObjC pointer can either be tagged or must be aligned
    def is_tagged(self):
        logger = lldb.formatters.Logger.Logger()
        if self.valobj is None:
            return 0
        return (
            Utilities.is_valid_pointer(
                self.unsigned_value,
                self.sys_params.pointer_size,
                allow_tagged=1) and not(
                Utilities.is_valid_pointer(
                    self.unsigned_value,
                    self.sys_params.pointer_size,
                    allow_tagged=0)))

    def is_valid(self):
        logger = lldb.formatters.Logger.Logger()
        if self.valobj is None:
            return 0
        if self.valobj.IsInScope() == 0:
            return 0
        return Utilities.is_valid_pointer(
            self.unsigned_value,
            self.sys_params.pointer_size,
            allow_tagged=1)

    def is_nil(self):
        return self.unsigned_value == 0

    def read_isa(self):
        """Read (and cache) the isa pointer value; None on failure."""
        logger = lldb.formatters.Logger.Logger()
        if self.isa_value is not None:
            logger >> "using cached isa"
            return self.isa_value
        self.isa_pointer = self.valobj.CreateChildAtOffset(
            "cfisa", 0, self.sys_params.types_cache.addr_ptr_type)
        if self.isa_pointer is None or self.isa_pointer.IsValid() == 0:
            logger >> "invalid isa - bailing out"
            return None
        self.isa_value = self.isa_pointer.GetValueAsUnsigned(1)
        if self.isa_value == 1:
            logger >> "invalid isa value - bailing out"
            return None
        # bugfix: previously returned Ellipsis; callers treat any non-None
        # return as success and then read self.isa_value, so return that value
        return self.isa_value

    def read_class_data(self):
        """Return class data (tagged, V1 or V2), using the per-process isa cache."""
        logger = lldb.formatters.Logger.Logger()
        # (removed an erroneous `global isa_cache` statement - the cache is
        # per-process and lives on self.sys_params.isa_cache)
        if self.is_tagged():
            # tagged pointers only exist in ObjC v2
            if self.sys_params.runtime_version == 2:
                logger >> "on v2 and tagged - maybe"
                # not every odd-valued pointer is actually tagged. most are just plain wrong
                # we could try and predetect this before even creating a TaggedClass_Data object
                # but unless performance requires it, this seems a cleaner way
                # to tackle the task
                tentative_tagged = TaggedClass_Data(
                    self.unsigned_value, self.sys_params)
                if tentative_tagged.is_valid():
                    logger >> "truly tagged"
                    return tentative_tagged
                else:
                    logger >> "not tagged - error"
                    return InvalidClass_Data()
            else:
                logger >> "on v1 and tagged - error"
                return InvalidClass_Data()
        if self.is_valid() == 0 or self.read_isa() is None:
            return InvalidClass_Data()
        data = self.sys_params.isa_cache.get_value(
            self.isa_value, default=None)
        if data is not None:
            return data
        if self.sys_params.runtime_version == 2:
            data = Class_Data_V2(self.isa_pointer, self.sys_params)
        else:
            data = Class_Data_V1(self.isa_pointer, self.sys_params)
        if data is None:
            return InvalidClass_Data()
        if data.is_valid():
            self.sys_params.isa_cache.add_item(
                self.isa_value, data, ok_to_replace=1)
        return data
# these classes below can be used by the data formatters to provide a
# consistent message that describes a given runtime-generated situation
class SpecialSituation_Description:
    """Base description of a runtime edge case; subclasses override message()."""

    def message(self):
        return ''
class InvalidPointer_Description(SpecialSituation_Description):
    """Describes a pointer that is nil or otherwise invalid."""

    def __init__(self, nil):
        self.is_nil = nil

    def message(self):
        return '@"<nil>"' if self.is_nil else '<invalid pointer>'
class InvalidISA_Description(SpecialSituation_Description):
    """Describes a value whose isa pointer is not a valid ObjC class."""

    def message(self):
        return '<not an Objective-C object>'
class ThisIsZombie_Description(SpecialSituation_Description):
    """Describes an NSZombie (already-freed) object."""

    def message(self):
        return '<freed object>'
|
|
#Copyright 2007-2009 WebDriver committers
#Copyright 2007-2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import UnexpectedAlertPresentException
import unittest
@pytest.mark.ignore_opera
class AlertsTest(unittest.TestCase):
    """End-to-end tests for JavaScript alert/confirm/prompt handling.

    The harness is expected to provide ``self.driver`` (a live WebDriver)
    and ``self.webserver`` (serving the fixture pages) before these run.

    Fixes applied: ``switch_to.alert`` is a property, not a method (the old
    ``switch_to.alert()`` call always raised and was masked by a bare
    ``except``); deprecated ``assertEquals`` replaced by ``assertEqual``;
    element lookups normalized to ``find_element(By.ID, ...)``.
    """

    def testShouldBeAbleToOverrideTheWindowAlertMethod(self):
        self._loadPage("alerts")
        # Redirect alert() output into the page so no native dialog opens.
        self.driver.execute_script(
            "window.alert = function(msg) { document.getElementById('text').innerHTML = msg; }")
        self.driver.find_element(By.ID, "alert").click()
        try:
            self.assertEqual(self.driver.find_element(By.ID, "text").text, "cheese")
        except Exception as e:
            # If we get here the override likely did not take and a real
            # alert is showing; dismiss it so it cannot poison later tests.
            try:
                self._waitForAlert().dismiss()
            except Exception:
                pass
            raise e

    def testShouldAllowUsersToAcceptAnAlertManually(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowUsersToAcceptAnAlertWithNoTextManually(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "empty-alert").click()
        alert = self._waitForAlert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldGetTextOfAlertOpenedInSetTimeout(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "slow-alert").click()
        # DO NOT WAIT OR SLEEP HERE.
        # This is a regression test for a bug where only the first switchTo
        # call would throw, and only if it happens before the alert loads.
        alert = self._waitForAlert()
        try:
            self.assertEqual("Slow", alert.text)
        finally:
            alert.accept()

    @pytest.mark.ignore_chrome
    def testShouldAllowUsersToDismissAnAlertManually(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert()
        alert.dismiss()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowAUserToAcceptAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "prompt").click()
        alert = self._waitForAlert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowAUserToDismissAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "prompt").click()
        alert = self._waitForAlert()
        alert.dismiss()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowAUserToSetTheValueOfAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "prompt").click()
        alert = self._waitForAlert()
        alert.send_keys("cheese")
        alert.accept()
        result = self.driver.find_element(By.ID, "text").text
        self.assertEqual("cheese", result)

    def testSettingTheValueOfAnAlertThrows(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert()
        try:
            alert.send_keys("cheese")
            self.fail("Expected exception")
        except ElementNotVisibleException:
            pass
        except InvalidElementStateException:
            pass
        finally:
            # Always close the alert so the next test starts clean.
            alert.accept()

    def testAlertShouldNotAllowAdditionalCommandsIfDimissed(self):
        # (method name typo kept: it is part of the suite's public surface)
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert()
        alert.dismiss()
        # Any further interaction with the dismissed alert must fail.
        with self.assertRaises(NoAlertPresentException):
            alert.text

    def testShouldAllowUsersToAcceptAnAlertInAFrame(self):
        self._loadPage("alerts")
        self.driver.switch_to.frame("iframeWithAlert")
        self.driver.find_element(By.ID, "alertInFrame").click()
        alert = self._waitForAlert()
        alert.accept()
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowUsersToAcceptAnAlertInANestedFrame(self):
        self._loadPage("alerts")
        self.driver.switch_to.frame("iframeWithIframe")
        self.driver.switch_to.frame("iframeWithAlert")
        self.driver.find_element(By.ID, "alertInFrame").click()
        alert = self._waitForAlert()
        alert.accept()
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldThrowAnExceptionIfAnAlertHasNotBeenDealtWithAndDismissTheAlert(self):
        pass
        # //TODO(David) Complete this test

    def testPromptShouldUseDefaultValueIfNoKeysSent(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "prompt-with-default").click()
        alert = self._waitForAlert()
        alert.accept()
        txt = self.driver.find_element(By.ID, "text").text
        self.assertEqual("This is a default value", txt)

    def testPromptShouldHaveNullValueIfDismissed(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "prompt-with-default").click()
        alert = self._waitForAlert()
        alert.dismiss()
        self.assertEqual("null", self.driver.find_element(By.ID, "text").text)

    def testHandlesTwoAlertsFromOneInteraction(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "double-prompt").click()
        alert1 = self._waitForAlert()
        alert1.send_keys("brie")
        alert1.accept()
        alert2 = self._waitForAlert()
        alert2.send_keys("cheddar")
        alert2.accept()
        self.assertEqual(self.driver.find_element(By.ID, "text1").text, "brie")
        self.assertEqual(self.driver.find_element(By.ID, "text2").text, "cheddar")

    def testShouldHandleAlertOnPageLoad(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "open-page-with-onload-alert").click()
        alert = self._waitForAlert()
        value = alert.text
        alert.accept()
        self.assertEqual("onload", value)

    def testShouldAllowTheUserToGetTheTextOfAnAlert(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert()
        value = alert.text
        alert.accept()
        self.assertEqual("cheese", value)

    def testUnexpectedAlertPresentExceptionContainsAlertText(self):
        self._loadPage("alerts")
        self.driver.find_element(By.ID, "alert").click()
        alert = self._waitForAlert()
        value = alert.text
        try:
            self._loadPage("simpleTest")
            raise Exception("UnexpectedAlertPresentException should have been thrown")
        except UnexpectedAlertPresentException as uape:
            self.assertEqual(value, uape.alert_text)
            self.assertTrue(str(uape).startswith("Alert Text: %s" % value))

    def _waitForAlert(self):
        """Block for up to 3 seconds until an alert is present, return it."""
        return WebDriverWait(self.driver, 3).until(EC.alert_is_present())

    def _pageURL(self, name):
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        try:
            # Just in case a previous test left an alert open.
            # NOTE: switch_to.alert is a property; calling it raised and was
            # silently swallowed by the old bare except.
            self.driver.switch_to.alert.dismiss()
        except Exception:
            pass
        self.driver.get(self._pageURL(name))
|
|
# coding: utf-8
# In[ ]:
# opengrid imports
from opengrid.library import misc, houseprint, caching, analysis
from opengrid import config
c=config.Config()
# other imports
import pandas as pd
import charts
import numpy as np
import os
# configuration for the plots
DEV = c.get('env', 'type') == 'dev' # DEV is True if we are in development environment, False if on the droplet
if not DEV:
    # production environment: don't try to display plots
    # NOTE: the backend must be selected *before* pyplot is imported below,
    # hence this conditional import order is load-bearing.
    import matplotlib
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import HourLocator, DateFormatter, AutoDateLocator, num2date
if DEV:
    # get_ipython() only exists when running inside IPython/Jupyter — this
    # script is a notebook export and assumes that environment in DEV mode.
    if c.get('env', 'plots') == 'inline':
        get_ipython().magic(u'matplotlib inline')
    else:
        get_ipython().magic(u'matplotlib qt')
else:
    pass # don't try to render plots
# default figure size for all plots created below
plt.rcParams['figure.figsize'] = 12,8
# In[ ]:
hp = houseprint.Houseprint()
sensors = hp.get_sensors(sensortype='electricity')  # sensor objects
# Remove some sensors
exclude = [
    '565de0a7dc64d8370aa321491217b85f'  # 3E
]
solar = [x.key for x in hp.search_sensors(type='electricity', system='solar')]
exclude += solar
# Filter with a new list instead of calling sensors.remove() inside a
# "for s in sensors" loop: mutating a list while iterating it skips the
# element that follows each removed one, so some excluded sensors survived.
sensors = [s for s in sensors if s.key not in exclude]
hp.init_tmpo()
# In[ ]:
#hp.sync_tmpos()
# In[ ]:
# The first time, this will take a very looong time to get all the detailed data for building the cache
# Afterwards, this is quick
caching.cache_results(hp=hp, sensors=sensors, function='daily_min', resultname='elec_daily_min')
caching.cache_results(hp=hp, sensors=sensors, function='daily_max', resultname='elec_daily_max')
# In[ ]:
cache_min = caching.Cache(variable='elec_daily_min')
cache_max = caching.Cache(variable='elec_daily_max')
dfdaymin = cache_min.get(sensors=sensors)
dfdaymax = cache_max.get(sensors=sensors)
# The next plot shows that some periods are missing. Due to the cumulative nature of the electricity counter, we still have the total consumption. However, it is spread out of the entire period. So we don't know the standby power during these days, and we have to remove those days.
# In[ ]:
if DEV:
    sensor = hp.search_sensors(key='3aa4')[0]
    df = sensor.get_data(head=pd.Timestamp('20151117'), tail=pd.Timestamp('20160104'))
    charts.plot(df, stock=True, show='inline')
# In[ ]:
# Clean out the data:
# First remove days with too low values to be realistic
dfdaymin[dfdaymin < 10] = np.nan
# Now remove days where the minimum=maximum (within 1 Watt difference)
dfdaymin[(dfdaymax - dfdaymin) < 1] = np.nan
# In[ ]:
if DEV:
    charts.plot(dfdaymin, stock=True, show='inline')
# In[ ]:
# (removed a bare `DEV` expression left over from notebook cell output)
# In[ ]:
standby_statistics = dfdaymin.T.describe(percentiles=[0.1,0.5,0.9]).T
# In[ ]:
if DEV:
    charts.plot(standby_statistics[['10%', '50%', '90%']], stock=True, show='inline')
# In[ ]:
# Get detailed profiles for the last day
now = pd.Timestamp('now', tz='UTC')
start_of_day = now - pd.Timedelta(hours=now.hour, minutes=now.minute, seconds=now.second)
# Use a list (not a lazy map object) so `sensors` can be iterated more than
# once and behaves the same on Python 2 and 3.
sensors = [hp.find_sensor(column) for column in dfdaymin.columns]
df_details = hp.get_data(sensors=sensors, head=start_of_day)
# ### Boxplot approach. Possible for a period of maximum +/- 2 weeks.
# In[ ]:
# choose a period
look_back_days = 10
start = now - pd.Timedelta(days=look_back_days)
# .loc replaces the deprecated/removed .ix indexer; with a datetime index and
# a Timestamp bound this is the same label-based slice.
dfdaymin_period = dfdaymin.loc[start:].dropna(axis=1, how='all')
# In[ ]:
# one array of daily-minimum values per day, NaNs dropped, for the boxplot
box = [dfdaymin_period.loc[i, :].dropna().values for i in dfdaymin_period.index]
for sensor in dfdaymin_period.columns:
    plt.figure(figsize=(10, 5))
    ax1 = plt.subplot(121)
    ax1.boxplot(box, positions=range(len(box)), notch=False)
    ax1.plot(range(len(box)), dfdaymin_period[sensor], 'rD', ms=10, label='Sluipverbruik')
    xticks = [x.strftime(format='%d/%m') for x in dfdaymin_period.index]
    plt.xticks(range(len(box)), xticks, rotation='vertical')
    plt.title(hp.find_sensor(sensor).device.key + ' - ' + sensor)
    ax1.grid()
    ax1.set_ylabel('Watt')
    plt.legend(numpoints=1, frameon=False)
    ax2 = plt.subplot(122)
    try:
        # last-night detail profile; may be missing for this sensor
        ax2.plot_date(df_details[sensor].index, df_details[sensor].values, 'b-', label='Afgelopen nacht')
        #ax2.xaxis_date() #Put timeseries plot in local time
        # rotate the labels
        plt.xticks(rotation='vertical')
        ax2.set_ylabel('Watt')
        ax2.grid()
        plt.legend(loc='upper right', frameon=False)
        plt.tight_layout()
    except Exception as e:
        print(e)
    else:
        plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_horizontal_' + sensor + '.png'), dpi=100)
    if not DEV:
        plt.close()
# ### Percentile approach. Useful for longer time periods, but tweaking of graph still needed
# In[ ]:
# choose a period
look_back_days = 40
start = now - pd.Timedelta(days=look_back_days)
# .loc replaces the deprecated/removed .ix indexer (same label-based slice).
dfdaymin_period = dfdaymin.loc[start:].dropna(axis=1, how='all')
df = dfdaymin_period.join(standby_statistics[['10%', '50%', '90%']], how='left')
# In[ ]:
for sensor in dfdaymin_period.columns:
    plt.figure(figsize=(10, 8))
    ax1 = plt.subplot(211)
    ax1.plot_date(df.index, df[u'10%'], '-', lw=2, color='g', label=u'10% percentile')
    ax1.plot_date(df.index, df[u'50%'], '-', lw=2, color='orange', label=u'50% percentile')
    ax1.plot_date(df.index, df[u'90%'], '-', lw=2, color='r', label=u'90% percentile')
    ax1.plot_date(df.index, df[sensor], 'rD', ms=7, label='Your standby power')
    ax1.legend()
    locs, lables = plt.xticks()
    xticks = [x.strftime(format='%d/%m') for x in num2date(locs)]
    plt.xticks(locs, xticks, rotation='vertical')
    plt.title(hp.find_sensor(sensor).device.key + ' - ' + sensor)
    ax1.grid()
    ax1.set_ylabel('Watt')
    ax2 = plt.subplot(212)
    try:
        # last-night detail profile; may be missing for this sensor
        ax2.plot_date(df_details[sensor].index, df_details[sensor].values, 'b-', label='Afgelopen nacht')
        #ax2.xaxis_date() #Put timeseries plot in local time
        # rotate the labels
        plt.xticks(rotation='vertical')
        ax2.set_ylabel('Watt')
        ax2.grid()
        plt.legend(loc='upper right', frameon=False)
        plt.tight_layout()
    except Exception as e:
        print(e)
    else:
        plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_vertical_' + sensor + '.png'), dpi=100)
    if not DEV:
        plt.close()
# In[ ]:
|
|
from __future__ import division, unicode_literals
import os
import re
import sys
import time
from ..compat import compat_str
from ..utils import (
encodeFilename,
decodeArgument,
format_bytes,
timeconvert,
)
class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible of downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead.

    Available options:

    verbose:            Print additional info to stdout.
    quiet:              Do not print messages to stdout.
    ratelimit:          Download speed limit, in bytes/sec.
    retries:            Number of times to retry for HTTP error 5xx
    buffersize:         Size of download buffer in bytes.
    noresizebuffer:     Do not automatically resize the download buffer.
    continuedl:         Try to continue downloads if possible.
    noprogress:         Do not print the progress bar.
    logtostderr:        Log messages to stderr instead of stdout.
    consoletitle:       Display progress in console window's titlebar.
    nopart:             Do not use temporary .part files.
    updatetime:         Use the Last-modified header to set output file timestamps.
    test:               Download only first bytes to test the downloader.
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size
    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                        (experimental)
    external_downloader_args:  A list of additional command-line arguments for the
                        external downloader.

    Subclasses of this one must re-define the real_download method.
    """

    _TEST_FILE_SIZE = 10241
    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params
        # report_progress is always the first hook, so progress is printed
        # before any user-supplied hooks see the status dict.
        self.add_progress_hook(self.report_progress)

    @staticmethod
    def format_seconds(seconds):
        """Render a duration as MM:SS or HH:MM:SS ('--:--:--' above 99 h)."""
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        """Return completion percentage, or None if the total is unknown."""
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        """Format a percentage into a fixed-width 6-char field."""
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
    def calc_eta(start, now, total, current):
        """Estimate remaining seconds, or None if it cannot be computed."""
        if total is None:
            return None
        if now is None:
            now = time.time()
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return None
        rate = float(current) / dif
        return int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        """Bytes per second since `start`, or None if unmeasurable."""
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        """Pick the next read size: the measured rate, clamped to [half, double]
        of the previous block and capped at 4 MB."""
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        # An empty suffix indexes position 0 ('b') => multiplier 1.
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        # Route through the YoutubeDL stderr channel; this previously called
        # to_screen, which sent error output to stdout.
        self.ydl.to_stderr(message)

    def to_console_title(self, message):
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, now, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        if now is None:
            now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            # Sleep long enough that byte_counter/elapsed drops to the limit.
            time.sleep(max((byte_counter // rate_limit) - elapsed, 0))

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == '-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + '.part'

    def undo_temp_name(self, filename):
        if filename.endswith('.part'):
            return filename[:-len('.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        """Rename the .part file into place; report (don't raise) on failure."""
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error('unable to rename file: %s' % compat_str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except Exception:
            # Best effort only: a failed utime must never abort the download.
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen('[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        """Print a progress line, overwriting the previous one in-place
        unless progress_with_newline is set."""
        fullmsg = '[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                # cmd.exe cannot clear to end-of-line; pad with spaces so a
                # shorter message fully overwrites the previous, longer one.
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += ' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = '\r'
            else:
                clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title('youtube-dl ' + msg)

    def report_progress(self, s):
        """Default progress hook: render the status dict `s` as a one-line
        progress message."""
        if s['status'] == 'finished':
            if self.params.get('noprogress', False):
                self.to_screen('[download] Download completed')
            else:
                s['_total_bytes_str'] = format_bytes(s['total_bytes'])
                if s.get('elapsed') is not None:
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
                else:
                    msg_template = '100%% of %(_total_bytes_str)s'
                self._report_progress_status(
                    msg_template % s, is_last_line=True)

        if self.params.get('noprogress'):
            return

        if s['status'] != 'downloading':
            return

        if s.get('eta') is not None:
            s['_eta_str'] = self.format_eta(s['eta'])
        else:
            s['_eta_str'] = 'Unknown ETA'

        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
        elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
        else:
            if s.get('downloaded_bytes') == 0:
                s['_percent_str'] = self.format_percent(0)
            else:
                s['_percent_str'] = 'Unknown %'

        if s.get('speed') is not None:
            s['_speed_str'] = self.format_speed(s['speed'])
        else:
            s['_speed_str'] = 'Unknown speed'

        if s.get('total_bytes') is not None:
            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
            msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
        elif s.get('total_bytes_estimate') is not None:
            s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
            msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
        else:
            if s.get('downloaded_bytes') is not None:
                s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
                if s.get('elapsed'):
                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
                else:
                    msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
            else:
                # BUGFIX: the template used to contain a stray ' % ' which is
                # an invalid %-format directive and made `msg_template % s`
                # raise ValueError whenever this branch was reached.
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'

        self._report_progress_status(msg_template % s)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen('[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            # The filename may not be representable in the console encoding.
            self.to_screen('[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen('[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        nooverwrites_and_exists = (
            self.params.get('nooverwrites', False) and
            os.path.exists(encodeFilename(filename))
        )

        continuedl_and_exists = (
            self.params.get('continuedl', True) and
            os.path.isfile(encodeFilename(filename)) and
            not self.params.get('nopart', False)
        )

        # Check file already present
        if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        sleep_interval = self.params.get('sleep_interval')
        if sleep_interval:
            self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
            time.sleep(sleep_interval)

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        # See YoutubeDl.py (search for progress_hooks) for a description of
        # this interface
        self._progress_hooks.append(ph)

    def _debug_cmd(self, args, exe=None):
        """When verbose, print the external-downloader command line."""
        if not self.params.get('verbose', False):
            return

        str_args = [decodeArgument(a) for a in args]

        if exe is None:
            exe = os.path.basename(str_args[0])

        try:
            import pipes
            # BUGFIX: the lambda used to ignore its parameter and close over
            # str_args; quote whatever list is actually passed in.
            shell_quote = lambda args: ' '.join(map(pipes.quote, args))
        except ImportError:
            shell_quote = repr
        self.to_screen('[debug] %s command line: %s' % (
            exe, shell_quote(str_args)))
|
|
import argparse
from pathlib import Path
from typing import Union
import tflite_runtime.interpreter as tflite
from PIL import Image
import numpy as np
def check_args(args: argparse.Namespace):
    """Check the values used in the command-line have acceptable values

    args:
      - args: argparse.Namespace

    returns:
      - None

    raises:
      - FileNotFoundError: if passed files do not exist.
      - IOError: if files are of incorrect format.
      - ValueError: if an unsupported backend is requested.
    """
    input_image_p = args.input_image
    # idiomatic "not in" (was "not x in y"); single-string exception messages
    # below so they print cleanly instead of as a tuple of fragments
    if input_image_p.suffix not in (".png", ".jpg", ".jpeg"):
        raise IOError(
            "--input_image option should point to an image file of the "
            "format .jpg, .jpeg, .png"
        )
    if not input_image_p.exists():
        raise FileNotFoundError("Cannot find {}".format(input_image_p.name))

    model_p = args.model_file
    if model_p.suffix != ".tflite":
        raise IOError("--model_file should point to a tflite file.")
    if not model_p.exists():
        raise FileNotFoundError("Cannot find {}".format(model_p.name))

    label_mapping_p = args.label_file
    if label_mapping_p.suffix != ".txt":
        raise IOError("--label_file expects a .txt file.")
    if not label_mapping_p.exists():
        raise FileNotFoundError("Cannot find {}".format(label_mapping_p.name))

    # check all args given in preferred backends make sense
    supported_backends = ("GpuAcc", "CpuAcc", "CpuRef")
    if not all(backend in supported_backends for backend in args.preferred_backends):
        raise ValueError("Incorrect backends given. Please choose from "
                         "'GpuAcc', 'CpuAcc', 'CpuRef'.")
    return None
def load_image(image_path: Path, model_input_dims: Union[tuple, list], grayscale: bool):
    """Load an image and shape it for the tensorflow lite model.

    args:
      - image_path: pathlib.Path
      - model_input_dims: tuple (or array-like). (height,width)
      - grayscale: bool -> convert to luminance+alpha ("LA") mode when True

    returns:
      - image: np.array with a leading batch axis
    """
    target_height, target_width = model_input_dims
    # PIL's resize() takes (width, height), the reverse of the model dims
    resized = Image.open(image_path).resize((target_width, target_height))
    if grayscale:
        resized = resized.convert("LA")
    # prepend the batch dimension expected by the interpreter
    return np.expand_dims(resized, axis=0)
def load_delegate(delegate_path: Path, backends: list):
    """load the armnn delegate.

    args:
      - delegate_path: pathlib.Path -> location of your libarmnnDelegate.so
      - backends: list -> list of backends you want to use in string format

    returns:
      - armnn_delegate: tflite.delegate
    """
    # the delegate expects the backend list as one comma-separated string
    delegate_options = {
        "backends": ",".join(backends),
        "logging-severity": "info",
    }
    return tflite.load_delegate(library=delegate_path, options=delegate_options)
def load_tf_model(model_path: Path, armnn_delegate: tflite.Delegate):
    """load a tflite model for use with the armnn delegate.

    args:
      - model_path: pathlib.Path
      - armnn_delegate: tflite.TfLiteDelegate

    returns:
      - interpreter: tflite.Interpreter
    """
    tf_interpreter = tflite.Interpreter(
        model_path=model_path.as_posix(),
        experimental_delegates=[armnn_delegate],
    )
    # reserve tensor memory up front so set_tensor/invoke can run immediately
    tf_interpreter.allocate_tensors()
    return tf_interpreter
def run_inference(interpreter, input_image):
    """Feed a preprocessed image through the interpreter and return the raw
    output tensor.

    args:
      - interpreter: tflite_runtime.interpreter.Interpreter
      - input_image: np.array

    returns:
      - output_data: np.array
    """
    # first input / first output tensor indices
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    interpreter.set_tensor(input_index, input_image)
    interpreter.invoke()
    return interpreter.get_tensor(output_index)
def create_mapping(label_mapping_p):
    """Creates a Python dictionary mapping an index to a label.

    label_mapping[idx] = label  (one label per line; each line is kept
    verbatim, including its trailing newline)

    args:
      - label_mapping_p: pathlib.Path

    returns:
      - label_mapping: dict
    """
    with open(label_mapping_p) as label_mapping_raw:
        return {idx: line for idx, line in enumerate(label_mapping_raw)}
def process_output(output_data, label_mapping):
    """Process the output tensor into a label from the labelmapping file.

    Takes the index of the maximum value from the output array.

    args:
      - output_data: np.array
      - label_mapping: dict

    returns:
      - str: labelmapping for max index.
    """
    best_index = int(np.argmax(output_data[0]))
    return label_mapping[best_index]
def main(args):
    """Run the inference for options passed in the command line.

    args:
      - args: argparse.Namespace

    returns:
      - None
    """
    # sanity check on args, then wire delegate -> interpreter
    check_args(args)
    delegate = load_delegate(args.delegate_path, args.preferred_backends)
    interpreter = load_tf_model(args.model_file, delegate)
    # the model's input tensor fixes the (height, width) to resize to
    shape = interpreter.get_input_details()[0]["shape"]
    input_image = load_image(args.input_image, (shape[1], shape[2]), False)
    labelmapping = create_mapping(args.label_file)
    output_prediction = process_output(
        run_inference(interpreter, input_image), labelmapping)
    print("Prediction: ", output_prediction)
    return None
if __name__ == "__main__":
    # CLI entry point; ArgumentDefaultsHelpFormatter shows defaults in --help.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--input_image", help="File path of image file", type=Path, required=True
    )
    parser.add_argument(
        "--model_file",
        help="File path of the model tflite file",
        type=Path,
        required=True,
    )
    parser.add_argument(
        "--label_file",
        help="File path of model labelmapping file",
        type=Path,
        required=True,
    )
    parser.add_argument(
        "--delegate_path",
        help="File path of ArmNN delegate file",
        type=Path,
        required=True,
    )
    parser.add_argument(
        "--preferred_backends",
        help="list of backends in order of preference",
        type=str,
        nargs="+",
        required=False,
        default=["CpuAcc", "CpuRef"],
    )
    args = parser.parse_args()
    main(args)
|
|
from simulux.disks import Disks
from lib.utils import jsonify
# Global disks var to use in all the tests
# NOTE: this single instance is shared and mutated by the tests below
# (add/update/remove), so the tests are order-dependent.
disks = Disks()
def test_init():
    '''
    Test default load of the Disks class

    Fails the test if constructing Disks() raises; previously used the
    Python-2-only print statement, a no-op trailing `assert True`, and a
    local that pointlessly shadowed the module-level `disks`.
    '''
    try:
        Disks()
    except Exception as e:
        # Parenthesized single-argument print works on both Python 2 and 3.
        print("Exception raised: %s" % (e,))
        assert False
def test_get_childrens_path():
    '''
    Test that we can fetch 1st level of children of a path
    '''
    names = [
        '/bin', '/boot', '/dev', '/proc', '/home', '/root', '/sbin',
        '/var', '/etc', '/lost+found', '/lib', '/lib64', '/opt', '/tmp',
        '/mnt', '/sys', '/usr'
    ]
    expected = sorted(unicode(name) for name in names)
    assert sorted(disks.get_childrens_path('/')) == expected
def test_get_parent_path():
    '''
    Test that we can fetch the parent path
    '''
    cases = [
        ('/', '/'),                            # root is its own parent
        ('/etc/hosts', '/etc'),
        ('/some/random/path', '/some/random'),  # path need not exist
    ]
    for path, parent in cases:
        assert disks.get_parent_path(path) == unicode(parent)
def test_get_details_of_mount_point():
    '''
    Test we can fetch the details of a mount point folder
    '''
    expected = {'mount': True, 'size': 1000000, 'owner': 'root',
                'group': 'root', 'mode': 755}
    assert jsonify(disks.get_details('/')) == jsonify(expected)
def test_get_details_of_good_file():
    '''
    Test we can fetch the details of a good file
    '''
    cases = [
        ('/etc/hosts', {'filetype': 'file', 'size': 11, 'owner': 'root',
                        'group': 'root', 'mode': 644}),
        ('/etc', {'filetype': 'folder', 'size': 1111, 'owner': 'root',
                  'group': 'root', 'mode': 755}),
    ]
    for path, expected in cases:
        assert jsonify(disks.get_details(path)) == jsonify(expected)
def test_get_details_of_bad_file():
    '''
    Test we can fetch the details of a bad file
    '''
    # a path that does not exist yields an empty details dict
    assert jsonify(disks.get_details('/some/random/path')) == jsonify({})
def test_add_new_file():
    '''
    Test we can add a new file and the size get propagated
    '''
    disks.add_file('/etc/new_file', size=1000)
    checks = [
        # The new file gets default ownership and mode.
        ('/etc/new_file', {'size': 1000, 'owner': 'root', 'group': 'root',
                           'filetype': 'file', 'mode': 755}),
        # Parent folder grows by the file size (1111 + 1000).
        ('/etc', {'filetype': 'folder', 'size': 2111, 'owner': 'root',
                  'group': 'root', 'mode': 755}),
        # Mount point grows as well (1000000 + 1000).
        ('/', {'mount': True, 'size': 1001000, 'owner': 'root',
               'group': 'root', 'mode': 755}),
    ]
    for path, wanted in checks:
        assert jsonify(disks.get_details(path)) == jsonify(wanted)
def test_update_file():
    '''
    Test we can update an existing file and the size get propagated
    '''
    disks.update_file('/etc/new_file', size=5000, mode=644, owner='foo',
                      group='bar')
    checks = [
        # The file takes the new size, mode and ownership.
        ('/etc/new_file', {'size': 5000, 'owner': 'foo', 'group': 'bar',
                           'filetype': 'file', 'mode': 644}),
        # Folder grows by the size delta (2111 + 4000).
        ('/etc', {'filetype': 'folder', 'size': 6111, 'owner': 'root',
                  'group': 'root', 'mode': 755}),
        # Mount point grows by the same delta (1001000 + 4000).
        ('/', {'mount': True, 'size': 1005000, 'owner': 'root',
               'group': 'root', 'mode': 755}),
    ]
    for path, wanted in checks:
        assert jsonify(disks.get_details(path)) == jsonify(wanted)
def test_remove_file():
    '''
    Test we can remove a file and get the size propagated
    '''
    disks.remove_file('/etc/new_file')
    # The removed file no longer has any details.
    assert jsonify(disks.get_details('/etc/new_file')) == jsonify({})
    checks = [
        # Folder and mount point are back to their original sizes.
        ('/etc', {'filetype': 'folder', 'size': 1111, 'owner': 'root',
                  'group': 'root', 'mode': 755}),
        ('/', {'mount': True, 'size': 1000000, 'owner': 'root',
               'group': 'root', 'mode': 755}),
    ]
    for path, wanted in checks:
        assert jsonify(disks.get_details(path)) == jsonify(wanted)
def test_remove_folder():
    '''
    Test folder removal - fail on non recursive
    '''
    # Non-recursive removal of a folder must be refused: details unchanged.
    disks.remove_file('/etc')
    wanted = {
        'filetype': 'folder',
        'size': 1111,
        'owner': 'root',
        'group': 'root',
        'mode': 755,
    }
    assert jsonify(disks.get_details('/etc')) == jsonify(wanted)
    # Recursive removal succeeds: the folder's details become empty.
    disks.remove_file('/etc', recursive=True)
    assert jsonify(disks.get_details('/etc')) == jsonify({})
    # The mount point shrinks by the removed folder's size.
    root_wanted = {
        'mount': True,
        'size': 998889,
        'owner': 'root',
        'group': 'root',
        'mode': 755,
    }
    assert jsonify(disks.get_details('/')) == jsonify(root_wanted)
def test_remove_mount():
    '''
    Test mount point folder removal - fail
    '''
    wanted = {
        'mount': True,
        'size': 1111,
        'owner': 'root',
        'group': 'root',
        'mode': 755,
    }
    # Neither plain nor recursive removal may delete a mount point.
    disks.remove_file('/boot')
    assert jsonify(disks.get_details('/boot')) == jsonify(wanted)
    disks.remove_file('/boot', recursive=True)
    assert jsonify(disks.get_details('/boot')) == jsonify(wanted)
    # '/' is unchanged from the previous test: '/boot' is a separate mount
    # point, so nothing propagates to it.
    root_wanted = {
        'mount': True,
        'size': 998889,
        'owner': 'root',
        'group': 'root',
        'mode': 755,
    }
    assert jsonify(disks.get_details('/')) == jsonify(root_wanted)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CeilometerApiTests(test.APITestCase):
    """Tests for the Ceilometer bindings in ``openstack_dashboard.api``.

    Each test stubs the ceilometer client with mox, records the exact
    client calls the API wrapper is expected to make, replays, then checks
    the wrapper returns properly wrapped objects. Because mox verifies
    call order and counts, the record/replay sequence in each test must
    not be reordered.

    NOTE(review): the literal expectations (meter value 9, statistic value
    4.55, ids like 'fake_resource_id') come from the fixtures provided by
    ``openstack_dashboard.test`` -- confirm against the test helpers.
    """

    def test_sample_list(self):
        # sample_list() must forward the meter name and query unchanged
        # and wrap each result in api.ceilometer.Sample.
        samples = self.samples.list()
        meter_name = "meter_name"
        ceilometerclient = self.stub_ceilometerclient()
        ceilometerclient.samples = self.mox.CreateMockAnything()
        ceilometerclient.samples.list(meter_name=meter_name, q=[]).\
            AndReturn(samples)
        self.mox.ReplayAll()

        ret_list = api.ceilometer.sample_list(self.request,
                                              meter_name,
                                              query=[])
        for c in ret_list:
            self.assertIsInstance(c, api.ceilometer.Sample)

    def test_meter_list(self):
        # meter_list() wraps each client result in api.ceilometer.Meter.
        meters = self.meters.list()
        ceilometerclient = self.stub_ceilometerclient()
        ceilometerclient.meters = self.mox.CreateMockAnything()
        ceilometerclient.meters.list([]).AndReturn(meters)
        self.mox.ReplayAll()

        ret_list = api.ceilometer.meter_list(self.request, [])
        for m in ret_list:
            self.assertIsInstance(m, api.ceilometer.Meter)

    def test_resource_list(self):
        # resource_list() wraps each client result in
        # api.ceilometer.Resource.
        resources = self.resources.list()
        ceilometerclient = self.stub_ceilometerclient()
        ceilometerclient.resources = self.mox.CreateMockAnything()
        ceilometerclient.resources.list(q=[]).AndReturn(resources)
        self.mox.ReplayAll()

        ret_list = api.ceilometer.resource_list(self.request, query=[])
        for r in ret_list:
            self.assertIsInstance(r, api.ceilometer.Resource)

    def test_statistic_list(self):
        # statistic_list() forwards meter name, period and query, and
        # wraps each result in api.ceilometer.Statistic.
        statistics = self.statistics.list()
        meter_name = "meter_name"
        ceilometerclient = self.stub_ceilometerclient()
        ceilometerclient.statistics = self.mox.CreateMockAnything()
        ceilometerclient.statistics.list(meter_name=meter_name,
                                         period=None, q=[]).\
            AndReturn(statistics)
        self.mox.ReplayAll()

        ret_list = api.ceilometer.statistic_list(self.request,
                                                 meter_name,
                                                 period=None,
                                                 query=[])
        for s in ret_list:
            self.assertIsInstance(s, api.ceilometer.Statistic)

    #TODO(lsmola)
    #test resource aggregates

    @test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
                                                         "get_tenant")})
    def test_global_data_get(self):
        # Wrapper class describing which meters to aggregate and which
        # statistic attribute ('max') to expose per meter.
        class TempUsage(api.base.APIResourceWrapper):
            _attrs = ["id", "tenant", "user", "resource", "fake_meter_1",
                      "fake_meter_2"]

            meters = ["fake_meter_1",
                      "fake_meter_2"]

            default_query = ["Fake query"]
            stats_attr = "max"

        resources = self.resources.list()
        statistics = self.statistics.list()
        user = self.ceilometer_users.list()[0]
        tenant = self.ceilometer_tenants.list()[0]
        ceilometerclient = self.stub_ceilometerclient()

        ceilometerclient.resources = self.mox.CreateMockAnything()
        # I am returning only 1 resource
        ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources[:1])

        ceilometerclient.statistics = self.mox.CreateMockAnything()
        # check that list is called twice for one resource and 2 meters
        ceilometerclient.statistics.list(meter_name=IsA(str),
                                         period=None, q=IsA(list)).\
            AndReturn(statistics)
        ceilometerclient.statistics.list(meter_name=IsA(str),
                                         period=None, q=IsA(list)).\
            AndReturn(statistics)

        api.ceilometer.CeilometerUsage\
            .get_user(IsA(str)).AndReturn(user)
        api.ceilometer.CeilometerUsage\
            .get_tenant(IsA(str)).AndReturn(tenant)

        self.mox.ReplayAll()

        # getting all resources and with statistics
        ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
        data = ceilometer_usage.global_data_get(
            used_cls=TempUsage, query=["fake_query"], with_statistics=True)

        first = data[0]
        self.assertEqual(first.id, 'fake_project_id__fake_user_id__'
                                   'fake_resource_id')
        self.assertEqual(first.user.name, 'user')
        self.assertEqual(first.tenant.name, 'test_tenant')
        self.assertEqual(first.resource, 'fake_resource_id')
        # stats_attr == 'max', so each meter attribute is the 'max' value.
        self.assertEqual(first.fake_meter_1, 9)
        self.assertEqual(first.fake_meter_2, 9)

        # check that only one resource is returned
        self.assertEqual(len(data), 1)

    @test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
                                                         "get_tenant")})
    def test_global_data_get_without_statistic_data(self):
        # With with_statistics=False no statistics calls are recorded and
        # meter attributes must be absent from the wrapped results.
        class TempUsage(api.base.APIResourceWrapper):
            _attrs = ["id", "tenant", "user", "resource", "fake_meter_1",
                      "fake_meter_2"]

            meters = ["fake_meter_1",
                      "fake_meter_2"]

            default_query = ["Fake query"]
            stats_attr = "max"

        resources = self.resources.list()
        user = self.ceilometer_users.list()[0]
        tenant = self.ceilometer_tenants.list()[0]
        ceilometerclient = self.stub_ceilometerclient()

        ceilometerclient.resources = self.mox.CreateMockAnything()
        ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)

        api.ceilometer.CeilometerUsage\
            .get_user(IsA(str)).MultipleTimes().AndReturn(user)
        api.ceilometer.CeilometerUsage\
            .get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)

        self.mox.ReplayAll()

        # getting all resources and with statistics
        ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
        data = ceilometer_usage.global_data_get(
            used_cls=TempUsage, query=["fake_query"], with_statistics=False)

        first = data[0]
        self.assertEqual(first.id, 'fake_project_id__fake_user_id__'
                                   'fake_resource_id')
        self.assertEqual(first.user.name, 'user')
        self.assertEqual(first.tenant.name, 'test_tenant')
        self.assertEqual(first.resource, 'fake_resource_id')
        # Meter attributes were never populated, so access must raise.
        self.assertRaises(AttributeError, getattr, first, 'fake_meter_1')
        self.assertRaises(AttributeError, getattr, first, 'fake_meter_2')

        self.assertEqual(len(data), len(resources))

    @test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
                                                         "get_tenant")})
    def test_global_data_get_all_statistic_data(self):
        # stats_attr=None means each meter attribute carries the whole
        # list of Statistic objects rather than a single scalar.
        class TempUsage(api.base.APIResourceWrapper):
            _attrs = ["id", "tenant", "user", "resource", "fake_meter_1",
                      "fake_meter_2"]

            meters = ["fake_meter_1",
                      "fake_meter_2"]

            default_query = ["Fake query"]
            stats_attr = None  # have to return dictionary with all stats

        resources = self.resources.list()
        statistics = self.statistics.list()
        user = self.ceilometer_users.list()[0]
        tenant = self.ceilometer_tenants.list()[0]
        ceilometerclient = self.stub_ceilometerclient()

        ceilometerclient.resources = self.mox.CreateMockAnything()
        ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)

        ceilometerclient.statistics = self.mox.CreateMockAnything()
        ceilometerclient.statistics.list(meter_name=IsA(str),
                                         period=None, q=IsA(list)).\
            MultipleTimes().\
            AndReturn(statistics)

        api.ceilometer.CeilometerUsage\
            .get_user(IsA(str)).MultipleTimes().AndReturn(user)
        api.ceilometer.CeilometerUsage\
            .get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)

        self.mox.ReplayAll()

        # getting all resources and with statistics
        ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
        data = ceilometer_usage.global_data_get(
            used_cls=TempUsage, query=["fake_query"], with_statistics=True)

        first = data[0]
        self.assertEqual(first.id, 'fake_project_id__fake_user_id__'
                                   'fake_resource_id')
        self.assertEqual(first.user.name, 'user')
        self.assertEqual(first.tenant.name, 'test_tenant')
        self.assertEqual(first.resource, 'fake_resource_id')

        statistic_obj = api.ceilometer.Statistic(statistics[0])
        # check that it returns whole statistic object
        self.assertEqual(vars(first.fake_meter_1[0]), vars(statistic_obj))
        self.assertEqual(vars(first.fake_meter_2[0]), vars(statistic_obj))

        self.assertEqual(len(data), len(resources))

    @test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
                                                         "get_tenant")})
    def test_global_disk_usage(self):
        # global_disk_usage() aggregates the four disk meters per resource.
        resources = self.resources.list()
        statistics = self.statistics.list()
        user = self.ceilometer_users.list()[0]
        tenant = self.ceilometer_tenants.list()[0]
        ceilometerclient = self.stub_ceilometerclient()

        ceilometerclient.resources = self.mox.CreateMockAnything()
        ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)

        ceilometerclient.statistics = self.mox.CreateMockAnything()
        ceilometerclient.statistics.list(meter_name=IsA(str),
                                         period=None, q=IsA(list)).\
            MultipleTimes().\
            AndReturn(statistics)

        api.ceilometer.CeilometerUsage\
            .get_user(IsA(str)).MultipleTimes().AndReturn(user)
        api.ceilometer.CeilometerUsage\
            .get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)

        self.mox.ReplayAll()

        # getting all resources and with statistics
        ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
        data = ceilometer_usage.global_disk_usage(query=["fake_query"],
                                                  with_statistics=True)

        first = data[0]
        self.assertEqual(first.id, 'fake_project_id__fake_user_id__'
                                   'fake_resource_id')
        self.assertEqual(first.user.name, 'user')
        self.assertEqual(first.tenant.name, 'test_tenant')
        self.assertEqual(first.resource, 'fake_resource_id')
        self.assertEqual(first.disk_read_bytes, 4.55)
        self.assertEqual(first.disk_write_bytes, 4.55)
        self.assertEqual(first.disk_read_requests, 4.55)
        self.assertEqual(first.disk_write_requests, 4.55)

        self.assertEqual(len(data), len(resources))

    @test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
                                                         "get_tenant")})
    def test_global_network_traffic_usage(self):
        # global_network_traffic_usage() aggregates the four network
        # traffic meters per resource.
        resources = self.resources.list()
        statistics = self.statistics.list()
        user = self.ceilometer_users.list()[0]
        tenant = self.ceilometer_tenants.list()[0]
        ceilometerclient = self.stub_ceilometerclient()

        ceilometerclient.resources = self.mox.CreateMockAnything()
        ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)

        ceilometerclient.statistics = self.mox.CreateMockAnything()
        ceilometerclient.statistics.list(meter_name=IsA(str),
                                         period=None, q=IsA(list)).\
            MultipleTimes().\
            AndReturn(statistics)

        api.ceilometer.CeilometerUsage\
            .get_user(IsA(str)).MultipleTimes().AndReturn(user)
        api.ceilometer.CeilometerUsage\
            .get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)

        self.mox.ReplayAll()

        # getting all resources and with statistics
        ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
        data = ceilometer_usage.global_network_traffic_usage(
            query=["fake_query"],
            with_statistics=True)

        first = data[0]
        self.assertEqual(first.id, 'fake_project_id__fake_user_id__'
                                   'fake_resource_id')
        self.assertEqual(first.user.name, 'user')
        self.assertEqual(first.tenant.name, 'test_tenant')
        self.assertEqual(first.resource, 'fake_resource_id')
        self.assertEqual(first.network_incoming_bytes, 4.55)
        self.assertEqual(first.network_incoming_packets, 4.55)
        self.assertEqual(first.network_outgoing_bytes, 4.55)
        self.assertEqual(first.network_outgoing_packets, 4.55)

        self.assertEqual(len(data), len(resources))
        self.assertIsInstance(first, api.ceilometer.GlobalNetworkTrafficUsage)

    @test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
                                                         "get_tenant")})
    def test_global_network_usage(self):
        # global_network_usage() aggregates the neutron-object meters
        # (network/subnet/port/router/floating ip and their .create).
        resources = self.resources.list()
        statistics = self.statistics.list()
        user = self.ceilometer_users.list()[0]
        tenant = self.ceilometer_tenants.list()[0]
        ceilometerclient = self.stub_ceilometerclient()

        ceilometerclient.resources = self.mox.CreateMockAnything()
        ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)

        ceilometerclient.statistics = self.mox.CreateMockAnything()
        ceilometerclient.statistics.list(meter_name=IsA(str),
                                         period=None, q=IsA(list)).\
            MultipleTimes().\
            AndReturn(statistics)

        api.ceilometer.CeilometerUsage\
            .get_user(IsA(str)).MultipleTimes().AndReturn(user)
        api.ceilometer.CeilometerUsage\
            .get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)

        self.mox.ReplayAll()

        # getting all resources and with statistics
        ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
        data = ceilometer_usage.global_network_usage(
            query=["fake_query"],
            with_statistics=True)

        first = data[0]
        self.assertEqual(first.id, 'fake_project_id__fake_user_id__'
                                   'fake_resource_id')
        self.assertEqual(first.user.name, 'user')
        self.assertEqual(first.tenant.name, 'test_tenant')
        self.assertEqual(first.resource, 'fake_resource_id')
        self.assertEqual(first.network, 9)
        self.assertEqual(first.network_create, 9)
        self.assertEqual(first.subnet, 9)
        self.assertEqual(first.subnet_create, 9)
        self.assertEqual(first.port, 9)
        self.assertEqual(first.port_create, 9)
        self.assertEqual(first.router, 9)
        self.assertEqual(first.router_create, 9)
        self.assertEqual(first.ip_floating, 9)
        self.assertEqual(first.ip_floating_create, 9)

        self.assertEqual(len(data), len(resources))
        self.assertIsInstance(first, api.ceilometer.GlobalNetworkUsage)

    @test.create_stubs({api.ceilometer.CeilometerUsage: ("get_user",
                                                         "get_tenant")})
    def test_global_object_store_usage(self):
        # global_object_store_usage() aggregates the swift storage meters.
        resources = self.resources.list()
        statistics = self.statistics.list()
        user = self.ceilometer_users.list()[0]
        tenant = self.ceilometer_tenants.list()[0]
        ceilometerclient = self.stub_ceilometerclient()

        ceilometerclient.resources = self.mox.CreateMockAnything()
        ceilometerclient.resources.list(q=IsA(list)).AndReturn(resources)

        ceilometerclient.statistics = self.mox.CreateMockAnything()
        ceilometerclient.statistics.list(meter_name=IsA(str),
                                         period=None, q=IsA(list)).\
            MultipleTimes().\
            AndReturn(statistics)

        api.ceilometer.CeilometerUsage\
            .get_user(IsA(str)).MultipleTimes().AndReturn(user)
        api.ceilometer.CeilometerUsage\
            .get_tenant(IsA(str)).MultipleTimes().AndReturn(tenant)

        self.mox.ReplayAll()

        # getting all resources and with statistics
        ceilometer_usage = api.ceilometer.CeilometerUsage(http.HttpRequest)
        data = ceilometer_usage.global_object_store_usage(
            query=["fake_query"],
            with_statistics=True)

        first = data[0]
        self.assertEqual(first.id, 'fake_project_id__fake_user_id__'
                                   'fake_resource_id')
        self.assertEqual(first.user.name, 'user')
        self.assertEqual(first.tenant.name, 'test_tenant')
        self.assertEqual(first.resource, 'fake_resource_id')
        self.assertEqual(first.storage_objects, 4.55)
        self.assertEqual(first.storage_objects_size, 4.55)
        self.assertEqual(first.storage_objects_incoming_bytes, 4.55)
        self.assertEqual(first.storage_objects_outgoing_bytes, 4.55)

        self.assertEqual(len(data), len(resources))
        self.assertIsInstance(first, api.ceilometer.GlobalObjectStoreUsage)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implements a source for reading Avro files."""
import cStringIO
import os
import zlib
import avro
from avro import datafile
from avro import io as avroio
from avro import schema
import apache_beam as beam
from apache_beam.io import filebasedsource
from apache_beam.io import filebasedsink
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.transforms import PTransform
__all__ = ['ReadFromAvro', 'WriteToAvro']
class ReadFromAvro(PTransform):
  """A ``PTransform`` that reads records from a set of Avro files.

  The transform wraps an ``_AvroSource`` built from the given file pattern.
  If '/mypath/myavrofiles*' points to a set of Avro files, a ``PCollection``
  of their records can be created with::

    p = df.Pipeline(argv=pipeline_args)
    records = p | 'Read' >> df.io.ReadFromAvro('/mypath/myavrofiles*')

  Records of simple Avro types are mapped to the corresponding Python
  types. Records of Avro type 'RECORD' become Python dictionaries that
  comply with the schema embedded in the file: keys are the field names
  (``string``) and values have the schema-declared types. For example,
  with the schema::

    {"namespace": "example.avro","type": "record","name": "User","fields":
    [{"name": "name", "type": "string"},
    {"name": "favorite_number",  "type": ["int", "null"]},
    {"name": "favorite_color", "type": ["string", "null"]}]}

  the produced records look like::

    {u'name': u'Alyssa', u'favorite_number': 256, u'favorite_color': None}
  """

  def __init__(self, file_pattern=None, min_bundle_size=0, validate=True):
    """Initializes ``ReadFromAvro``.

    Args:
      file_pattern: the set of files to be read.
      min_bundle_size: the minimum size in bytes, to be considered when
                       splitting the input into bundles.
      validate: flag to verify that the files exist during the pipeline
                creation time.
    """
    super(ReadFromAvro, self).__init__()
    self._source = _AvroSource(file_pattern, min_bundle_size,
                               validate=validate)

  def expand(self, pvalue):
    # Defer to the standard Read transform over the wrapped source.
    return pvalue.pipeline | Read(self._source)

  def display_data(self):
    return {'source_dd': self._source}
class _AvroUtils(object):
  """Helpers for parsing the Avro object container file format."""

  @staticmethod
  def read_meta_data_from_file(f):
    """Reads metadata from a given Avro file.

    Args:
      f: Avro file to read.
    Returns:
      a tuple containing the codec, schema, and the sync marker of the Avro
      file.
    Raises:
      ValueError: if the file does not start with the byte sequence defined
        in the specification.
    """
    if f.tell() > 0:
      f.seek(0)
    decoder = avroio.BinaryDecoder(f)
    header = avroio.DatumReader().read_data(datafile.META_SCHEMA,
                                            datafile.META_SCHEMA, decoder)
    if header.get('magic') != datafile.MAGIC:
      # Bug fix: the placeholders were previously passed as extra
      # ValueError arguments, so the message was never %-formatted (and a
      # space was missing between 'but' and 'started').
      raise ValueError('Not an Avro file. File header should start with %s '
                       'but started with %s instead.'
                       % (datafile.MAGIC, header.get('magic')))

    meta = header['meta']

    if datafile.CODEC_KEY in meta:
      codec = meta[datafile.CODEC_KEY]
    else:
      codec = 'null'

    schema_string = meta[datafile.SCHEMA_KEY]
    sync_marker = header['sync']

    return codec, schema_string, sync_marker

  @staticmethod
  def read_block_from_file(f, codec, schema, expected_sync_marker):
    """Reads a block from a given Avro file.

    Args:
      f: Avro file to read.
      codec: The codec to use for block-level decompression.
        Supported codecs: 'null', 'deflate', 'snappy'
      schema: Avro Schema definition represented as JSON string.
      expected_sync_marker: Avro synchronization marker. If the block's sync
        marker does not match with this parameter then ValueError is thrown.
    Returns:
      A single _AvroBlock.
    Raises:
      ValueError: If the block cannot be read properly because the file
        doesn't match the specification.
    """
    offset = f.tell()
    decoder = avroio.BinaryDecoder(f)
    num_records = decoder.read_long()
    block_size = decoder.read_long()
    block_bytes = decoder.read(block_size)
    sync_marker = decoder.read(len(expected_sync_marker))
    if sync_marker != expected_sync_marker:
      # Bug fix: %-format the markers into the message instead of passing
      # them as extra ValueError arguments.
      raise ValueError('Unexpected sync marker (actual "%s" vs expected '
                       '"%s"). Maybe the underlying avro file is corrupted?'
                       % (sync_marker, expected_sync_marker))
    size = f.tell() - offset
    return _AvroBlock(block_bytes, num_records, codec, schema, offset, size)

  @staticmethod
  def advance_file_past_next_sync_marker(f, sync_marker):
    """Advances ``f`` to just past the next occurrence of ``sync_marker``.

    Returns True when a marker was found; returns None (falsy) when EOF is
    reached without finding one.
    """
    buf_size = 10000

    data = f.read(buf_size)
    while data:
      pos = data.find(sync_marker)
      if pos >= 0:
        # Adjusting the current position to the ending position of the sync
        # marker.
        backtrack = len(data) - pos - len(sync_marker)
        f.seek(-1 * backtrack, os.SEEK_CUR)
        return True
      else:
        if f.tell() >= len(sync_marker):
          # Backtracking in case we partially read the sync marker during
          # the previous read. We only have to backtrack if there are at
          # least len(sync_marker) bytes before current position. We only
          # have to backtrack (len(sync_marker) - 1) bytes.
          f.seek(-1 * (len(sync_marker) - 1), os.SEEK_CUR)
        data = f.read(buf_size)
class _AvroBlock(object):
  """Represents a block of an Avro file."""

  def __init__(self, block_bytes, num_records, codec, schema_string,
               offset, size):
    # Decompress data early on (if needed) and thus decrease the number of
    # parallel copies of the data in memory at any given in time during
    # block iteration.
    self._decompressed_block_bytes = self._decompress_bytes(block_bytes,
                                                            codec)
    self._num_records = num_records
    self._schema = schema.parse(schema_string)
    self._offset = offset
    self._size = size

  def size(self):
    """Returns the on-disk size of the block, in bytes."""
    return self._size

  def offset(self):
    """Returns the file offset at which the block starts."""
    return self._offset

  @staticmethod
  def _decompress_bytes(data, codec):
    """Returns ``data`` decompressed according to ``codec``.

    Raises:
      ValueError: for an unknown codec, or when 'snappy' is requested but
        the snappy package is not installed.
    """
    if codec == 'null':
      return data
    elif codec == 'deflate':
      # zlib.MAX_WBITS is the window size. '-' sign indicates that this is
      # raw data (without headers). See zlib and Avro documentations for
      # more details.
      return zlib.decompress(data, -zlib.MAX_WBITS)
    elif codec == 'snappy':
      # Snappy is an optional avro codec.
      # See Snappy and Avro documentation for more details.
      try:
        import snappy
      except ImportError:
        raise ValueError('Snappy does not seem to be installed.')

      # Compressed data includes a 4-byte CRC32 checksum which we verify.
      # We take care to avoid extra copies of data while slicing large
      # objects by use of a buffer.
      result = snappy.decompress(buffer(data)[:-4])
      avroio.BinaryDecoder(cStringIO.StringIO(data[-4:])).check_crc32(result)
      return result
    else:
      # Bug fix: %-format the codec into the message; previously it was
      # passed as a second ValueError argument, leaving '%r' unexpanded.
      raise ValueError('Unknown codec: %r' % (codec,))

  def num_records(self):
    """Returns the number of records stored in this block."""
    return self._num_records

  def records(self):
    """Yields the decoded records of this block, in order."""
    decoder = avroio.BinaryDecoder(
        cStringIO.StringIO(self._decompressed_block_bytes))
    reader = avroio.DatumReader(
        writers_schema=self._schema, readers_schema=self._schema)

    current_record = 0
    while current_record < self._num_records:
      yield reader.read(decoder)
      current_record += 1
class _AvroSource(filebasedsource.FileBasedSource):
  """A source for reading Avro files.

  ``_AvroSource`` is implemented using the file-based source framework
  available in module 'filebasedsource'. Hence please refer to module
  'filebasedsource' to fully understand how this source implements
  operations common to all file-based sources such as file-pattern
  expansion and splitting into bundles for parallel processing.
  """

  def read_records(self, file_name, range_tracker):
    """Yields the records of the blocks claimed within the tracked range."""
    # Start offset of the block after the one currently being read; -1
    # until the first block has been read. The closure below reads it.
    next_block_start = -1

    def split_points_unclaimed(stop_position):
      if next_block_start >= stop_position:
        # Next block starts at or after the suggested stop position. Hence
        # there will not be split points to be claimed for the range ending
        # at suggested stop position.
        return 0

      return iobase.RangeTracker.SPLIT_POINTS_UNKNOWN

    range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)

    start_offset = range_tracker.start_position()
    if start_offset is None:
      start_offset = 0

    with self.open_file(file_name) as f:
      codec, schema_string, sync_marker = _AvroUtils.read_meta_data_from_file(
          f)

      # We have to start at current position if previous bundle ended at the
      # end of a sync marker.
      start_offset = max(0, start_offset - len(sync_marker))
      f.seek(start_offset)
      _AvroUtils.advance_file_past_next_sync_marker(f, sync_marker)

      # Claim each block's start offset before reading it; stop once the
      # tracker refuses the claim (block belongs to another bundle).
      while range_tracker.try_claim(f.tell()):
        block = _AvroUtils.read_block_from_file(f, codec, schema_string,
                                                sync_marker)
        next_block_start = block.offset() + block.size()
        for record in block.records():
          yield record
class WriteToAvro(beam.transforms.PTransform):
  """A ``PTransform`` for writing avro files."""

  def __init__(self,
               file_path_prefix,
               schema,
               codec='deflate',
               file_name_suffix='',
               num_shards=0,
               shard_name_template=None,
               mime_type='application/x-avro'):
    """Initialize a WriteToAvro transform.

    Args:
      file_path_prefix: The file path to write to. The files written will
        begin with this prefix, followed by a shard identifier (see
        num_shards), and end in a common extension, if given by
        file_name_suffix. In most cases, only this argument is specified
        and num_shards, shard_name_template, and file_name_suffix use
        default values.
      schema: The schema to use, as returned by avro.schema.parse
      codec: The codec to use for block-level compression. Any string
        supported by the Avro specification is accepted (for example
        'null').
      file_name_suffix: Suffix for the files written.
      num_shards: The number of files (shards) used for output. If not set,
        the service will decide on the optimal number of shards.
        Constraining the number of shards is likely to reduce the
        performance of a pipeline. Setting this value is not recommended
        unless you require a specific number of output files.
      shard_name_template: A template string containing placeholders for
        the shard number and shard count. Currently only '' and
        '-SSSSS-of-NNNNN' are patterns accepted by the service.
        When constructing a filename for a particular shard number, the
        upper-case letters 'S' and 'N' are replaced with the 0-padded shard
        number and shard count respectively. This argument can be '' in
        which case it behaves as if num_shards was set to 1 and only one
        file will be generated. The default pattern used is
        '-SSSSS-of-NNNNN'.
      mime_type: The MIME type to use for the produced files, if the
        filesystem supports specifying MIME types.

    Returns:
      A WriteToAvro transform usable for writing.
    """
    # Consistency fix: initialize the PTransform base class, as
    # ReadFromAvro in this module already does; it was skipped here.
    super(WriteToAvro, self).__init__()
    self._sink = _AvroSink(file_path_prefix, schema, codec, file_name_suffix,
                           num_shards, shard_name_template, mime_type)

  def expand(self, pcoll):
    # Defer to the standard Write transform over the wrapped sink.
    return pcoll | beam.io.iobase.Write(self._sink)

  def display_data(self):
    return {'sink_dd': self._sink}
class _AvroSink(filebasedsink.FileBasedSink):
  """A sink to avro files."""

  def __init__(self,
               file_path_prefix,
               schema,
               codec,
               file_name_suffix,
               num_shards,
               shard_name_template,
               mime_type):
    super(_AvroSink, self).__init__(
        file_path_prefix,
        file_name_suffix=file_name_suffix,
        num_shards=num_shards,
        shard_name_template=shard_name_template,
        coder=None,
        mime_type=mime_type,
        # Compression happens at the block level using the supplied codec,
        # and not at the file level.
        compression_type=CompressionTypes.UNCOMPRESSED)
    self._schema = schema
    self._codec = codec

  def open(self, temp_path):
    """Opens ``temp_path`` and wraps it in an Avro DataFileWriter."""
    file_handle = super(_AvroSink, self).open(temp_path)
    return avro.datafile.DataFileWriter(
        file_handle, avro.io.DatumWriter(), self._schema, self._codec)

  def write_record(self, writer, value):
    """Appends a single record to the open DataFileWriter."""
    writer.append(value)

  def display_data(self):
    # Bug fix: name the class explicitly. ``super(self.__class__, self)``
    # resolves against the runtime type, causing infinite recursion if this
    # class is ever subclassed; the rest of the class already uses
    # ``super(_AvroSink, self)``.
    res = super(_AvroSink, self).display_data()
    res['codec'] = str(self._codec)
    res['schema'] = str(self._schema)
    return res
|
|
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for representing collections for the Google Cloud Firestore API."""
import random
import warnings
import six
from google.cloud.firestore_v1beta1 import _helpers
from google.cloud.firestore_v1beta1 import query as query_mod
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.watch import Watch
from google.cloud.firestore_v1beta1 import document
_AUTO_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
class CollectionReference(object):
    """A reference to a collection in a Firestore database.
    The collection may already exist or this class can facilitate creation
    of documents within the collection.
    Args:
        path (Tuple[str, ...]): The components in the collection path.
            This is a series of strings representing each collection and
            sub-collection ID, as well as the document IDs for any documents
            that contain a sub-collection.
        kwargs (dict): The keyword arguments for the constructor. The only
            supported keyword is ``client`` and it must be a
            :class:`~google.cloud.firestore_v1beta1.client.Client` if
            provided. It represents the client that created this collection
            reference.
    Raises:
        ValueError: if
            * the ``path`` is empty
            * there are an even number of elements
            * a collection ID in ``path`` is not a string
            * a document ID in ``path`` is not a string
        TypeError: If a keyword other than ``client`` is used.
    """
    def __init__(self, *path, **kwargs):
        # Fail fast on malformed paths (a collection path must have an
        # odd number of components).
        _helpers.verify_path(path, is_collection=True)
        self._path = path
        # ``client`` may be None here; it is only needed once an
        # RPC-issuing method is invoked.
        self._client = kwargs.pop("client", None)
        if kwargs:
            raise TypeError(
                "Received unexpected arguments", kwargs, "Only `client` is supported"
            )
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 -- confirm callers never use these as
        # dict keys / set members.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._path == other._path and self._client == other._client
    @property
    def id(self):
        """The collection identifier.
        Returns:
            str: The last component of the path.
        """
        return self._path[-1]
    @property
    def parent(self):
        """Document that owns the current collection.
        Returns:
            Optional[~.firestore_v1beta1.document.DocumentReference]: The
            parent document, if the current collection is not a
            top-level collection.
        """
        if len(self._path) == 1:
            # A single component means a top-level collection: no parent.
            return None
        else:
            parent_path = self._path[:-1]
            return self._client.document(*parent_path)
    def document(self, document_id=None):
        """Create a sub-document underneath the current collection.
        Args:
            document_id (Optional[str]): The document identifier
                within the current collection. If not provided, will default
                to a random 20 character string composed of digits,
                uppercase and lowercase and letters.
        Returns:
            ~.firestore_v1beta1.document.DocumentReference: The child
            document.
        """
        if document_id is None:
            document_id = _auto_id()
        child_path = self._path + (document_id,)
        return self._client.document(*child_path)
    def _parent_info(self):
        """Get fully-qualified parent path and prefix for this collection.
        Returns:
            Tuple[str, str]: Pair of
            * the fully-qualified (with database and project) path to the
              parent of this collection (will either be the database path
              or a document path).
            * the prefix to a document in this collection.
        """
        parent_doc = self.parent
        if parent_doc is None:
            # Top-level collection: the parent is the database itself.
            parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join(
                (self._client._database_string, "documents")
            )
        else:
            parent_path = parent_doc._document_path
        expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id))
        return parent_path, expected_prefix
    def add(self, document_data, document_id=None):
        """Create a document in the Firestore database with the provided data.
        Args:
            document_data (dict): Property names and values to use for
                creating the document.
            document_id (Optional[str]): The document identifier within the
                current collection. If not provided, an ID will be
                automatically assigned by the server (the assigned ID will be
                a random 20 character string composed of digits,
                uppercase and lowercase letters).
        Returns:
            Tuple[google.protobuf.timestamp_pb2.Timestamp, \
                ~.firestore_v1beta1.document.DocumentReference]: Pair of
            * The ``update_time`` when the document was created (or
              overwritten).
            * A document reference for the created document.
        Raises:
            ~google.cloud.exceptions.Conflict: If ``document_id`` is provided
                and the document already exists.
        """
        if document_id is None:
            parent_path, expected_prefix = self._parent_info()
            document_pb = document_pb2.Document()
            # Create an *empty* document first so the server assigns the
            # ID; the data is then written via ``set`` below.
            created_document_pb = self._client._firestore_api.create_document(
                parent_path,
                collection_id=self.id,
                document_id=None,
                document=document_pb,
                mask=None,
                metadata=self._client._rpc_metadata,
            )
            new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix)
            document_ref = self.document(new_document_id)
            set_result = document_ref.set(document_data)
            return set_result.update_time, document_ref
        else:
            document_ref = self.document(document_id)
            # ``create`` (unlike ``set``) raises Conflict when the
            # document already exists.
            write_result = document_ref.create(document_data)
            return write_result.update_time, document_ref
    def list_documents(self, page_size=None):
        """List all subdocuments of the current collection.
        Args:
            page_size (Optional[int]]): The maximum number of documents
                in each page of results from this request. Non-positive values
                are ignored. Defaults to a sensible value set by the API.
        Returns:
            Sequence[~.firestore_v1beta1.collection.DocumentReference]:
                iterator of subdocuments of the current collection. If the
                collection does not exist at the time of `snapshot`, the
                iterator will be empty
        """
        parent, _ = self._parent_info()
        iterator = self._client._firestore_api.list_documents(
            parent,
            self.id,
            page_size=page_size,
            show_missing=True,
            metadata=self._client._rpc_metadata,
        )
        # Patch the GAPIC page iterator so each raw Document resource is
        # converted into a DocumentReference bound to this collection.
        iterator.collection = self
        iterator.item_to_value = _item_to_document_ref
        return iterator
    def select(self, field_paths):
        """Create a "select" query with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.select` for
        more information on this method.
        Args:
            field_paths (Iterable[str, ...]): An iterable of field paths
                (``.``-delimited list of field names) to use as a projection
                of document fields in the query results.
        Returns:
            ~.firestore_v1beta1.query.Query: A "projected" query.
        """
        query = query_mod.Query(self)
        return query.select(field_paths)
    def where(self, field_path, op_string, value):
        """Create a "where" query with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.where` for
        more information on this method.
        Args:
            field_path (str): A field path (``.``-delimited list of
                field names) for the field to filter on.
            op_string (str): A comparison operation in the form of a string.
                Acceptable values are ``<``, ``<=``, ``==``, ``>=``
                and ``>``.
            value (Any): The value to compare the field against in the filter.
                If ``value`` is :data:`None` or a NaN, then ``==`` is the only
                allowed operation.
        Returns:
            ~.firestore_v1beta1.query.Query: A filtered query.
        """
        query = query_mod.Query(self)
        return query.where(field_path, op_string, value)
    def order_by(self, field_path, **kwargs):
        """Create an "order by" query with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by` for
        more information on this method.
        Args:
            field_path (str): A field path (``.``-delimited list of
                field names) on which to order the query results.
            kwargs (Dict[str, Any]): The keyword arguments to pass along
                to the query. The only supported keyword is ``direction``, see
                :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`
                for more information.
        Returns:
            ~.firestore_v1beta1.query.Query: An "order by" query.
        """
        query = query_mod.Query(self)
        return query.order_by(field_path, **kwargs)
    def limit(self, count):
        """Create a limited query with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.limit` for
        more information on this method.
        Args:
            count (int): Maximum number of documents to return that match
                the query.
        Returns:
            ~.firestore_v1beta1.query.Query: A limited query.
        """
        query = query_mod.Query(self)
        return query.limit(count)
    def offset(self, num_to_skip):
        """Skip to an offset in a query with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.offset` for
        more information on this method.
        Args:
            num_to_skip (int): The number of results to skip at the beginning
                of query results. (Must be non-negative.)
        Returns:
            ~.firestore_v1beta1.query.Query: An offset query.
        """
        query = query_mod.Query(self)
        return query.offset(num_to_skip)
    def start_at(self, document_fields):
        """Start query at a cursor with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.start_at` for
        more information on this method.
        Args:
            document_fields (Union[~.firestore_v1beta1.\
                document.DocumentSnapshot, dict, list, tuple]): a document
                snapshot or a dictionary/list/tuple of fields representing a
                query results cursor. A cursor is a collection of values that
                represent a position in a query result set.
        Returns:
            ~.firestore_v1beta1.query.Query: A query with cursor.
        """
        query = query_mod.Query(self)
        return query.start_at(document_fields)
    def start_after(self, document_fields):
        """Start query after a cursor with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.start_after` for
        more information on this method.
        Args:
            document_fields (Union[~.firestore_v1beta1.\
                document.DocumentSnapshot, dict, list, tuple]): a document
                snapshot or a dictionary/list/tuple of fields representing a
                query results cursor. A cursor is a collection of values that
                represent a position in a query result set.
        Returns:
            ~.firestore_v1beta1.query.Query: A query with cursor.
        """
        query = query_mod.Query(self)
        return query.start_after(document_fields)
    def end_before(self, document_fields):
        """End query before a cursor with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.end_before` for
        more information on this method.
        Args:
            document_fields (Union[~.firestore_v1beta1.\
                document.DocumentSnapshot, dict, list, tuple]): a document
                snapshot or a dictionary/list/tuple of fields representing a
                query results cursor. A cursor is a collection of values that
                represent a position in a query result set.
        Returns:
            ~.firestore_v1beta1.query.Query: A query with cursor.
        """
        query = query_mod.Query(self)
        return query.end_before(document_fields)
    def end_at(self, document_fields):
        """End query at a cursor with this collection as parent.
        See
        :meth:`~google.cloud.firestore_v1beta1.query.Query.end_at` for
        more information on this method.
        Args:
            document_fields (Union[~.firestore_v1beta1.\
                document.DocumentSnapshot, dict, list, tuple]): a document
                snapshot or a dictionary/list/tuple of fields representing a
                query results cursor. A cursor is a collection of values that
                represent a position in a query result set.
        Returns:
            ~.firestore_v1beta1.query.Query: A query with cursor.
        """
        query = query_mod.Query(self)
        return query.end_at(document_fields)
    def get(self, transaction=None):
        """Deprecated alias for :meth:`stream`."""
        warnings.warn(
            "'Collection.get' is deprecated:  please use 'Collection.stream' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.stream(transaction=transaction)
    def stream(self, transaction=None):
        """Read the documents in this collection.
        This sends a ``RunQuery`` RPC and then returns an iterator which
        consumes each document returned in the stream of ``RunQueryResponse``
        messages.
        .. note::
            The underlying stream of responses will time out after
            the ``max_rpc_timeout_millis`` value set in the GAPIC
            client configuration for the ``RunQuery`` API.  Snapshots
            not consumed from the iterator before that point will be lost.
        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).
        Args:
            transaction (Optional[~.firestore_v1beta1.transaction.\
                Transaction]): An existing transaction that the query will
                run in.
        Yields:
            ~.firestore_v1beta1.document.DocumentSnapshot: The next
            document that fulfills the query.
        """
        query = query_mod.Query(self)
        return query.stream(transaction=transaction)
    def on_snapshot(self, callback):
        """Monitor the documents in this collection.
        This starts a watch on this collection using a background thread. The
        provided callback is run on the snapshot of the documents.
        Args:
            callback(~.firestore.collection.CollectionSnapshot): a callback
                to run when a change occurs.
        Example:
            from google.cloud import firestore_v1beta1
            db = firestore_v1beta1.Client()
            collection_ref = db.collection(u'users')
            def on_snapshot(collection_snapshot):
                for doc in collection_snapshot.documents:
                    print(u'{} => {}'.format(doc.id, doc.to_dict()))
            # Watch this collection
            collection_watch = collection_ref.on_snapshot(on_snapshot)
            # Terminate this watch
            collection_watch.unsubscribe()
        """
        # Delegates to the Watch helper, which spawns the background
        # thread and wires snapshots/references to the callback.
        return Watch.for_query(
            query_mod.Query(self),
            callback,
            document.DocumentSnapshot,
            document.DocumentReference,
        )
def _auto_id():
    """Generate a "random" automatically generated ID.
    Returns:
        str: A 20 character string composed of digits, uppercase and
        lowercase and letters.
    """
    # Draw 20 independent characters, then join them in one pass.
    picked = [random.choice(_AUTO_ID_CHARS) for _ in six.moves.xrange(20)]
    return "".join(picked)
def _item_to_document_ref(iterator, item):
    """Convert Document resource to document ref.
    Args:
        iterator (google.api_core.page_iterator.GRPCIterator):
            iterator response
        item (dict): document resource
    """
    # The document ID is everything after the final path delimiter.
    _, _, document_id = item.name.rpartition(_helpers.DOCUMENT_PATH_DELIMITER)
    return iterator.collection.document(document_id)
|
|
#!/usr/bin/env python3
# Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, pickle, os, shutil, subprocess, gzip, platform
from glob import glob
def do_install(datafilename):
    """Load the pickled install description and run every install phase.

    Args:
        datafilename (str): path to the pickled install data file that
            Meson produced at configure time.
    """
    # Context manager so the data file is always closed (the original
    # leaked the file handle).
    with open(datafilename, 'rb') as ifile:
        d = pickle.load(ifile)
    # DESTDIR is the standard staged-install override used by packagers.
    d.destdir = os.environ.get('DESTDIR', '')
    d.fullprefix = d.destdir + d.prefix
    install_subdirs(d) # Must be first, because it needs to delete the old subtree.
    install_targets(d)
    install_headers(d)
    install_man(d)
    install_data(d)
    install_po(d)
    run_install_script(d)
def install_subdirs(d):
    """Copy each configured source subdirectory into the install tree."""
    for (src_dir, dst_dir) in d.install_subdirs:
        # Absolute destinations are only offset by DESTDIR; relative
        # ones live under the full installation prefix.
        if os.path.isabs(dst_dir):
            root = d.destdir
        else:
            root = d.fullprefix
        dst_dir = root + dst_dir
        # copytree needs a fresh target, so copy the source directory in
        # as a child of the destination.
        last_level = os.path.split(src_dir)[-1]
        final_dst = os.path.join(dst_dir, last_level)
        # Don't do rmtree because final_dst might point to e.g. /var/www
        # We might need to revert to walking the directory tree by hand.
        shutil.copytree(src_dir, final_dst, symlinks=True)
        print('Installing subdir %s to %s.' % (src_dir, dst_dir))
def install_po(d):
    """Install compiled gettext catalogs under <locale>/<lang>/LC_MESSAGES."""
    packagename = d.po_package_name
    for (srcfile, localedir, languagename) in d.po:
        outfile = os.path.join(d.fullprefix, localedir, languagename,
                               'LC_MESSAGES', packagename + '.mo')
        os.makedirs(os.path.split(outfile)[0], exist_ok=True)
        shutil.copyfile(srcfile, outfile)
        shutil.copystat(srcfile, outfile)
        print('Installing %s to %s.' % (srcfile, outfile))
def install_data(d):
    """Copy plain data files to their configured destinations."""
    for i in d.data:
        fullfilename = i[0]
        outfilename = i[1]
        head, tail = os.path.split(outfilename)
        if os.path.isabs(outfilename):
            # Absolute targets are only re-rooted by DESTDIR.
            outdir = d.destdir + head
            outfilename = d.destdir + outfilename
        else:
            # Relative targets live under the full install prefix.
            outdir = os.path.join(d.fullprefix, head)
            outfilename = os.path.join(outdir, tail)
        os.makedirs(outdir, exist_ok=True)
        print('Installing %s to %s.' % (fullfilename, outdir))
        shutil.copyfile(fullfilename, outfilename)
        shutil.copystat(fullfilename, outfilename)
def install_man(d):
    """Install man pages.

    When the target name ends in ``.gz`` but the source is uncompressed,
    the page is gzip-compressed on the fly.
    """
    for m in d.man:
        full_source_filename = m[0]
        outfileroot = m[1]
        outfilename = os.path.join(d.fullprefix, outfileroot)
        outdir = os.path.split(outfilename)[0]
        os.makedirs(outdir, exist_ok=True)
        print('Installing %s to %s.' % (full_source_filename, outdir))
        if outfilename.endswith('.gz') and not full_source_filename.endswith('.gz'):
            # Compress on the fly; use context managers so neither file
            # handle leaks (the original left both unclosed).
            with open(full_source_filename, 'rb') as infile:
                data = infile.read()
            with open(outfilename, 'wb') as outfile:
                outfile.write(gzip.compress(data))
        else:
            shutil.copyfile(full_source_filename, outfilename)
            shutil.copystat(full_source_filename, outfilename)
def install_headers(d):
    """Copy header files into their target include directory."""
    for t in d.headers:
        fullfilename = t[0]
        outdir = os.path.join(d.fullprefix, t[1])
        fname = os.path.basename(fullfilename)
        outfilename = os.path.join(outdir, fname)
        os.makedirs(outdir, exist_ok=True)
        print('Installing %s to %s' % (fname, outdir))
        shutil.copyfile(fullfilename, outfilename)
        shutil.copystat(fullfilename, outfilename)
def run_install_script(d):
    # Run each custom install script with the environment variables
    # Meson guarantees to such scripts.
    env = {'MESON_SOURCE_ROOT' : d.source_dir,
           'MESON_BUILD_ROOT' : d.build_dir,
           'MESON_INSTALL_PREFIX' : d.prefix
          }
    child_env = os.environ.copy()
    child_env.update(env)
    for i in d.install_scripts:
        script = i.cmd_arr[0]
        print('Running custom install script %s' % script)
        suffix = os.path.splitext(script)[1].lower()
        if platform.system().lower() == 'windows' and suffix != '.bat':
            # Windows cannot execute shebang scripts directly: parse the
            # '#!' line and locate the interpreter on PATH ourselves.
            first_line = open(script).readline().strip()
            if first_line.startswith('#!'):
                # Strip any trailing comment from the shebang line.
                commands = first_line[2:].split('#')[0].strip().split()
                commands[0] = shutil.which(commands[0].split('/')[-1])
                if commands[0] is None:
                    raise RuntimeError("Don't know how to run script %s." % script)
                final_command = commands + [script] + i.cmd_arr[1:]
            # NOTE(review): if the first line lacks '#!', final_command is
            # never assigned and the call below raises NameError, which the
            # broad except turns into exit(1) -- confirm this is intended.
        else:
            final_command = i.cmd_arr
        try:
            rc = subprocess.call(final_command, env=child_env)
            if rc != 0:
                sys.exit(rc)
        except Exception:
            print('Failed to run install script:', i.cmd_arr[0])
            sys.exit(1)
def is_elf_platform():
    """Return True when the host platform uses ELF binaries
    (i.e. is neither macOS nor Windows)."""
    return platform.system().lower() not in ('darwin', 'windows')
def check_for_stampfile(fname):
    '''Some languages e.g. Rust have output files
    whose names are not known at configure time.
    Check if this is the case and return the real
    file instead.'''
    if fname.endswith(('.so', '.dll')):
        # A zero-byte file is a stamp: the real artifact carries a
        # version infix, e.g. libfoo-1.0.so.
        if os.stat(fname).st_size == 0:
            base, suffix = os.path.splitext(fname)
            candidates = glob(base + '-*' + suffix)
            if len(candidates) > 1:
                print("Stale dynamic library files in build dir. Can't install.")
                sys.exit(1)
            if len(candidates) == 1:
                return candidates[0]
    elif fname.endswith(('.a', '.lib')):
        # Static stamp: the real Rust artifact is an .rlib.
        if os.stat(fname).st_size == 0:
            base, suffix = os.path.splitext(fname)
            candidates = glob(base + '-*' + '.rlib')
            if len(candidates) > 1:
                print("Stale static library files in build dir. Can't install.")
                sys.exit(1)
            if len(candidates) == 1:
                return candidates[0]
    return fname
def install_targets(d):
    # Install each built target, then optionally strip it, create alias
    # symlinks, and fix its dependency/rpath info on ELF platforms.
    for t in d.targets:
        # Rust etc. may emit files whose names were unknown at configure
        # time; resolve the stamp file to the real artifact.
        fname = check_for_stampfile(t[0])
        outdir = os.path.join(d.fullprefix, t[1])
        aliases = t[2]
        outname = os.path.join(outdir, os.path.split(fname)[-1])
        should_strip = t[3]
        install_rpath = t[4]
        print('Installing %s to %s' % (fname, outname))
        os.makedirs(outdir, exist_ok=True)
        shutil.copyfile(fname, outname)
        shutil.copystat(fname, outname)
        if should_strip:
            print('Stripping target')
            ps = subprocess.Popen(['strip', outname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (stdo, stde) = ps.communicate()
            if ps.returncode != 0:
                print('Could not strip file.\n')
                print('Stdout:\n%s\n' % stdo.decode())
                print('Stderr:\n%s\n' % stde.decode())
                sys.exit(1)
        printed_symlink_error = False
        for alias in aliases:
            try:
                symlinkfilename = os.path.join(outdir, alias)
                try:
                    # Remove any stale symlink from a previous install.
                    os.unlink(symlinkfilename)
                except FileNotFoundError:
                    pass
                os.symlink(os.path.split(fname)[-1], symlinkfilename)
            except (NotImplementedError, OSError):
                # e.g. filesystems/platforms without symlink support;
                # warn once per target and keep going.
                if not printed_symlink_error:
                    print("Symlink creation does not work on this platform.")
                    printed_symlink_error = True
        if is_elf_platform():
            # depfixer rewrites the build-time rpath to install_rpath.
            p = subprocess.Popen(d.depfixer + [outname, install_rpath],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (stdo, stde) = p.communicate()
            if p.returncode != 0:
                print('Could not fix dependency info.\n')
                print('Stdout:\n%s\n' % stdo.decode())
                print('Stderr:\n%s\n' % stde.decode())
                sys.exit(1)
def run(args):
    """Entry point for the installer.

    Args:
        args (list): command-line arguments; must contain exactly one
            element, the path of the pickled install data file.

    Returns:
        int: 0 on success, 1 on incorrect usage.
    """
    if len(args) != 1:
        print('Installer script for Meson. Do not run on your own, mmm\'kay?')
        print('meson_install.py [install info file]')
        # Bug fix: the original fell through after printing usage and
        # crashed on args[0] (or installed with a bogus argument).
        return 1
    datafilename = args[0]
    do_install(datafilename)
    return 0
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
|
|
"""HTTP server classes.
Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see
SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST,
and CGIHTTPRequestHandler for CGI scripts.
It does, however, optionally implement HTTP/1.1 persistent connections,
as of version 0.3.
Notes on CGIHTTPRequestHandler
------------------------------
This class implements GET and POST requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
subprocess.Popen() is used as a fallback, with slightly altered semantics.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.6"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import html
import email.message
import email.parser
import http.client
import io
import mimetypes
import os
import posixpath
import select
import shutil
import socket # For gethostbyaddr()
import socketserver
import sys
import time
import urllib.parse
import copy
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code: %(code)d</p>
<p>Message: %(message)s.</p>
<p>Error code explanation: %(code)s - %(explain)s.</p>
</body>
</html>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
def _quote_html(html):
return html.replace("&", "&").replace("<", "<").replace(">", ">")
class HTTPServer(socketserver.TCPServer):
    # Allow quick restarts during testing without waiting for TIME_WAIT.
    allow_reuse_address = 1

    def server_bind(self):
        """Bind the socket, then record the server name and port."""
        super().server_bind()
        host, port = self.socket.getsockname()[:2]
        self.server_port = port
        self.server_name = socket.getfqdn(host)
class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of email.message.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
    def parse_request(self):
        """Parse a request (internal).
        The request should be stored in self.raw_requestline; the results
        are in self.command, self.path, self.request_version and
        self.headers.
        Return True for success, False for failure; on failure, an
        error is sent back.
        """
        self.command = None  # set in case of error on the first line
        self.request_version = version = self.default_request_version
        # Conservative default: close after this request unless the
        # version and headers below prove keep-alive is safe.
        self.close_connection = 1
        requestline = str(self.raw_requestline, 'iso-8859-1')
        requestline = requestline.rstrip('\r\n')
        self.requestline = requestline
        words = requestline.split()
        if len(words) == 3:
            command, path, version = words
            if version[:5] != 'HTTP/':
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            try:
                base_version_number = version.split('/', 1)[1]
                version_number = base_version_number.split(".")
                # RFC 2145 section 3.1 says there can be only one "." and
                #   - major and minor numbers MUST be treated as
                #      separate integers;
                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
                #      turn is lower than HTTP/12.3;
                #   - Leading zeros MUST be ignored by recipients.
                if len(version_number) != 2:
                    raise ValueError
                version_number = int(version_number[0]), int(version_number[1])
            except (ValueError, IndexError):
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
                # HTTP/1.1 defaults to persistent connections.
                self.close_connection = 0
            if version_number >= (2, 0):
                self.send_error(505,
                                "Invalid HTTP Version (%s)" % base_version_number)
                return False
        elif len(words) == 2:
            # Two-word request line: legacy HTTP/0.9, GET only, no headers.
            command, path = words
            self.close_connection = 1
            if command != 'GET':
                self.send_error(400,
                                "Bad HTTP/0.9 request type (%r)" % command)
                return False
        elif not words:
            # Blank line: the caller treats this as "no request".
            return False
        else:
            self.send_error(400, "Bad request syntax (%r)" % requestline)
            return False
        self.command, self.path, self.request_version = command, path, version
        # Examine the headers and look for a Connection directive.
        try:
            self.headers = http.client.parse_headers(self.rfile,
                                                     _class=self.MessageClass)
        except http.client.LineTooLong:
            self.send_error(400, "Line too long")
            return False
        conntype = self.headers.get('Connection', "")
        if conntype.lower() == 'close':
            self.close_connection = 1
        elif (conntype.lower() == 'keep-alive' and
              self.protocol_version >= "HTTP/1.1"):
            self.close_connection = 0
        # Examine the headers and look for an Expect directive
        expect = self.headers.get('Expect', "")
        if (expect.lower() == "100-continue" and
                self.protocol_version >= "HTTP/1.1" and
                self.request_version >= "HTTP/1.1"):
            # RFC 2616 8.2.3: the client waits for "100 Continue" before
            # sending the body; give subclasses a chance to refuse.
            if not self.handle_expect_100():
                return False
        return True
    def handle_expect_100(self):
        """Decide what to do with an "Expect: 100-continue" header.
        If the client is expecting a 100 Continue response, we must
        respond with either a 100 Continue or a final response before
        waiting for the request body. The default is to always respond
        with a 100 Continue. You can behave differently (for example,
        reject unauthorized requests) by overriding this method.
        This method should either return True (possibly after sending
        a 100 Continue response) or send an error response and return
        False.
        """
        # Status line only: interim 100 responses carry no Server/Date
        # headers and are not logged.
        self.send_response_only(100)
        return True
    def handle_one_request(self):
        """Handle a single HTTP request.
        You normally don't need to override this method; see the class
        __doc__ string for information on how to handle specific HTTP
        commands such as GET and POST.
        """
        try:
            # 65537 = 64K limit + 1 so an over-long request line is
            # detectable by the length check below.
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                # 414: Request-URI Too Long.
                self.send_error(414)
                return
            if not self.raw_requestline:
                # EOF: the client closed the connection.
                self.close_connection = 1
                return
            if not self.parse_request():
                # An error code has been sent, just exit
                return
            # Dispatch to do_GET / do_POST / ... by method name.
            mname = 'do_' + self.command
            if not hasattr(self, mname):
                self.send_error(501, "Unsupported method (%r)" % self.command)
                return
            method = getattr(self, mname)
            method()
            self.wfile.flush() #actually send the response if not already done.
        except socket.timeout as e:
            # a read or a write timed out. Discard this connection
            self.log_error("Request timed out: %r", e)
            self.close_connection = 1
            return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
    def send_error(self, code, message=None):
        """Send and log an error reply.
        Arguments are the error code, and a detailed message.
        The detailed message defaults to the short entry matching the
        response code.
        This sends an error response (so it must be called before any
        output has been generated), logs the error, and finally sends
        a piece of HTML explaining the error to the user.
        """
        try:
            shortmsg, longmsg = self.responses[code]
        except KeyError:
            # Unknown status code: fall back to placeholder text.
            shortmsg, longmsg = '???', '???'
        if message is None:
            message = shortmsg
        explain = longmsg
        self.log_error("code %d, message %s", code, message)
        # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
        content = (self.error_message_format %
                   {'code': code, 'message': _quote_html(message), 'explain': explain})
        self.send_response(code, message)
        self.send_header("Content-Type", self.error_content_type)
        self.send_header('Connection', 'close')
        self.end_headers()
        # No body for HEAD requests, nor for 204/304, which are defined
        # to carry no message body.
        if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
            self.wfile.write(content.encode('UTF-8', 'replace'))
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_response_only(self, code, message=None):
"""Send the response header only."""
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode('latin1', 'strict'))
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(
("%s: %s\r\n" % (keyword, value)).encode('latin1', 'strict'))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self._headers_buffer.append(b"\r\n")
self.wfile.write(b"".join(self._headers_buffer))
self._headers_buffer = []
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# MessageClass used to parse headers
MessageClass = http.client.HTTPMessage
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):

    """Simple HTTP request handler with GET and HEAD commands.

    This serves files from the current directory and any of its
    subdirectories.  The MIME type for files is determined by
    calling the .guess_type() method.

    The GET and HEAD requests are identical except that the HEAD
    request omits the actual contents of the file.
    """

    server_version = "SimpleHTTP/" + __version__

    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            try:
                self.copyfile(f, self.wfile)
            finally:
                # Close even when the client disconnects mid-transfer;
                # previously f leaked if copyfile() raised.
                f.close()

    def do_HEAD(self):
        """Serve a HEAD request."""
        f = self.send_head()
        if f:
            f.close()

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.
        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        try:
            self.send_response(200)
            self.send_header("Content-type", ctype)
            fs = os.fstat(f.fileno())
            self.send_header("Content-Length", str(fs[6]))
            self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
            self.end_headers()
            return f
        except:
            # Don't leak the open file if building the response fails
            # after the open() succeeded.
            f.close()
            raise

    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().
        """
        try:
            entries = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        entries.sort(key=lambda a: a.lower())
        r = []
        displaypath = html.escape(urllib.parse.unquote(self.path))
        r.append('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        r.append("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        r.append("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        r.append("<hr>\n<ul>\n")
        for name in entries:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            r.append('<li><a href="%s">%s</a>\n'
                     % (urllib.parse.quote(linkname), html.escape(displayname)))
        r.append("</ul>\n<hr>\n</body>\n</html>\n")
        enc = sys.getfilesystemencoding()
        encoded = ''.join(r).encode(enc)
        f = io.BytesIO()
        f.write(encoded)
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html; charset=%s" % enc)
        self.send_header("Content-Length", str(len(encoded)))
        self.end_headers()
        return f

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)
        """
        # abandon query parameters and fragments
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        path = posixpath.normpath(urllib.parse.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            # Strip drive/directory components and '.'/'..' so requests
            # cannot escape the directory being served.
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir):
                continue
            path = os.path.join(path, word)
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that this the default server uses this
        to copy binary data as well.
        """
        shutil.copyfileobj(source, outputfile)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using application/octet-stream
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.
        """
        base, ext = posixpath.splitext(path)
        # Exact (case-sensitive) match first, then case-insensitive.
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    if not mimetypes.inited:
        mimetypes.init() # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream', # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })
# Utilities for CGIHTTPRequestHandler
# TODO(gregory.p.smith): Move this into an appropriate library.
def _url_collapse_path_split(path):
"""
Given a URL path, remove extra '/'s and '.' path elements and collapse
any '..' references.
Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
Returns: A tuple of (head, tail) where tail is everything after the final /
and head is everything before it. Head will always start with a '/' and,
if it contains anything else, never have a trailing '/'.
Raises: IndexError if too many '..' occur within the path.
"""
# Similar to os.path.split(os.path.normpath(path)) but specific to URL
# path semantics rather than local operating system semantics.
path_parts = []
for part in path.split('/'):
if part == '.':
path_parts.append('')
else:
path_parts.append(part)
# Filter out blank non trailing parts before consuming the '..'.
path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
if path_parts:
tail_part = path_parts.pop()
else:
tail_part = ''
head_parts = []
for part in path_parts:
if part == '..':
head_parts.pop()
else:
head_parts.append(part)
if tail_part and tail_part == '..':
head_parts.pop()
tail_part = ''
return ('/' + '/'.join(head_parts), tail_part)
# Cached uid of the 'nobody' account; populated lazily by nobody_uid().
nobody = None

def nobody_uid():
    """Return (and cache) the uid used to run CGI children unprivileged.

    Returns -1 on platforms without the ``pwd`` module.  When no
    'nobody' entry exists, falls back to one more than the highest uid
    currently in the password database.
    """
    global nobody
    if nobody:
        return nobody
    try:
        import pwd
    except ImportError:
        return -1
    try:
        entry = pwd.getpwnam('nobody')
        nobody = entry[2]
    except KeyError:
        nobody = max(record[2] for record in pwd.getpwall()) + 1
    return nobody
def executable(path):
    """Return True when *path* can be stat'ed and has any execute bit set."""
    try:
        mode = os.stat(path).st_mode
    except os.error:
        # Missing file (or any stat failure) counts as not executable.
        return False
    return bool(mode & 0o111)
class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):

    """Complete HTTP server with GET, HEAD and POST commands.

    GET and HEAD also support running CGI scripts.

    The POST command is *only* implemented for CGI scripts.
    """

    # Determine platform specifics
    have_fork = hasattr(os, 'fork')

    # Make rfile unbuffered -- we need to read one line and then pass
    # the rest to a subprocess, so we can't use buffered input.
    rbufsize = 0

    def do_POST(self):
        """Serve a POST request.

        This is only implemented for CGI scripts.
        """
        if self.is_cgi():
            self.run_cgi()
        else:
            self.send_error(501, "Can only POST to CGI scripts")

    def send_head(self):
        """Version of send_head that support CGI scripts"""
        if self.is_cgi():
            return self.run_cgi()
        else:
            return SimpleHTTPRequestHandler.send_head(self)

    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script.

        Returns True and updates the cgi_info attribute to the tuple
        (dir, rest) if self.path requires running a CGI script.
        Returns False otherwise.

        If any exception is raised, the caller should assume that
        self.path was rejected as invalid and act accordingly.

        The default implementation tests whether the normalized url
        path begins with one of the strings in self.cgi_directories
        (and the next character is a '/' or the end of the string).
        """
        splitpath = _url_collapse_path_split(self.path)
        if splitpath[0] in self.cgi_directories:
            self.cgi_info = splitpath
            return True
        return False

    # URL prefixes whose contents are treated as CGI scripts.
    cgi_directories = ['/cgi-bin', '/htbin']

    def is_executable(self, path):
        """Test whether argument path is an executable file."""
        return executable(path)

    def is_python(self, path):
        """Test whether argument path is a Python script."""
        head, tail = os.path.splitext(path)
        return tail.lower() in (".py", ".pyw")

    def run_cgi(self):
        """Execute a CGI script."""
        path = self.path
        dir, rest = self.cgi_info

        # Walk forward through the request path to locate the deepest
        # existing directory; everything beyond it is the script name
        # plus extra PATH_INFO.
        i = path.find('/', len(dir) + 1)
        while i >= 0:
            nextdir = path[:i]
            nextrest = path[i+1:]

            scriptdir = self.translate_path(nextdir)
            if os.path.isdir(scriptdir):
                dir, rest = nextdir, nextrest
                i = path.find('/', len(dir) + 1)
            else:
                break

        # find an explicit query string, if present.
        i = rest.rfind('?')
        if i >= 0:
            rest, query = rest[:i], rest[i+1:]
        else:
            query = ''

        # dissect the part after the directory name into a script name &
        # a possible additional path, to be stored in PATH_INFO.
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''

        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(404, "No such CGI script (%r)" % scriptname)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(403, "CGI script is not a plain file (%r)" %
                            scriptname)
            return
        ispy = self.is_python(scriptname)
        if not ispy:
            # Non-Python scripts must carry an execute permission bit.
            if not self.is_executable(scriptfile):
                self.send_error(403, "CGI script is not executable (%r)" %
                                scriptname)
                return

        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = copy.deepcopy(os.environ)
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.parse.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        host = self.address_string()
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        authorization = self.headers.get("authorization")
        if authorization:
            authorization = authorization.split()
            if len(authorization) == 2:
                import base64, binascii
                env['AUTH_TYPE'] = authorization[0]
                if authorization[0].lower() == "basic":
                    # Decode Basic credentials so REMOTE_USER can be set;
                    # malformed values are silently ignored.
                    try:
                        authorization = authorization[1].encode('ascii')
                        authorization = base64.decodebytes(authorization).\
                                        decode('ascii')
                    except (binascii.Error, UnicodeError):
                        pass
                    else:
                        authorization = authorization.split(':')
                        if len(authorization) == 2:
                            env['REMOTE_USER'] = authorization[0]
        # XXX REMOTE_IDENT
        if self.headers.get('content-type') is None:
            env['CONTENT_TYPE'] = self.headers.get_content_type()
        else:
            env['CONTENT_TYPE'] = self.headers['content-type']
        length = self.headers.get('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        referer = self.headers.get('referer')
        if referer:
            env['HTTP_REFERER'] = referer
        accept = []
        for line in self.headers.getallmatchingheaders('accept'):
            # Continuation lines (leading whitespace) extend the previous
            # Accept header value.
            if line[:1] in "\t\n\r ":
                accept.append(line.strip())
            else:
                accept = accept + line[7:].split(',')
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.get('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.get_all('cookie', []))
        cookie_str = ', '.join(co)
        if cookie_str:
            env['HTTP_COOKIE'] = cookie_str
        # XXX Other HTTP_* headers
        # Since we're setting the env in the parent, provide empty
        # values to override previously set values
        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
            env.setdefault(k, "")

        self.send_response(200, "Script output follows")

        decoded_query = query.replace('+', ' ')

        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = nobody_uid()
            self.wfile.flush() # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                if sts:
                    self.log_error("CGI script exit status %#x", sts)
                return
            # Child
            try:
                try:
                    # Drop privileges before exec'ing the script.
                    os.setuid(nobody)
                except os.error:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, env)
            except:
                self.server.handle_error(self.request, self.client_address)
                os._exit(127)

        else:
            # Non-Unix -- use subprocess
            import subprocess
            cmdline = [scriptfile]
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = [interp, '-u'] + cmdline
            if '=' not in query:
                cmdline.append(query)
            self.log_message("command: %s", subprocess.list2cmdline(cmdline))
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            p = subprocess.Popen(cmdline,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env = env
                                 )
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
            else:
                data = None
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            stdout, stderr = p.communicate(data)
            self.wfile.write(stdout)
            if stderr:
                self.log_error('%s', stderr)
            p.stderr.close()
            p.stdout.close()
            status = p.returncode
            if status:
                self.log_error("CGI script exit status %#x", status)
            else:
                self.log_message("CGI script exited OK")
def test(HandlerClass = BaseHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Test the HTTP request handler class.

    This runs an HTTP server on port 8000 (or the first command line
    argument).
    """
    # Port comes from argv[1] when supplied, otherwise default to 8000.
    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8000
    server_address = ('', port)

    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)

    sa = httpd.socket.getsockname()
    print("Serving HTTP on", sa[0], "port", sa[1], "...")
    try:
        # Blocks until interrupted.
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C shuts the server down cleanly.
        print("\nKeyboard interrupt received, exiting.")
        httpd.server_close()
        sys.exit(0)
# When run as a script, serve the current directory over HTTP.
if __name__ == '__main__':
    test(HandlerClass=SimpleHTTPRequestHandler)
|
|
import os.path
import re
import urlparse
from bs4 import BeautifulSoup
import requests
import clarify
import unicodecsv
from openelex.base.datasource import BaseDatasource
from openelex.lib import build_github_url
from openelex.lib.text import ocd_type_id
class Datasource(BaseDatasource):
    """Maps raw Arkansas election results to standardized filenames/URLs.

    Results come from three places: the Secretary of State results
    portal, zipped files for a handful of 2000/2001 specials, and the
    Clarity election-night reporting system (2012 onward).
    """

    RESULTS_PORTAL_URL = "http://www.sos.arkansas.gov/electionresults/index.php"
    CLARITY_PORTAL_URL = "http://results.enr.clarityelections.com/AR/"

    # There aren't precinct-level results for these, just a CSV file with
    # summary data for the county.
    no_precinct_urls = [
        "http://results.enr.clarityelections.com/AR/Columbia/42858/111213/en/summary.html",
        "http://results.enr.clarityelections.com/AR/Ouachita/42896/112694/en/summary.html",
        "http://results.enr.clarityelections.com/AR/Union/42914/112664/en/summary.html",
    ]

    def mappings(self, year=None):
        """Return metadata dicts for every election, optionally filtered by year."""
        mappings = []
        for yr, elecs in self.elections(year).items():
            mappings.extend(self._build_metadata(yr, elecs))
        return mappings

    def target_urls(self, year=None):
        """Return the raw source URL for every mapping."""
        return [item['raw_url'] for item in self.mappings(year)]

    def filename_url_pairs(self, year=None):
        """Return (standardized filename, fetchable URL) pairs."""
        return [(item['generated_filename'], self._url_for_fetch(item))
                for item in self.mappings(year)]

    def unprocessed_filename_url_pairs(self, year=None):
        """Return pairs for raw PDFs that have a pre-processed CSV counterpart."""
        return [(item['generated_filename'].replace(".csv", ".pdf"), item['raw_url'])
                for item in self.mappings(year)
                if 'pre_processed_url' in item]

    def mappings_for_url(self, url):
        """Return every mapping whose raw source is *url*."""
        return [mapping for mapping in self.mappings() if mapping['raw_url'] == url]

    def _build_metadata(self, year, elections):
        # Flatten the per-election metadata lists into one list.
        meta_entries = []
        for election in elections:
            meta_entries.extend(self._build_election_metadata(election))
        return meta_entries

    def _build_election_metadata(self, election):
        """
        Return a list of metadata entries for a single election.
        """
        slug = election['slug']
        link = election['direct_links'][0]

        # Dispatch on the election: a few slugs need bespoke handling,
        # Clarity-hosted results share a common builder, everything else
        # falls through to the default.
        if slug == 'ar-2000-11-07-general':
            return self._build_election_metadata_2000_general(election)
        elif slug in ('ar-2000-11-07-special-general',
                'ar-2001-09-25-special-primary',
                'ar-2001-10-16-special-primary-runoff',
                'ar-2001-11-20-special-general'):
            return self._build_election_metadata_zipped_special(election)
        elif link.startswith(self.CLARITY_PORTAL_URL):
            return self._build_election_metadata_clarity(election)
        else:
            return self._build_election_metadata_default(election)

    def _build_election_metadata_default(self, election):
        """Build a single statewide mapping for an election."""
        link = election['direct_links'][0]
        filename_kwargs = {}

        if link.startswith(self.RESULTS_PORTAL_URL):
            # Report portal results are precinct-level
            filename_kwargs['reporting_level'] = 'precinct'
            # And the format is tab-delimited text
            filename_kwargs['extension'] = '.tsv'

        generated_filename = self._standardized_filename(election, **filename_kwargs)
        mapping = {
            "generated_filename": generated_filename,
            "raw_url": link,
            "ocd_id": 'ocd-division/country:us/state:ar',
            "name": 'Arkansas',
            "election": election['slug']
        }
        if "2002" in election['slug']:
            # 2002 raw results are PDFs with pre-processed CSVs on GitHub.
            generated_filename = generated_filename.replace('.pdf', '.csv')
            mapping['pre_processed_url'] = build_github_url(self.state,
                generated_filename)
            mapping['generated_filename'] = generated_filename

        return [mapping]

    def _build_election_metadata_2000_general(self, election):
        """Build one precinct-level mapping per county for the 2000 general."""
        meta_entries = []
        for county in self._counties():
            county_name = county['name']
            filename = self._standardized_filename(election,
                jurisdiction=county_name, reporting_level='precinct',
                extension='.txt')
            raw_extracted_filename = self._raw_extracted_filename_2000_general(county_name)
            meta_entries.append({
                'generated_filename': filename,
                'raw_url': election['direct_links'][0],
                'raw_extracted_filename': raw_extracted_filename,
                'ocd_id': county['ocd_id'],
                'name': county_name,
                'election': election['slug'],
            })
        return meta_entries

    def _build_election_metadata_zipped_special(self, election):
        """Build mappings for the zipped 2000/2001 special elections."""
        meta_entries = []
        url_paths = self._url_paths_for_election(election['slug'])
        for path in url_paths:
            filename_kwargs = {
                'reporting_level': path['reporting_level'],
                'extension': '.txt',
                'office': path['office'],
                'office_district': path['district'],
            }
            if path['reporting_level'] == 'precinct':
                filename_kwargs['jurisdiction'] = path['jurisdiction']
                jurisdiction = path['jurisdiction']
                ocd_id = 'ocd-division/country:us/state:ar/county:{}'.format(ocd_type_id(jurisdiction))
            else:
                # Non-precinct entries are statewide.
                jurisdiction = 'Arkansas'
                ocd_id = 'ocd-division/country:us/state:ar'
            filename = self._standardized_filename(election, **filename_kwargs)
            meta_entries.append({
                'generated_filename': filename,
                'raw_url': path['url'],
                'raw_extracted_filename': path['raw_extracted_filename'],
                'ocd_id': ocd_id,
                'name': jurisdiction,
                'election': election['slug'],
            })
        return meta_entries

    def _raw_extracted_filename_2000_general(self, county_name):
        # Archive members look like "ctyASHLEYC.txt": "cty" plus the first
        # seven characters of the upper-cased, space-stripped county name
        # with " County" appended.
        county_part = county_name + " County"
        county_part = county_part.upper().replace(' ', '')
        return "cty{}.txt".format(county_part[:7])

    def _build_election_metadata_clarity(self, election, fmt="xml"):
        """
        Return metadata entries for election results provided by the Clarity
        system.

        These results seem to be for elections starting in 2012.

        Keyword Arguments:

        * fmt - Format of results file.  Can be "xls", "txt" or "xml".
                Default is "xml".
        """
        base_url = election['direct_links'][0]
        jurisdiction = clarity.Jurisdiction(url=base_url, level='state')
        return self._build_election_metadata_clarity_county(election, fmt, jurisdiction) +\
            self._build_election_metadata_clarity_precinct(election, fmt, jurisdiction)

    def _build_election_metadata_clarity_county(self, election, fmt, jurisdiction):
        """Build the single county-level (statewide report) Clarity mapping."""
        return [{
            "generated_filename": self._standardized_filename(election,
                reporting_level='county', extension='.'+fmt),
            "raw_extracted_filename": "detail.{}".format(fmt),
            "raw_url": jurisdiction.report_url(fmt),
            "ocd_id": 'ocd-division/country:us/state:ar',
            "name": 'Arkansas',
            "election": election['slug']
        }]

    def _build_election_metadata_clarity_precinct(self, election, fmt, jurisdiction):
        """Build one precinct-level Clarity mapping per county subjurisdiction."""
        meta_entries = []
        for path in self._clarity_precinct_url_paths(election, fmt, jurisdiction):
            jurisdiction_name = path['jurisdiction']
            ocd_id = 'ocd-division/country:us/state:ar/county:{}'.format(ocd_type_id(jurisdiction_name))
            filename = self._standardized_filename(election,
                jurisdiction=jurisdiction_name, reporting_level='precinct',
                extension='.'+fmt)
            meta_entries.append({
                "generated_filename": filename,
                "raw_extracted_filename": "detail.{}".format(fmt),
                "raw_url": path['url'],
                "ocd_id": ocd_id,
                "name": jurisdiction_name,
                "election": election['slug'],
            })
        return meta_entries

    def _clarity_precinct_url_paths_filename(self, election):
        """Return the cache-file path for this election's precinct URL paths."""
        filename = self._standardized_filename(election, ['url_paths'],
            reporting_level='precinct', extension='.csv')
        return os.path.join(self.mappings_dir, filename)

    def _clarity_precinct_url_paths(self, election, fmt, jurisdiction):
        """Return precinct URL path dicts, scraping Clarity then caching to CSV."""
        url_paths_filename = self._clarity_precinct_url_paths_filename(election)
        if os.path.exists(url_paths_filename):
            # Use the cached CSV instead of re-crawling Clarity.
            return self._url_paths(url_paths_filename)

        url_paths = []
        for subjurisdiction in jurisdiction.get_subjurisdictions():
            if subjurisdiction.url not in self.no_precinct_urls:
                url_paths.append({
                    'date': election['start_date'],
                    'office': '',
                    'race_type': election['race_type'],
                    'party': '',
                    'special': election['special'],
                    'url': subjurisdiction.report_url(fmt),
                    'reporting_level': 'precinct',
                    'jurisdiction': subjurisdiction.name,
                })

        # Persist the scraped paths so subsequent runs skip the crawl.
        with open(url_paths_filename, 'wb') as f:
            fieldnames = ['date', 'office', 'race_type', 'party',
                'special', 'url', 'reporting_level', 'jurisdiction']
            writer = unicodecsv.DictWriter(f, fieldnames)
            writer.writeheader()
            writer.writerows(url_paths)

        return url_paths

    def _url_for_fetch(self, mapping):
        # Prefer the pre-processed (e.g. GitHub CSV) URL when one exists.
        if 'pre_processed_url' in mapping:
            return mapping['pre_processed_url']
        else:
            return mapping['raw_url']
|
|
# -*- coding: utf-8 -*-
""" Unit tests for the omf plugin """
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
__author__ = "Stefano Simonelli"
__copyright__ = "Copyright (c) 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
import asyncio
import logging
import pytest
import json
import time
import ast
import aiohttp
from unittest.mock import patch, MagicMock, ANY
from foglamp.tasks.north.sending_process import SendingProcess
from foglamp.plugins.north.pi_server import pi_server
import foglamp.tasks.north.sending_process as module_sp
from foglamp.common.storage_client import payload_builder
from foglamp.common.storage_client.storage_client import StorageClientAsync
# Stream identifier handed to the sending process in these tests.
_STREAM_ID = 1
# noinspection PyProtectedMember
@pytest.fixture
def fixture_omf(event_loop):
    """Configure the OMF plugin module for the tests.

    Replaces the plugin logger with a mock and seeds the OMF type
    configuration, then returns the patched module.
    """
    # NOTE: the original also created an unused `_omf = MagicMock()`;
    # removed as dead code.
    pi_server._logger = MagicMock(spec=logging)
    pi_server._config_omf_types = {"type-id": {"value": "0001"}}
    return pi_server
# noinspection PyProtectedMember
@pytest.fixture
def fixture_omf_north(event_loop):
    """Configure a PIServerNorthPlugin instance for the tests.

    The plugin is built with mocked sending-process and configuration
    collaborators; its storage client is replaced by an async mock.
    """
    sending_process_instance = MagicMock()
    config = []
    config_omf_types = []
    _logger = MagicMock(spec=logging)
    omf_north = pi_server.PIServerNorthPlugin(sending_process_instance, config, config_omf_types, _logger)
    # The plugin reaches storage through the sending process; stub it out.
    omf_north._sending_process_instance._storage_async = MagicMock(spec=StorageClientAsync)
    return omf_north
async def mock_async_call(p1=ANY):
    """Generic awaitable stub: resolves to whatever argument it was given."""
    result = p1
    return result
class MockAiohttpClientSession(MagicMock):
    """Mock of the aiohttp.ClientSession async context manager.

    The first two positional constructor arguments select the HTTP
    status code and the body text the mocked response will produce.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # args[0]: HTTP status to report; args[1]: body returned by text().
        self.code = args[0]
        self.text = args[1]
    async def __aenter__(self):
        mock_response = MagicMock(spec=aiohttp.ClientResponse)
        mock_response.status = self.code
        # text() is awaited by the caller, so wrap the payload in a coroutine.
        mock_response.text.side_effect = [mock_async_call(self.text)]
        return mock_response
    async def __aexit__(self, *args):
        return None
class MockAiohttpClientSessionSuccess(MagicMock):
    """Mock of the aiohttp.ClientSession context manager.

    Always yields a 200 response whose body text is 'SUCCESS'.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    async def __aenter__(self):
        mock_response = MagicMock(spec=aiohttp.ClientResponse)
        mock_response.status = 200
        # text() is awaited by the caller, so wrap the payload in a coroutine.
        mock_response.text.side_effect = [mock_async_call('SUCCESS')]
        return mock_response
    async def __aexit__(self, *args):
        return None
class MockAiohttpClientSessionError(MagicMock):
    """Mock of the aiohttp.ClientSession context manager.

    Always yields a 400 response whose body text is 'ERROR'.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    async def __aenter__(self):
        mock_response = MagicMock(spec=aiohttp.ClientResponse)
        mock_response.status = 400
        # text() is awaited by the caller, so wrap the payload in a coroutine.
        mock_response.text.side_effect = [mock_async_call('ERROR')]
        return mock_response
    async def __aexit__(self, *args):
        return None
# noinspection PyUnresolvedReferences
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "north", "pi_server")
class TestPiServer:
"""Unit tests related to the public methods of the omf plugin """
def test_plugin_info(self):
assert pi_server.plugin_info() == {
'name': "PI Server North",
'version': "1.0.0",
'type': "north",
'interface': "1.0",
'config': pi_server._CONFIG_DEFAULT_OMF
}
    def test_plugin_init_good(self):
        """Tests plugin_init using a good set of values.

        Verifies that string configuration values are converted to the proper
        Python types (int for the retry/timeout knobs, dict for StaticData).
        """
        pi_server._logger = MagicMock()
        # Used to check the conversions
        data = {
            "stream_id": {"value": 1},
            "_CONFIG_CATEGORY_NAME": module_sp.SendingProcess._CONFIG_CATEGORY_NAME,
            "URL": {"value": "test_URL"},
            "producerToken": {"value": "test_producerToken"},
            "OMFMaxRetry": {"value": "100"},
            "OMFRetrySleepTime": {"value": "100"},
            "OMFHttpTimeout": {"value": "100"},
            "StaticData": {
                "value": json.dumps(
                    {
                        "Location": "Palo Alto",
                        "Company": "Dianomic"
                    }
                )
            },
            "destination_type": {"value": "1"},
            'sending_process_instance': MagicMock(spec=SendingProcess),
            "formatNumber": {"value": "float64"},
            "formatInteger": {"value": "int64"},
            "notBlockingErrors": {"value": "{'id': 400, 'message': 'none'}"},
            "compression": {"value": "true"}
        }
        config_default_omf_types = pi_server.CONFIG_DEFAULT_OMF_TYPES
        config_default_omf_types["type-id"]["value"] = "0001"
        data["debug_level"] = None
        data["log_performance"] = None
        data["destination_id"] = 1
        data["stream_id"] = 1
        # plugin_init fetches the OMF-type configuration through the sending
        # process instance, so stub _fetch_configuration out.
        with patch.object(data['sending_process_instance'], '_fetch_configuration',
                          return_value=config_default_omf_types):
            config = pi_server.plugin_init(data)
        assert config['_CONFIG_CATEGORY_NAME'] == module_sp.SendingProcess._CONFIG_CATEGORY_NAME
        assert config['URL'] == "test_URL"
        assert config['producerToken'] == "test_producerToken"
        assert config['OMFMaxRetry'] == 100
        assert config['OMFRetrySleepTime'] == 100
        assert config['OMFHttpTimeout'] == 100
        # Check conversion from String to Dict
        assert isinstance(config['StaticData'], dict)
    @pytest.mark.parametrize("data", [
        # Bad case 1 - StaticData is a python dict instead of a string containing a dict
        {
            "stream_id": {"value": 1},
            "_CONFIG_CATEGORY_NAME": module_sp.SendingProcess._CONFIG_CATEGORY_NAME,
            "URL": {"value": "test_URL"},
            "producerToken": {"value": "test_producerToken"},
            "OMFMaxRetry": {"value": "100"},
            "OMFRetrySleepTime": {"value": "100"},
            "OMFHttpTimeout": {"value": "100"},
            "StaticData": {
                "value":
                    {
                        "Location": "Palo Alto",
                        "Company": "Dianomic"
                    }
            },
            'sending_process_instance': MagicMock()
        },
        # Bad case 2 - OMFMaxRetry, bad value expected an int it is a string
        {
            "stream_id": {"value": 1},
            "_CONFIG_CATEGORY_NAME": module_sp.SendingProcess._CONFIG_CATEGORY_NAME,
            "URL": {"value": "test_URL"},
            "producerToken": {"value": "test_producerToken"},
            "OMFMaxRetry": {"value": "xxx"},
            "OMFRetrySleepTime": {"value": "100"},
            "OMFHttpTimeout": {"value": "100"},
            "StaticData": {
                "value": json.dumps(
                    {
                        "Location": "Palo Alto",
                        "Company": "Dianomic"
                    }
                )
            },
            'sending_process_instance': MagicMock()
        }
    ])
    def test_plugin_init_bad(self, data):
        """Tests plugin_init using an invalid set of values: each case must raise."""
        pi_server._logger = MagicMock()
        with pytest.raises(Exception):
            pi_server.plugin_init(data)
    @pytest.mark.parametrize(
        "ret_transform_in_memory_data, "
        "p_raw_data, ",
        [
            (
                # ret_transform_in_memory_data
                # is_data_available - new_position - num_sent
                [True, 20, 10],
                # raw_data
                {
                    "id": 10,
                    "asset_code": "test_asset_code",
                    "reading": {"humidity": 100, "temperature": 1001},
                    "user_ts": '2018-04-20 09:38:50.163164+00'
                }
            ),
            (
                # ret_transform_in_memory_data
                # is_data_available - new_position - num_sent
                [False, 20, 10],
                # raw_data
                {
                    "id": 10,
                    "asset_code": "test_asset_code",
                    "reading": {"humidity": 100, "temperature": 1001},
                    "user_ts": '2018-04-20 09:38:50.163164+00'
                }
            ),
        ]
    )
    @pytest.mark.asyncio
    async def test_plugin_send_success(
            self,
            event_loop,
            fixture_omf,
            ret_transform_in_memory_data,
            p_raw_data
    ):
        """ Unit test for - plugin_send - successful case

        Case 1: data is available -> objects are created and data is sent.
        Case 2: no data available -> nothing is created/sent, data_sent is False.
        """
        data = MagicMock()
        if ret_transform_in_memory_data[0]:
            # data_available
            with patch.object(fixture_omf.PIServerNorthPlugin,
                              'transform_in_memory_data',
                              return_value=ret_transform_in_memory_data):
                with patch.object(fixture_omf.PIServerNorthPlugin,
                                  'create_omf_objects',
                                  return_value=mock_async_call()
                                  ) as patched_create_omf_objects:
                    with patch.object(fixture_omf.PIServerNorthPlugin,
                                      'send_in_memory_data_to_picromf',
                                      return_value=mock_async_call()
                                      ) as patched_send_in_memory_data_to_picromf:
                        data_sent, new_position, num_sent = await fixture_omf.plugin_send(data, p_raw_data, _STREAM_ID)
            assert patched_create_omf_objects.called
            assert patched_send_in_memory_data_to_picromf.called
            assert data_sent
            # new_position/num_sent are forwarded unchanged from transform_in_memory_data
            assert new_position == ret_transform_in_memory_data[1]
            assert num_sent == ret_transform_in_memory_data[2]
        else:
            # no data_available
            with patch.object(fixture_omf.PIServerNorthPlugin,
                              'transform_in_memory_data',
                              return_value=ret_transform_in_memory_data):
                data_sent, new_position, num_sent = await fixture_omf.plugin_send(data, p_raw_data, _STREAM_ID)
            assert not data_sent
@pytest.mark.parametrize(
"ret_transform_in_memory_data, "
"p_raw_data, ",
[
(
# ret_transform_in_memory_data
# is_data_available - new_position - num_sent
[True, 20, 10],
# raw_data
{
"id": 10,
"asset_code": "test_asset_code",
"reading": {"humidity": 100, "temperature": 1001},
"user_ts": '2018-04-20 09:38:50.163164+00'
}
)
]
)
@pytest.mark.asyncio
async def test_plugin_send_error(
self,
event_loop,
fixture_omf,
ret_transform_in_memory_data,
p_raw_data
):
""" Unit test for - plugin_send - error handling case
it tests especially if the omf objects are created again in case of a communication error
NOTE : the test will print a message to the stderr containing 'mocked object generated an exception'
the message is not to be intended an as error as it is part of the successful test.
"""
data = MagicMock()
with patch.object(fixture_omf.PIServerNorthPlugin,
'transform_in_memory_data',
return_value=ret_transform_in_memory_data
) as patched_transform_in_memory_data:
with patch.object(fixture_omf.PIServerNorthPlugin,
'create_omf_objects',
return_value=mock_async_call()
) as patched_create_omf_objects:
with patch.object(fixture_omf.PIServerNorthPlugin,
'send_in_memory_data_to_picromf',
side_effect=KeyError('mocked object generated an exception')
) as patched_send_in_memory_data_to_picromf:
with patch.object(fixture_omf.PIServerNorthPlugin,
'deleted_omf_types_already_created',
return_value=mock_async_call()
) as patched_deleted_omf_types_already_created:
with pytest.raises(Exception):
data_sent, new_position, num_sent = await fixture_omf.plugin_send(data, p_raw_data,
_STREAM_ID)
if ret_transform_in_memory_data[0]:
# data_available
assert patched_transform_in_memory_data.calles
assert patched_create_omf_objects.called
assert patched_send_in_memory_data_to_picromf.called
assert patched_deleted_omf_types_already_created.called
    def test_plugin_shutdown(self):
        """plugin_shutdown must accept the handle list without raising."""
        pi_server._logger = MagicMock()
        data = []
        pi_server.plugin_shutdown([data])
    def test_plugin_reconfigure(self):
        """plugin_reconfigure must be callable without raising (currently a no-op)."""
        pi_server._logger = MagicMock()
        pi_server.plugin_reconfigure()
class TestPIServerNorthPlugin:
"""Unit tests related to PIServerNorthPlugin, methods used internally to the plugin"""
@pytest.mark.parametrize(
"p_configuration_key, "
"p_type_id, "
"p_data_from_storage, "
"expected_data, ",
[
# Case 1
(
# p_configuration_key
"SEND_PR1",
# p_type_id
"0001",
# p_data_from_storage
{
"rows":
[
{
"configuration_key": "SEND_PR1",
"type_id": "0001",
"asset_code": "asset_code_1"
},
{
"configuration_key": "SEND_PR1",
"type_id": "0001",
"asset_code": "asset_code_2"
}
]
},
# expected_data
[
"asset_code_1",
"asset_code_2"
]
)
]
)
@pytest.mark.asyncio
async def test_retrieve_omf_types_already_created(
self,
p_configuration_key,
p_type_id,
p_data_from_storage,
expected_data,
fixture_omf_north
):
""" Unit test for - _retrieve_omf_types_already_created - successful case """
_payload_builder = MagicMock(spec=payload_builder)
@pytest.mark.asyncio
async def mock_query_tbl_with_payload():
""" mock _query_tbl_with_payload """
return p_data_from_storage
with patch.object(_payload_builder, 'PayloadBuilder', return_value=True):
with patch.object(fixture_omf_north._sending_process_instance._storage_async,
'query_tbl_with_payload',
return_value=mock_query_tbl_with_payload()):
retrieved_rows = await fixture_omf_north._retrieve_omf_types_already_created(p_configuration_key, p_type_id)
assert retrieved_rows == expected_data
    @pytest.mark.parametrize(
        "p_asset_code, "
        "expected_asset_code, ",
        [
            # p_asset_code   # expected_asset_code
            ("asset_code_1 ", "asset_code_1"),
            (" asset_code_2 ", "asset_code_2"),
            ("asset_ code_3", "asset_code_3"),
        ]
    )
    def test_generate_omf_asset_id(
            self,
            p_asset_code,
            expected_asset_code,
            fixture_omf_north
    ):
        """Tests _generate_omf_asset_id - spaces must be stripped from the asset code."""
        generated_asset_code = fixture_omf_north._generate_omf_asset_id(p_asset_code)
        assert generated_asset_code == expected_asset_code
    @pytest.mark.parametrize(
        "p_type_id, "
        "p_asset_code, "
        "expected_measurement_id, ",
        [
            # p_type_id - p_asset_code - expected_asset_code
            ("0001", "asset_code_1 ", "0001measurement_asset_code_1"),
            ("0002", " asset_code_2 ", "0002measurement_asset_code_2"),
            ("0003", "asset_ code_3", "0003measurement_asset_code_3"),
        ]
    )
    def test_generate_omf_measurement(
            self,
            p_type_id,
            p_asset_code,
            expected_measurement_id,
            fixture_omf_north
    ):
        """Tests _generate_omf_measurement - id is '<type-id>measurement_<stripped asset code>'."""
        fixture_omf_north._config_omf_types = {"type-id": {"value": p_type_id}}
        generated_measurement_id = fixture_omf_north._generate_omf_measurement(p_asset_code)
        assert generated_measurement_id == expected_measurement_id
    @pytest.mark.parametrize(
        "p_asset_code, "
        "expected_typename, ",
        [
            # p_asset_code - expected_asset_code
            ("asset_code_1 ", "asset_code_1_typename"),
            (" asset_code_2 ", "asset_code_2_typename"),
            ("asset_ code_3", "asset_code_3_typename"),
        ]
    )
    def test_generate_omf_typename_automatic(
            self,
            p_asset_code,
            expected_typename
    ):
        """Tests _generate_omf_typename_automatic - '<stripped asset code>_typename'."""
        # Builds the plugin directly instead of using the fixture: this method
        # does not touch the plugin configuration.
        sending_process_instance = MagicMock()
        config = []
        config_omf_types = []
        logger = MagicMock()
        omf_north = pi_server.PIServerNorthPlugin(sending_process_instance, config, config_omf_types, logger)
        generated_typename = omf_north._generate_omf_typename_automatic(p_asset_code)
        assert generated_typename == expected_typename
    @pytest.mark.parametrize(
        "p_test_data, "
        "p_type_id, "
        "p_static_data, "
        "expected_typename,"
        "expected_omf_type",
        [
            # Case 1 - pressure / Number
            (
                # Origin - Sensor data
                {"asset_code": "pressure", "asset_data": {"pressure": 921.6}},
                # type_id
                "0001",
                # Static Data
                {
                    "Location": "Palo Alto",
                    "Company": "Dianomic"
                },
                # Expected
                'pressure_typename',
                {
                    'pressure_typename':
                    [
                        {
                            'classification': 'static',
                            'id': '0001_pressure_typename_sensor',
                            'properties': {
                                        'Company': {'type': 'string'},
                                        'Name': {'isindex': True, 'type': 'string'},
                                        'Location': {'type': 'string'}
                            },
                            'type': 'object'
                        },
                        {
                            'classification': 'dynamic',
                            'id': '0001_pressure_typename_measurement',
                            'properties': {
                                'Time': {'isindex': True, 'format': 'date-time', 'type': 'string'},
                                'pressure': {'type': 'number', 'format': 'float64'}
                            },
                            'type': 'object'
                        }
                    ]
                }
            ),
            # Case 2 - luxometer / Integer
            (
                # Origin - Sensor data
                {"asset_code": "luxometer", "asset_data": {"lux": 20}},
                # type_id
                "0002",
                # Static Data
                {
                    "Location": "Palo Alto",
                    "Company": "Dianomic"
                },
                # Expected
                'luxometer_typename',
                {
                    'luxometer_typename':
                    [
                        {
                            'classification': 'static',
                            'id': '0002_luxometer_typename_sensor',
                            'properties': {
                                        'Company': {'type': 'string'},
                                        'Name': {'isindex': True, 'type': 'string'},
                                        'Location': {'type': 'string'}
                            },
                            'type': 'object'
                        },
                        {
                            'classification': 'dynamic',
                            'id': '0002_luxometer_typename_measurement',
                            'properties': {
                                'Time': {'isindex': True, 'format': 'date-time', 'type': 'string'},
                                'lux': {'type': 'number', 'format': 'float64'}
                            },
                            'type': 'object'
                        }
                    ]
                }
            ),
            # Case 3 - switch / string
            (
                # Origin - Sensor data
                {"asset_code": "switch", "asset_data": {"button": "up"}},
                # type_id
                "0002",
                # Static Data
                {
                    "Location": "Palo Alto",
                    "Company": "Dianomic"
                },
                # Expected
                'switch_typename',
                {
                    'switch_typename':
                    [
                        {
                            'classification': 'static',
                            'id': '0002_switch_typename_sensor',
                            'properties': {
                                        'Company': {'type': 'string'},
                                        'Name': {'isindex': True, 'type': 'string'},
                                        'Location': {'type': 'string'}
                            },
                            'type': 'object'
                        },
                        {
                            'classification': 'dynamic',
                            'id': '0002_switch_typename_measurement',
                            'properties': {
                                'Time': {'isindex': True, 'format': 'date-time', 'type': 'string'},
                                'button': {'type': 'string'}
                            },
                            'type': 'object'
                        }
                    ]
                }
            )
        ]
    )
    @pytest.mark.asyncio
    async def test_create_omf_type_automatic(
            self,
            p_test_data,
            p_type_id,
            p_static_data,
            expected_typename,
            expected_omf_type,
            fixture_omf_north
    ):
        """ Unit test for - _create_omf_type_automatic - successful case

            Tests the generation of the OMF messages starting from Asset name and data
            using Automatic OMF Type Mapping
        """
        fixture_omf_north._config_omf_types = {"type-id": {"value": p_type_id}}
        fixture_omf_north._config = {}
        fixture_omf_north._config["StaticData"] = p_static_data
        fixture_omf_north._config["formatNumber"] = "float64"
        fixture_omf_north._config["formatInteger"] = "int64"
        # The generated type definition must also be sent to the PI Server
        with patch.object(
                            fixture_omf_north,
                            'send_in_memory_data_to_picromf',
                            return_value=mock_async_call()
                          ) as patched_send_in_memory_data_to_picromf:
            typename, omf_type = await fixture_omf_north._create_omf_type_automatic(p_test_data)
        assert typename == expected_typename
        assert omf_type == expected_omf_type
        patched_send_in_memory_data_to_picromf.assert_called_with ("Type", expected_omf_type[expected_typename])
    @pytest.mark.parametrize(
        "p_type_id, "
        "p_asset_code_omf_type, "
        "expected_typename, "
        "expected_omf_type, ",
        [
            # Case 1
            (
                # p_type_id
                "0001",
                # p_asset_code_omf_type
                {
                    "typename": "position",
                    "static": {
                        "Name": {
                            "type": "string",
                            "isindex": True
                        },
                        "Location": {
                            "type": "string"
                        }
                    },
                    "dynamic": {
                        "Time": {
                            "type": "string",
                            "format": "date-time",
                            "isindex": True
                        },
                        "x": {
                            "type": "number"
                        },
                        "y": {
                            "type": "number"
                        },
                        "z": {
                            "type": "number"
                        }
                    }
                },
                # expected_typename
                "position",
                # expected_omf_type
                {
                    "position": [
                        {
                            "id": "0001_position_sensor",
                            "type": "object",
                            "classification": "static",
                            "properties": {
                                "Name": {
                                    "type": "string",
                                    "isindex": True
                                },
                                "Location": {
                                    "type": "string"
                                }
                            }
                        },
                        {
                            "id": "0001_position_measurement",
                            "type": "object",
                            "classification": "dynamic",
                            "properties": {
                                "Time": {
                                    "type": "string",
                                    "format": "date-time",
                                    "isindex": True
                                },
                                "x": {
                                    "type": "number"
                                },
                                "y": {
                                    "type": "number"
                                },
                                "z": {
                                    "type": "number"
                                }
                            }
                        }
                    ]
                }
            )
        ]
    )
    @pytest.mark.asyncio
    async def test_create_omf_type_configuration_based(
            self,
            p_type_id,
            p_asset_code_omf_type,
            expected_typename,
            expected_omf_type,
            fixture_omf_north
    ):
        """ Unit test for - _create_omf_type_configuration_based - successful case

            Tests the generation of the OMF messages using Configuration Based OMF Type Mapping
        """
        fixture_omf_north._config_omf_types = {"type-id": {"value": p_type_id}}
        # The generated type definition must also be sent to the PI Server
        with patch.object(fixture_omf_north,
                          'send_in_memory_data_to_picromf',
                          return_value=mock_async_call()
                          ) as patched_send_to_picromf:
            generated_typename, \
                generated_omf_type = await fixture_omf_north._create_omf_type_configuration_based(p_asset_code_omf_type)
        assert generated_typename == expected_typename
        assert generated_omf_type == expected_omf_type
        patched_send_to_picromf.assert_any_call("Type", expected_omf_type[expected_typename])
    @pytest.mark.parametrize(
        "p_asset,"
        "p_type_id, "
        "p_static_data, "
        "p_typename,"
        "p_omf_type, "
        "expected_container, "
        "expected_static_data, "
        "expected_link_data ",
        [
            # Case 1 - pressure / Number
            (
                # p_asset
                {"asset_code": "pressure", "asset_data": {"pressure": 921.6}},
                # type_id
                "0001",
                # Static Data
                {
                    "Location": "Palo Alto",
                    "Company": "Dianomic"
                },
                # p_typename
                'pressure_typename',
                # p_omf_type
                {
                    'pressure_typename':
                    [
                        {
                            'classification': 'static',
                            'id': '0001_pressure_typename_sensor',
                            'properties': {
                                        'Company': {'type': 'string'},
                                        'Name': {'isindex': True, 'type': 'string'},
                                        'Location': {'type': 'string'}
                            },
                            'type': 'object'
                        },
                        {
                            'classification': 'dynamic',
                            'id': '0001_pressure_typename_measurement',
                            'properties': {
                                'Time': {'isindex': True, 'format': 'date-time', 'type': 'string'},
                                'pressure': {'type': 'number'}
                            },
                            'type': 'object'
                        }
                    ]
                },
                # expected_container
                [
                    {
                        'typeid': '0001_pressure_typename_measurement',
                        'id': '0001measurement_pressure'
                    }
                ],
                # expected_static_data
                [
                    {
                        'typeid': '0001_pressure_typename_sensor',
                        'values': [
                            {
                                'Company': 'Dianomic',
                                'Location': 'Palo Alto',
                                'Name': 'pressure'
                            }
                        ]
                    }
                ],
                # expected_link_data
                [
                    {
                        'typeid': '__Link', 'values': [
                            {
                                'source': {'typeid': '0001_pressure_typename_sensor', 'index': '_ROOT'},
                                'target': {'typeid': '0001_pressure_typename_sensor', 'index': 'pressure'}
                            },
                            {
                                'source': {'typeid': '0001_pressure_typename_sensor', 'index': 'pressure'},
                                'target': {'containerid': '0001measurement_pressure'}
                            }
                        ]
                    }
                ]
            )
        ]
    )
    @pytest.mark.asyncio
    async def test_create_omf_object_links(
            self,
            p_asset,
            p_type_id,
            p_static_data,
            p_typename,
            p_omf_type,
            expected_container,
            expected_static_data,
            expected_link_data,
            fixture_omf_north
    ):
        """_create_omf_object_links must emit exactly 3 OMF messages:
        the Container, the static Data and the __Link Data."""
        fixture_omf_north._config_omf_types = {"type-id": {"value": p_type_id}}
        fixture_omf_north._config = {"StaticData": p_static_data}
        # One pre-built awaitable per expected send_in_memory_data_to_picromf call
        with patch.object(fixture_omf_north,
                          'send_in_memory_data_to_picromf',
                          side_effect=[asyncio.ensure_future(mock_async_call()) for x in range(3)]
                          ) as patched_send_to_picromf:
            await fixture_omf_north._create_omf_object_links(p_asset["asset_code"], p_typename, p_omf_type)
        assert patched_send_to_picromf.call_count == 3
        patched_send_to_picromf.assert_any_call("Container", expected_container)
        patched_send_to_picromf.assert_any_call("Data", expected_static_data)
        patched_send_to_picromf.assert_any_call("Data", expected_link_data)
@pytest.mark.parametrize(
"p_creation_type, "
"p_data_origin, "
"p_asset_codes_already_created, "
"p_omf_objects_configuration_based ",
[
# Case 1 - automatic
(
# p_creation_type
"automatic",
# Origin
[
{
"id": 10,
"asset_code": "test_asset_code",
"reading": {"humidity": 10, "temperature": 20},
"user_ts": '2018-04-20 09:38:50.163164+00'
}
],
# asset_codes_already_created
[
"test_none"
],
# omf_objects_configuration_based
{"none": "none"}
),
# Case 2 - configuration
(
# p_creation_type
"configuration",
# Origin
[
{
"id": 10,
"asset_code": "test_asset_code",
"reading": {"humidity": 10, "temperature": 20},
"user_ts": '2018-04-20 09:38:50.163164+00'
}
],
# asset_codes_already_created
[
"test_none"
],
# omf_objects_configuration_based
{"test_asset_code": {"value": "test_asset_code"}}
)
]
)
@pytest.mark.asyncio
async def test_create_omf_objects(
self,
p_creation_type,
p_data_origin,
p_asset_codes_already_created,
p_omf_objects_configuration_based,
fixture_omf_north
):
""" Unit test for - create_omf_objects - successful case
Tests the evaluation of the 2 ways of creating OMF objects: automatic or configuration based
"""
config_category_name = "SEND_PR"
type_id = "0001"
fixture_omf_north._config_omf_types = {"type-id": {"value": type_id}}
fixture_omf_north._config_omf_types = p_omf_objects_configuration_based
if p_creation_type == "automatic":
with patch.object(fixture_omf_north,
'_retrieve_omf_types_already_created',
return_value=mock_async_call(p_asset_codes_already_created)):
with patch.object(
fixture_omf_north,
'_create_omf_objects_automatic',
return_value=mock_async_call()
) as patched_create_omf_objects_automatic:
with patch.object(fixture_omf_north,
'_flag_created_omf_type',
return_value=mock_async_call()
) as patched_flag_created_omf_type:
await fixture_omf_north.create_omf_objects(p_data_origin, config_category_name, type_id)
assert patched_create_omf_objects_automatic.called
assert patched_flag_created_omf_type.called
elif p_creation_type == "configuration":
with patch.object(fixture_omf_north,
'_retrieve_omf_types_already_created',
return_value=mock_async_call(p_asset_codes_already_created)):
with patch.object(
fixture_omf_north,
'_create_omf_objects_configuration_based',
return_value=mock_async_call()
) as patched_create_omf_objects_configuration_based:
with patch.object(fixture_omf_north,
'_flag_created_omf_type',
return_value=mock_async_call()
) as patched_flag_created_omf_type:
await fixture_omf_north.create_omf_objects(p_data_origin, config_category_name, type_id)
assert patched_create_omf_objects_configuration_based.called
assert patched_flag_created_omf_type.called
else:
raise Exception("ERROR : creation type not defined !")
@pytest.mark.parametrize(
"p_key, "
"p_value, "
"expected, ",
[
# Good cases
('producerToken', "xxx", "good"),
# Bad cases
('NO-producerToken', "", "exception"),
('producerToken', "", "exception")
]
)
def test_validate_configuration(
self,
p_key,
p_value,
expected):
""" Tests the validation of the configurations retrieved from the Configuration Manager
handled by _validate_configuration """
pi_server._logger = MagicMock()
data = {p_key: {'value': p_value}}
if expected == "good":
assert not pi_server._logger.error.called
elif expected == "exception":
with pytest.raises(ValueError):
pi_server._validate_configuration(data)
assert pi_server._logger.error.called
@pytest.mark.parametrize(
"p_key, "
"p_value, "
"expected, ",
[
# Good cases
('type-id', "xxx", "good"),
# Bad cases
('NO-type-id', "", "exception"),
('type-id', "", "exception")
]
)
def test_validate_configuration_omf_type(
self,
p_key,
p_value,
expected):
""" Tests the validation of the configurations retrieved from the Configuration Manager
related to the OMF types """
pi_server._logger = MagicMock()
data = {p_key: {'value': p_value}}
if expected == "good":
assert not pi_server._logger.error.called
elif expected == "exception":
with pytest.raises(ValueError):
pi_server._validate_configuration_omf_type(data)
assert pi_server._logger.error.called
    @pytest.mark.parametrize(
        "p_test_data ",
        [
            # Case 1 - pressure / Number
            (
                {
                    'dummy': 'dummy'
                }
            ),
        ]
    )
    @pytest.mark.asyncio
    async def test_send_in_memory_data_to_picromf_success(
                                                            self,
                                                            p_test_data,
                                                            fixture_omf_north):
        """ Unit test for - send_in_memory_data_to_picromf - successful case

            Tests a successful communication: exactly one HTTP POST, no retries.
        """
        fixture_omf_north._config = dict(producerToken="dummy_producerToken")
        fixture_omf_north._config["URL"] = "dummy_URL"
        fixture_omf_north._config["OMFRetrySleepTime"] = 1
        fixture_omf_north._config["OMFHttpTimeout"] = 1
        fixture_omf_north._config["OMFMaxRetry"] = 1
        fixture_omf_north._config["compression"] = "false"
        with patch.object(aiohttp.ClientSession,
                          'post',
                          return_value=MockAiohttpClientSessionSuccess()
                          ) as patched_aiohttp:
            await fixture_omf_north.send_in_memory_data_to_picromf("Type", p_test_data)
        assert patched_aiohttp.called
        assert patched_aiohttp.call_count == 1
    @pytest.mark.parametrize(
        "p_is_error, "
        "p_code, "
        "p_text, "
        "p_test_data ",
        [
            (
                False,
                400, 'Invalid value type for the property',
                {'dummy': 'dummy'}
            ),
            (
                False,
                400, 'Redefinition of the type with the same ID is not allowed',
                {'dummy': 'dummy'}
            ),
            (
                True,
                400, 'None',
                {'dummy': 'dummy'}
            ),
            (
                True,
                404, 'None',
                {'dummy': 'dummy'}
            ),
            (
                True,
                404, 'Invalid value type for the property',
                {'dummy': 'dummy'}
            ),
        ]
    )
    @pytest.mark.asyncio
    async def test_send_in_memory_data_to_picromf_success_not_blocking_error(
                                                            self,
                                                            p_is_error,
                                                            p_code,
                                                            p_text,
                                                            p_test_data,
                                                            fixture_omf_north):
        """ Unit test for - send_in_memory_data_to_picromf

            test cases of blocking error (exception raised) and not blocking error
        """
        fixture_omf_north._config = dict(producerToken="dummy_producerToken")
        fixture_omf_north._config["URL"] = "dummy_URL"
        fixture_omf_north._config["OMFRetrySleepTime"] = 1
        fixture_omf_north._config["OMFHttpTimeout"] = 1
        fixture_omf_north._config["OMFMaxRetry"] = 1
        fixture_omf_north._config["compression"] = "false"
        # Loads the plugin's default not-blocking error list (a Python-literal string)
        fixture_omf_north._config["notBlockingErrors"] = ast.literal_eval(pi_server._CONFIG_DEFAULT_OMF["notBlockingErrors"]["default"])
        with patch.object(aiohttp.ClientSession,
                          'post',
                          return_value=MockAiohttpClientSession(p_code, p_text)
                          ) as patched_aiohttp:
            if p_is_error:
                # blocking error (exception raised)
                with pytest.raises(Exception):
                    await fixture_omf_north.send_in_memory_data_to_picromf("Data", p_test_data)
            else:
                # not blocking error, operation terminated successfully
                await fixture_omf_north.send_in_memory_data_to_picromf("Data", p_test_data)
        assert patched_aiohttp.called
        # as OMFMaxRetry is set to 1
        assert patched_aiohttp.call_count == 1
    @pytest.mark.parametrize(
        "p_type, "
        "p_test_data ",
        [
            # Case 1 - pressure / Number
            (
                "Type",
                {
                    'pressure_typename':
                    [
                        {
                            'classification': 'static',
                            'id': '0001_pressure_typename_sensor',
                            'properties': {
                                        'Company': {'type': 'string'},
                                        'Name': {'isindex': True, 'type': 'string'},
                                        'Location': {'type': 'string'}
                            },
                            'type': 'object'
                        },
                        {
                            'classification': 'dynamic',
                            'id': '0001_pressure_typename_measurement',
                            'properties': {
                                'Time': {'isindex': True, 'format': 'date-time', 'type': 'string'},
                                'pressure': {'type': 'number'}
                            },
                            'type': 'object'
                        }
                    ]
                }
            ),
        ]
    )
    @pytest.mark.asyncio
    async def test_send_in_memory_data_to_picromf_data(
                                                        self,
                                                        p_type,
                                                        p_test_data,
                                                        fixture_omf_north):
        """ Unit test for - send_in_memory_data_to_picromf - successful case

            Verifies the exact HTTP POST payload (url, OMF headers, JSON body,
            timeout) produced for a given OMF message type.
        """
        # Values for the test
        test_url = "test_URL"
        test_producer_token = "test_producerToken"
        test_omf_http_timeout = 1
        test_headers = {
                        'producertoken': test_producer_token,
                        'messagetype': p_type,
                        'action': 'create',
                        'messageformat': 'JSON',
                        'omfversion': '1.0'}
        fixture_omf_north._config = dict(producerToken=test_producer_token)
        fixture_omf_north._config["URL"] = test_url
        fixture_omf_north._config["OMFRetrySleepTime"] = 1
        fixture_omf_north._config["OMFHttpTimeout"] = test_omf_http_timeout
        fixture_omf_north._config["OMFMaxRetry"] = 1
        fixture_omf_north._config["compression"] = "false"
        # To avoid the wait time
        with patch.object(time, 'sleep', return_value=True):
            with patch.object(aiohttp.ClientSession,
                              'post',
                              return_value=MockAiohttpClientSessionSuccess()
                              ) as patched_aiohttp:
                await fixture_omf_north.send_in_memory_data_to_picromf(p_type, p_test_data)
        # The body must be the JSON serialization of the in-memory data
        str_data = json.dumps(p_test_data)
        assert patched_aiohttp.call_count == 1
        patched_aiohttp.assert_called_with(
                                            url=test_url,
                                            headers=test_headers,
                                            data=str_data,
                                            timeout=test_omf_http_timeout)
    @pytest.mark.parametrize(
        "p_test_data ",
        [
            # Case 1 - pressure / Number
            (
                {
                    'dummy': 'dummy'
                }
            ),
        ]
    )
    @pytest.mark.asyncio
    async def test_send_in_memory_data_to_picromf_error(
                                                            self,
                                                            p_test_data,
                                                            fixture_omf_north):
        """ Unit test for - send_in_memory_data_to_picromf - error case

            Tests the behaviour in case of communication error:
            exception raised,
            message logged
            and number of retries
        """
        max_retry = 3
        fixture_omf_north._config = dict(producerToken="dummy_producerToken")
        fixture_omf_north._config["URL"] = "dummy_URL"
        fixture_omf_north._config["OMFRetrySleepTime"] = 1
        fixture_omf_north._config["OMFHttpTimeout"] = 1
        fixture_omf_north._config["OMFMaxRetry"] = max_retry
        fixture_omf_north._config["compression"] = "false"
        # HTTP 400 with text 'ERROR' does not match this not-blocking entry,
        # so the error is treated as blocking and retried.
        fixture_omf_north._config["notBlockingErrors"] = [{'id': 400, 'message': 'none'}]
        # To avoid the wait time
        with patch.object(time, 'sleep', return_value=True):
            with patch.object(aiohttp.ClientSession,
                              'post',
                              return_value=MockAiohttpClientSessionError()
                              ) as patched_aiohttp:
                # Tests the raising of the exception
                with pytest.raises(Exception):
                    await fixture_omf_north.send_in_memory_data_to_picromf("Type", p_test_data)
        # One POST attempt per configured retry
        assert patched_aiohttp.call_count == max_retry
    @pytest.mark.parametrize(
        "p_data_origin, "
        "type_id, "
        "expected_data_to_send, "
        "expected_is_data_available, "
        "expected_new_position, "
        "expected_num_sent", [
            # Case 1
            (
                # Origin
                [
                    {
                        "id": 10,
                        "asset_code": "test_asset_code",
                        "reading": {"humidity": 11, "temperature": 38},
                        "user_ts": '2018-04-20 09:38:50.163164+00'
                    }
                ],
                "0001",
                # Transformed
                [
                    {
                        "containerid": "0001measurement_test_asset_code",
                        "values": [
                            {
                                "Time": "2018-04-20T09:38:50.163Z",
                                "humidity": 11,
                                "temperature": 38
                            }
                        ]
                    }
                ],
                True, 10, 1
            ),
            # Case 2
            (
                # Origin
                [
                    {
                        "id": 11,
                        "asset_code": "test_asset_code",
                        "reading": {"tick": "tock"},
                        "user_ts": '2018-04-20 09:38:50.163164+00'
                    }
                ],
                "0001",
                # Transformed
                [
                    {
                        "containerid": "0001measurement_test_asset_code",
                        "values": [
                            {
                                "Time": "2018-04-20T09:38:50.163Z",
                                "tick": "tock"
                            }
                        ]
                    }
                ],
                True, 11, 1
            ),
            # Case 3 - 2 rows
            (
                # Origin
                [
                    {
                        "id": 12,
                        "asset_code": "test_asset_code",
                        "reading": {"pressure": 957.2},
                        "user_ts": '2018-04-20 09:38:50.163164+00'
                    },
                    {
                        "id": 20,
                        "asset_code": "test_asset_code",
                        "reading": {"y": 34, "z": 114, "x": -174},
                        "user_ts": '2018-04-20 09:38:50.163164+00'
                    }
                ],
                "0001",
                # Transformed
                [
                    {
                        "containerid": "0001measurement_test_asset_code",
                        "values": [
                            {
                                "Time": "2018-04-20T09:38:50.163Z",
                                "pressure": 957.2
                            }
                        ]
                    },
                    {
                        "containerid": "0001measurement_test_asset_code",
                        "values": [
                            {
                                "Time": "2018-04-20T09:38:50.163Z",
                                "y": 34,
                                "z": 114,
                                "x": -174,
                            }
                        ]
                    },
                ],
                True, 20, 2
            )
        ])
    def test_plugin_transform_in_memory_data(self,
                                             p_data_origin,
                                             type_id,
                                             expected_data_to_send,
                                             expected_is_data_available,
                                             expected_new_position,
                                             expected_num_sent,
                                             fixture_omf_north):
        """Tests the plugin in memory transformations.

        Each raw storage row must become one OMF 'Data' entry (containerid +
        values, with the timestamp reformatted to ISO-8601 'Z' notation);
        new_position must be the id of the last row processed.
        """
        # transform_in_memory_data fills this list in place
        generated_data_to_send = [None for x in range( len(expected_data_to_send) )]
        fixture_omf_north._config_omf_types = {"type-id": {"value": type_id}}
        is_data_available, new_position, num_sent = fixture_omf_north.transform_in_memory_data(generated_data_to_send,
                                                                                              p_data_origin)
        assert generated_data_to_send == expected_data_to_send
        assert is_data_available == expected_is_data_available
        assert new_position == expected_new_position
        assert num_sent == expected_num_sent
|
|
# "High performance data structures
# "
# copied from pypy repo
#
# Copied and completed from the sandbox of CPython
# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
#
# edited for Brython line 558 : catch ImportError instead of AttributeError
import operator
#try:
# from thread import get_ident as _thread_ident
#except ImportError:
def _thread_ident():
return -1
# Layout of one deque block: n data slots followed by two link slots that
# chain blocks into a doubly linked list.
n = 30
LFTLNK = n        # index of the pointer to the block on the left
RGTLNK = n+1      # index of the pointer to the block on the right
BLOCKSIZ = n+2    # total slots per block (data + the two links)
# The deque's size limit is d.maxlen. The limit can be zero or positive, or
# None. After an item is added to a deque, we check to see if the size has
# grown past the limit. If it has, we get the size back down to the limit by
# popping an item off of the opposite end. The methods that can trigger this
# are append(), appendleft(), extend(), and extendleft().
#class deque(object):
class deque:
    def __new__(cls, iterable=(), *args, **kw):
        """Allocate the instance and initialise it to the empty state."""
        #fixme
        #self = super(deque, cls).__new__(cls, *args, **kw)
        # NOTE(review): on Python 3, object.__new__ raises TypeError when given
        # extra positional/keyword args; this only works for empty *args/**kw.
        self=object.__new__(cls, *args, **kw)
        # clear() sets up the block structure but not _maxlen (that is done in
        # __init__), so an instance created via __new__ alone is incomplete.
        self.clear()
        return self
    def __init__(self, iterable=(), maxlen=None):
        """Initialise the deque from *iterable*, optionally bounded by *maxlen*."""
        object.__init__(self)
        self.clear()
        if maxlen is not None:
            if maxlen < 0:
                raise ValueError("maxlen must be non-negative")
        self._maxlen = maxlen
        # Bind the method once; append() enforces the maxlen bound per element.
        add = self.append
        for elem in iterable:
            add(elem)
    @property
    def maxlen(self):
        """Read-only size bound of the deque (None means unbounded)."""
        return self._maxlen
    def clear(self):
        """Empty the deque: a single shared block with both indices recentered."""
        self.right = self.left = [None] * BLOCKSIZ
        self.rightndx = n//2    # points to last written element
        self.leftndx = n//2+1
        self.length = 0
        # Mutation counter; presumably used to detect concurrent modification
        # during iteration (iterator code is outside this view - confirm).
        self.state = 0
    def append(self, x):
        """Add *x* to the right side; evict from the left when maxlen is exceeded."""
        self.state += 1
        self.rightndx += 1
        if self.rightndx == n:
            # Right block full: link a fresh block on the right and continue there.
            newblock = [None] * BLOCKSIZ
            self.right[RGTLNK] = newblock
            newblock[LFTLNK] = self.right
            self.right = newblock
            self.rightndx = 0
        self.length += 1
        self.right[self.rightndx] = x
        if self.maxlen is not None and self.length > self.maxlen:
            self.popleft()
    def appendleft(self, x):
        """Add *x* to the left side; evict from the right when maxlen is exceeded."""
        self.state += 1
        self.leftndx -= 1
        if self.leftndx == -1:
            # Left block full: link a fresh block on the left and continue there.
            newblock = [None] * BLOCKSIZ
            self.left[LFTLNK] = newblock
            newblock[RGTLNK] = self.left
            self.left = newblock
            self.leftndx = n-1
        self.length += 1
        self.left[self.leftndx] = x
        if self.maxlen is not None and self.length > self.maxlen:
            self.pop()
def extend(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.append(elem)
def extendleft(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.appendleft(elem)
    def pop(self):
        """Remove and return the rightmost element; raise IndexError when empty."""
        # Empty iff both ends share one block and the indices have crossed.
        if self.left is self.right and self.leftndx > self.rightndx:
            #raise IndexError, "pop from an empty deque" # does not work in brython
            raise IndexError("pop from an empty deque")
        x = self.right[self.rightndx]
        self.right[self.rightndx] = None   # drop the reference for GC
        self.length -= 1
        self.rightndx -= 1
        self.state += 1
        if self.rightndx == -1:
            prevblock = self.right[LFTLNK]
            if prevblock is None:
                # the deque has become empty; recenter instead of freeing block
                self.rightndx = n//2
                self.leftndx = n//2+1
            else:
                # Unlink the exhausted right block and step to its neighbour.
                prevblock[RGTLNK] = None
                self.right[LFTLNK] = None
                self.right = prevblock
                self.rightndx = n-1
        return x
    def popleft(self):
        """Remove and return the leftmost element; raise IndexError when empty."""
        # Empty iff both ends share one block and the indices have crossed.
        if self.left is self.right and self.leftndx > self.rightndx:
            #raise IndexError, "pop from an empty deque"
            raise IndexError("pop from an empty deque")
        x = self.left[self.leftndx]
        self.left[self.leftndx] = None   # drop the reference for GC
        self.length -= 1
        self.leftndx += 1
        self.state += 1
        if self.leftndx == n:
            prevblock = self.left[RGTLNK]
            if prevblock is None:
                # the deque has become empty; recenter instead of freeing block
                self.rightndx = n//2
                self.leftndx = n//2+1
            else:
                # Unlink the exhausted left block and step to its neighbour.
                prevblock[LFTLNK] = None
                self.left[RGTLNK] = None
                self.left = prevblock
                self.leftndx = 0
        return x
def count(self, value):
    """Return the number of elements equal to *value*."""
    return sum(1 for item in self if item == value)
def remove(self, value):
    """Remove the first (leftmost) occurrence of *value*.

    Raises ValueError if the value is not present.
    """
    # Need to be defensive for mutating comparisons
    # (index-based loop re-reads len/self[i] each pass on purpose).
    for i in range(len(self)):
        if self[i] == value:
            del self[i]
            return
    raise ValueError("deque.remove(x): x not in deque")
def rotate(self, n=1):
    """Rotate the deque *n* steps to the right (negative n rotates left).

    Note: the parameter n shadows the module-level block size inside this
    method; the body only uses the argument.
    """
    length = len(self)
    if length == 0:
        return
    # Reduce n modulo length and pick the direction that moves the
    # fewest elements.
    halflen = (length+1) >> 1
    if n > halflen or n < -halflen:
        n %= length
        if n > halflen:
            n -= length
        elif n < -halflen:
            n += length
    while n > 0:
        self.appendleft(self.pop())
        n -= 1
    while n < 0:
        self.append(self.popleft())
        n += 1
def reverse(self):
    "reverse *IN PLACE*"
    # Two cursors walk toward each other, swapping elements pairwise.
    leftblock = self.left
    rightblock = self.right
    leftindex = self.leftndx
    rightindex = self.rightndx
    for i in range(self.length // 2):
        # Validate that pointers haven't met in the middle
        assert leftblock != rightblock or leftindex < rightindex

        # Swap
        (rightblock[rightindex], leftblock[leftindex]) = (
            leftblock[leftindex], rightblock[rightindex])

        # Advance left block/index pair
        leftindex += 1
        if leftindex == n:
            leftblock = leftblock[RGTLNK]
            assert leftblock is not None
            leftindex = 0

        # Step backwards with the right block/index pair
        rightindex -= 1
        if rightindex == -1:
            rightblock = rightblock[LFTLNK]
            assert rightblock is not None
            rightindex = n - 1
def __repr__(self):
    """repr that survives self-referential deques.

    A per-thread marker attribute is planted in the instance dict while
    rendering; re-entering __repr__ on the same thread short-circuits to
    'deque([...])'.  _thread_ident is defined elsewhere in this file.
    """
    threadlocalattr = '__repr' + str(_thread_ident())
    if threadlocalattr in self.__dict__:
        return 'deque([...])'
    else:
        self.__dict__[threadlocalattr] = True
        try:
            if self.maxlen is not None:
                return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
            else:
                return 'deque(%r)' % (list(self),)
        finally:
            del self.__dict__[threadlocalattr]
def __iter__(self):
    # deque_iterator wraps the generator and tracks a remaining counter.
    return deque_iterator(self, self._iter_impl)

def _iter_impl(self, original_state, giveup):
    """Generator walking blocks left to right.

    giveup() is called (and raises) whenever the deque's state counter
    differs from the snapshot taken when iteration began.
    """
    if self.state != original_state:
        giveup()
    block = self.left
    while block:
        l, r = 0, n
        # The first and last blocks are only partially filled.
        if block is self.left:
            l = self.leftndx
        if block is self.right:
            r = self.rightndx + 1
        for elem in block[l:r]:
            yield elem
            if self.state != original_state:
                giveup()
        block = block[RGTLNK]
def __reversed__(self):
    return deque_iterator(self, self._reversed_impl)

def _reversed_impl(self, original_state, giveup):
    """Generator walking blocks right to left (mirror of _iter_impl)."""
    if self.state != original_state:
        giveup()
    block = self.right
    while block:
        l, r = 0, n
        # The first and last blocks are only partially filled.
        if block is self.left:
            l = self.leftndx
        if block is self.right:
            r = self.rightndx + 1
        for elem in reversed(block[l:r]):
            yield elem
            if self.state != original_state:
                giveup()
        block = block[LFTLNK]
def __len__(self):
    """Return the number of elements; tracked incrementally on mutation."""
    return self.length
def __getref(self, index):
    """Translate a deque index into a (block, offset) pair.

    Non-negative indices walk blocks from the left end; negative indices
    walk from the right end.  Raises IndexError when out of range.
    """
    if index >= 0:
        block = self.left
        while block:
            l, r = 0, n
            if block is self.left:
                l = self.leftndx
            if block is self.right:
                r = self.rightndx + 1
            span = r-l
            if index < span:
                return block, l+index
            index -= span
            block = block[RGTLNK]
    else:
        block = self.right
        while block:
            l, r = 0, n
            if block is self.left:
                l = self.leftndx
            if block is self.right:
                r = self.rightndx + 1
            # span counted negatively so the negative index can be
            # consumed block by block from the right.
            negative_span = l-r
            if index >= negative_span:
                return block, r+index
            index -= negative_span
            block = block[LFTLNK]
    raise IndexError("deque index out of range")
def __getitem__(self, index):
    # __getref is name-mangled; it resolves within this class only.
    block, index = self.__getref(index)
    return block[index]

def __setitem__(self, index, value):
    block, index = self.__getref(index)
    block[index] = value
def __delitem__(self, index):
    """Delete the element at *index* (may be negative).

    Implemented by rotating the target element to an end, popping it,
    and rotating back.
    """
    length = len(self)
    if index >= 0:
        if index >= length:
            raise IndexError("deque index out of range")
        self.rotate(-index)
        self.popleft()
        self.rotate(index)
    else:
        # Normalize the negative index to its distance from the right
        # end: -index - 1 (equivalent to ~index, without bitwise ops).
        # The previous `index ^ (2**31)` workaround was wrong for
        # Python's arbitrary-precision ints (e.g. -1 ^ 2**31 is
        # -2147483649, not 0), deleting the wrong element and breaking
        # the range check.
        index = -index - 1
        if index >= length:
            raise IndexError("deque index out of range")
        self.rotate(index)
        self.pop()
        self.rotate(-index)
def __reduce_ex__(self, proto):
    # Pickle as (class, (contents, maxlen)); the protocol number is ignored.
    return type(self), (list(self), self.maxlen)

def __hash__(self):
    #raise TypeError, "deque objects are unhashable"
    raise TypeError("deque objects are unhashable")

def __copy__(self):
    # Shallow copy preserving the maxlen bound.
    return self.__class__(self, self.maxlen)
# XXX make comparison more efficient
# All comparisons materialize both deques as lists, so each is O(n);
# non-deque operands defer to the other side via NotImplemented.
def __eq__(self, other):
    if isinstance(other, deque):
        return list(self) == list(other)
    else:
        return NotImplemented

def __ne__(self, other):
    if isinstance(other, deque):
        return list(self) != list(other)
    else:
        return NotImplemented

def __lt__(self, other):
    if isinstance(other, deque):
        return list(self) < list(other)
    else:
        return NotImplemented

def __le__(self, other):
    if isinstance(other, deque):
        return list(self) <= list(other)
    else:
        return NotImplemented

def __gt__(self, other):
    if isinstance(other, deque):
        return list(self) > list(other)
    else:
        return NotImplemented

def __ge__(self, other):
    if isinstance(other, deque):
        return list(self) >= list(other)
    else:
        return NotImplemented

def __iadd__(self, other):
    # += extends in place and returns self, matching list semantics.
    self.extend(other)
    return self
class deque_iterator(object):
    """Iterator over a deque that detects mutation during iteration.

    *itergen* is a generator function taking (original_state, giveup);
    giveup() raises RuntimeError when the deque mutates mid-iteration.
    """

    def __init__(self, deq, itergen):
        # Remaining-element counter; zeroed when iteration is abandoned.
        self.counter = len(deq)
        def giveup():
            self.counter = 0
            #raise RuntimeError, "deque mutated during iteration"
            raise RuntimeError("deque mutated during iteration")
        self._gen = itergen(deq.state, giveup)

    def __next__(self):
        # Python 3 iterator protocol.  The original defined only a Py2
        # style next() that called self._gen.next(), which fails on
        # Python 3 generators; use the builtin next() instead.
        res = next(self._gen)
        self.counter -= 1
        return res

    # Backwards-compatible alias for callers using the Py2 spelling.
    next = __next__

    def __iter__(self):
        return self
class defaultdict(dict):
    """dict subclass that calls a factory to supply missing values.

    Pure-Python fallback for collections.defaultdict.
    """

    def __init__(self, *args, **kwds):
        if len(args) > 0:
            default_factory = args[0]
            args = args[1:]
            if not callable(default_factory) and default_factory is not None:
                raise TypeError("first argument must be callable")
        else:
            default_factory = None
        # Bug fix: the original passed the raw tuple/dict positionally
        # (dict.__init__(self, args, kwds) / self.update(args, kwds)),
        # which is a TypeError in CPython; unpack them instead.
        dict.__init__(self)
        self.default_factory = default_factory
        self.update(*args, **kwds)

    #fixme.. had to add this function to get defaultdict working with brython correctly
    def __getitem__(self, key):
        # Route missing keys through __missing__ explicitly, for
        # interpreters whose dict.__getitem__ does not call __missing__.
        if self.__contains__(key):
            return dict.__getitem__(self, key)
        return self.__missing__(key)

    def __missing__(self, key):
        """Insert and return default_factory(); KeyError without a factory."""
        # from defaultdict docs
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value

    def __repr__(self, recurse=set()):
        # `recurse` is a deliberately shared (function-level) set of ids
        # currently being rendered, guarding self-referential dicts.
        if id(self) in recurse:
            return "defaultdict(...)"
        try:
            recurse.add(id(self))
            return "defaultdict(%s, %s)" % (repr(self.default_factory),
                                            super(defaultdict, self).__repr__())
        finally:
            recurse.remove(id(self))

    def copy(self):
        """Shallow copy preserving default_factory."""
        return type(self)(self.default_factory, self)

    def __copy__(self):
        return self.copy()

    def __reduce__(self):
        #
        #__reduce__ must return a 5-tuple as follows:
        #
        #   - factory function
        #   - tuple of args for the factory function
        #   - additional state (here None)
        #   - sequence iterator (here None)
        #   - dictionary iterator (yielding successive (key, value) pairs
        #     This API is used by pickle.py and copy.py.
        #
        # Bug fix: dict has no iteritems() on Python 3; use items().
        return (type(self), (self.default_factory,), None, None,
                iter(self.items()))
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    if rename:
        names = list(field_names)
        seen = set()
        for i, name in enumerate(names):
            # all() replaces the original min() idiom: same result for
            # non-empty names, and no ValueError on an empty name.
            if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
                or not name or name[0].isdigit() or name.startswith('_')
                or name in seen):
                names[i] = '_%d' % i
            seen.add(name)
        field_names = tuple(names)
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(self):
            'Return a new dict which maps field names to their values'
            return dict(zip(self._fields, self)) \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
    if verbose:
        print(template)

    # Execute the template string in a temporary namespace
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template, namespace)
    except SyntaxError as e:
        # Bug fix: Python 3 exceptions have no .message; SyntaxError
        # exposes the text via .msg.
        raise SyntaxError(e.msg + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass

    return result
if __name__ == '__main__':
    # Smoke test for the namedtuple fallback defined above.
    demo_cls = namedtuple('Point', ['x', 'y'])
    demo = demo_cls(11, y=22)
    print(demo[0] + demo[1])
    x, y = demo
    print(x, y)
    print(demo.x + demo.y)
    print(demo)
|
|
"""XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
from http.server import BaseHTTPRequestHandler
import http.server
import socketserver
import sys
import os
import re
import pydoc
import inspect
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object.  Raises
    an AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """
    parts = attr.split('.') if allow_dotted_names else [attr]
    for name in parts:
        # Names with a leading underscore are never exposed over XML-RPC.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    return [name for name in dir(obj)
            if not name.startswith('_') and callable(getattr(obj, name))]
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer
    """

    def __init__(self, allow_none=False, encoding=None):
        # name -> callable, populated by register_function()
        self.funcs = {}
        # optional fallback object searched for methods (register_instance)
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding or 'utf-8'

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

            *** SECURITY WARNING: ***

            Enabling the allow_dotted_names options allows intruders
            to access your module's global variables and may allow
            intruders to execute arbitrary code on your machine.  Only
            use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name=None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """
        if name is None:
            name = function.__name__
        self.funcs[name] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                           'system.methodSignature' : self.system_methodSignature,
                           'system.methodHelp' : self.system_methodHelp})

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = loads(data)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = dumps(response, methodresponse=1,
                             allow_none=self.allow_none, encoding=self.encoding)
        except Fault as fault:
            response = dumps(fault, allow_none=self.allow_none,
                             encoding=self.encoding)
        except:
            # report exception back to server
            # NOTE: deliberately broad -- any failure must still produce a
            # well-formed XML-RPC Fault response rather than propagate.
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = dumps(
                Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )

        # Always returns bytes in the configured encoding.
        return response.encode(self.encoding)

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""
        methods = set(self.funcs.keys())
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods |= set(self.instance._listMethods())
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods |= set(list_public_methods(self.instance))
        return sorted(methods)

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""

        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html

        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""

        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """

        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']

            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault as fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                # Broad on purpose: each sub-call's failure is reported as a
                # fault entry without aborting the remaining calls.
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """

        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass

        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU

    #Override from StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True

    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)

    def accept_encodings(self):
        # Parse the Accept-Encoding header into {coding: q-value}.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                # A missing q parameter means q=1.0 per RFC 7231.
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r

    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            # NOTE(review): a zero-byte read here would loop forever;
            # upstream CPython later added a break on empty chunk -- confirm.
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                size_remaining -= len(L[-1])
            data = b''.join(L)

            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None), self.path
                )
        except Exception as e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)

            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                trace = traceback.format_exc()
                # header values must be ASCII-safe
                trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
                self.send_header("X-traceback", trace)

            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    # gzip the body only if the client advertised support
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

    def decode_request_content(self, data):
        #support gzip encoding of request
        # Returns the decoded body, or None after sending an error response.
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()

    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = b'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""

        if self.server.logRequests:
            BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    allow_reuse_address = True

    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as it will send out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True):
        self.logRequests = logRequests

        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths.  This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True):
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        # Maps an HTTP request path to the dispatcher serving it.
        self.dispatchers = {}
        self.allow_none = allow_none
        # Keep a concrete encoding so the error path below can encode bytes
        # (the original stored a possibly-None encoding).
        self.encoding = encoding or 'utf-8'

    def add_dispatcher(self, path, dispatcher):
        """Register *dispatcher* to serve requests arriving at *path*."""
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
        """Route the request to the dispatcher registered for *path*."""
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
                data, dispatch_method, path)
        except Exception:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            # Bug fix: the original referenced the unimported Python 2
            # name 'xmlrpclib' (NameError at runtime) and returned str
            # while the success path returns bytes; use the module-level
            # Fault/dumps and encode, as in upstream CPython.
            exc_type, exc_value = sys.exc_info()[:2]
            response = dumps(
                Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
            response = response.encode(self.encoding)
        return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""

        response = self._marshaled_dispatch(request_text)

        # Headers go through the text layer; the XML body is bytes.
        print('Content-Type: text/xml')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """

        code = 400
        message, explain = BaseHTTPRequestHandler.responses[code]

        response = http.server.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        response = response.encode('utf-8')
        print('Status: %d %s' % (code, message))
        print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()

    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """

        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (ValueError, TypeError):
                # missing or malformed CONTENT_LENGTH: read to EOF
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)

            self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""

    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0

        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # The URL is embedded in an HTML attribute, so double quotes
                # must be entity-escaped as well (self.escape only covers
                # '&', '<' and '>').  The previous code had a no-op
                # .replace('"', '"') here.
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)

    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''

        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))

        if inspect.ismethod(object) or inspect.isfunction(object):
            # inspect.getargspec was deprecated since 3.0 and removed in
            # Python 3.11; inspect.signature handles keyword-only
            # arguments and bound methods (which drop `self`) correctly.
            try:
                argspec = str(inspect.signature(object))
            except (TypeError, ValueError):
                # Some callables do not support introspection.
                argspec = '(...)'
        else:
            argspec = '(...)'

        # (argspec, documentation) tuples come from the instance's
        # _get_method_argstring/_methodHelp hooks; they override the
        # introspected values.
        if isinstance(object, tuple):
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)

        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))

        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)

    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        # Map both the method name and the documented object to the same
        # in-page anchor so markup() can hyperlink either.
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]

        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')
        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        contents = []
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', ''.join(contents))

        return result
class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.

    This class is designed as mix-in and should not
    be constructed directly.
    """

    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'

    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title

    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name

    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation

    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server

        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        # NOTE(review): relies on the dispatcher mix-in providing
        # system_listMethods(), self.funcs and self.instance.
        methods = {}

        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                method = self.funcs[method_name]
            elif self.instance is not None:
                method_info = [None, None] # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)

                method_info = tuple(method_info)
                if method_info != (None, None):
                    # The instance supplied at least part of the metadata;
                    # docroutine knows how to handle an (argspec, doc) tuple.
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    # No hooks and no custom dispatcher: introspect the
                    # attribute directly.
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        method = method_info
                else:
                    # A custom _dispatch hides the real callables, so the
                    # best we can do is the empty (None, None) tuple.
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"

            methods[method_name] = method

        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                            self.server_name,
                            self.server_documentation,
                            methods
                        )

        return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.

    POST requests are decoded as XML-RPC calls (inherited behaviour),
    while every GET request is answered with the server's generated
    HTML documentation.
    """

    def do_GET(self):
        """Serve the HTML documentation page in response to an HTTP GET."""
        # Reject documentation requests outside the configured RPC paths.
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        payload = self.server.generate_html_documentation().encode('utf-8')
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(payload)))
        self.end_headers()
        self.wfile.write(payload)
class DocXMLRPCServer(SimpleXMLRPCServer, XMLRPCDocGenerator):
    """XML-RPC server that also serves its own HTML documentation.

    Combines SimpleXMLRPCServer with the XMLRPCDocGenerator mix-in so
    that GET requests (handled by DocXMLRPCRequestHandler) receive the
    generated documentation page.
    """

    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True):
        # Both bases are initialised explicitly; they do not cooperate
        # via super().
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler(CGIXMLRPCRequestHandler, XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""

    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)

    def handle_get(self):
        """Handles the HTTP GET request.

        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        body = self.generate_html_documentation().encode('utf-8')

        # CGI convention: text headers, blank line, then the encoded
        # document on the binary stream.
        print('Content-Type: text/html')
        print('Content-Length: %d' % len(body))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(body)
        sys.stdout.buffer.flush()
if __name__ == '__main__':
    # Demo entry point: expose pow() and a trivial addition function.
    print('Running XML-RPC server on port 8000')

    def add(x, y):
        return x + y

    rpc_server = SimpleXMLRPCServer(("localhost", 8000))
    rpc_server.register_function(pow)
    rpc_server.register_function(add, 'add')
    rpc_server.serve_forever()
|
|
# Standard imports
import logging
import attrdict as ad
import numpy as np
import pandas as pd
import datetime as pydt
# Our imports
import emission.analysis.point_features as pf
import emission.analysis.intake.segmentation.trip_segmentation as eaist
import emission.core.wrapper.location as ecwl
class DwellSegmentationTimeFilter(eaist.TripSegmentationMethod):
    """Segment a time-filtered location stream into trips by detecting dwells.

    A trip end is declared when the recent points (by both elapsed time and
    point count) all stay within a small radius of each other; see
    ``__init__`` for the exact threshold semantics.

    Fixes applied relative to the original (Python 2 era) code:
    * ``lambda(row):`` tuple-parameter syntax is a SyntaxError on Python 3;
      replaced with plain one-argument lambdas.
    * ``Series.as_matrix()`` was removed in pandas >= 1.0; replaced with
      ``.values`` (available on both old and new pandas).
    * ``np.NaN`` alias was removed in NumPy 2.0; replaced with ``np.nan``.
    * ``has_trip_ended`` now returns an explicit ``False`` instead of
      falling off the end (implicit ``None``).
    """

    def __init__(self, time_threshold, point_threshold, distance_threshold):
        """
        Determines segmentation points for points that were generated using a
        time filter (i.e. report points every n seconds). This will *not* work for
        points generated using a distance filter because it expects to have a
        cluster of points to detect the trip end, and with a distance filter,
        we will not get updates while we are still.

        At least on android, we can get updates at a different frequency than
        the "n" specified above. In particular:
        a) we can get updates more frequently than "n" if there are other apps
        that are requesting updates frequently - for example, while using a routing app.
        b) we can get updates less frequently than "n" if there are bad/low
        accuracy points that are filtered out.

        So we use a combination of a time filter and a "number of points"
        filter to detect the trip end.

        The time_threshold indicates the number of seconds that we need to be
        still before a trip end is detected.
        The point_threshold indicates the number of prior points (after
        filtering) that we need to be still for before a trip end is detected
        The distance_threshold indicates the radius of the circle used to
        detect that we are still. If all the points within the
        time_threshold AND all the points within the point_threshold are
        within the distance_threshold of each other, then we are still.
        """
        self.time_threshold = time_threshold
        self.point_threshold = point_threshold
        self.distance_threshold = distance_threshold

    def segment_into_trips(self, timeseries, time_query):
        """
        Examines the timeseries database for a specific range and returns the
        segmentation points. Note that the input is the entire timeseries and
        the time range. This allows algorithms to use whatever combination of
        data that they want from the sensor streams in order to determine the
        segmentation points.

        :return: list of (trip_start_point, trip_end_point) tuples.
        """
        filtered_points_df = timeseries.get_data_df("background/filtered_location", time_query)
        transition_df = timeseries.get_data_df("statemachine/transition", time_query)
        logging.debug("transition_df = %s" % transition_df[["fmt_time", "transition"]])

        # Tracks how far we got, so callers can resume from this timestamp.
        self.last_ts_processed = None
        logging.info("Last ts processed = %s" % self.last_ts_processed)

        segmentation_points = []
        last_trip_end_point = None
        curr_trip_start_point = None
        just_ended = True
        prevPoint = None
        for idx, row in filtered_points_df.iterrows():
            currPoint = ad.AttrDict(row)
            currPoint.update({"idx": idx})
            logging.debug("-" * 30 + str(currPoint.fmt_time) + "-" * 30)
            if curr_trip_start_point is None:
                logging.debug("Appending currPoint because the current start point is None")
                # segmentation_points.append(currPoint)

            if just_ended:
                if self.continue_just_ended(idx, currPoint, filtered_points_df):
                    # We have "processed" the currPoint by deciding to glom it
                    self.last_ts_processed = currPoint.ts
                    continue
                # else:
                sel_point = currPoint
                logging.debug("Setting new trip start point %s with idx %s" % (sel_point, sel_point.idx))
                curr_trip_start_point = sel_point
                just_ended = False

            # Points of the current trip that fall within the trailing time
            # window, strictly before the current point.
            last5MinsPoints_df = filtered_points_df[np.logical_and(
                np.logical_and(
                    filtered_points_df.ts > currPoint.ts - self.time_threshold,
                    filtered_points_df.ts < currPoint.ts),
                filtered_points_df.ts >= curr_trip_start_point.ts)]
            # Using .loc here causes problems if we have filtered out some points and so the index is non-consecutive.
            # Using .iloc just ends up including points after this one.
            # So we reset_index upstream and use it here.
            # We are going to use the last 8 points for now.
            # TODO: Change this back to last 10 points once we normalize phone and this
            last10Points_df = filtered_points_df.iloc[max(idx - self.point_threshold, curr_trip_start_point.idx):idx + 1]
            # NOTE: plain single-argument lambdas; the original used the
            # Python-2-only `lambda(row):` tuple-parameter form.
            distanceToLast = lambda row: pf.calDistance(ad.AttrDict(row), currPoint)
            timeToLast = lambda row: currPoint.ts - ad.AttrDict(row).ts
            last5MinsDistances = last5MinsPoints_df.apply(distanceToLast, axis=1)
            logging.debug("last5MinsDistances = %s with length %d" % (last5MinsDistances.values, len(last5MinsDistances)))
            last10PointsDistances = last10Points_df.apply(distanceToLast, axis=1)
            logging.debug("last10PointsDistances = %s with length %d, shape %s" % (last10PointsDistances.values,
                                                                                   len(last10PointsDistances),
                                                                                   last10PointsDistances.shape))

            # Fix for https://github.com/e-mission/e-mission-server/issues/348
            last5MinTimes = last5MinsPoints_df.apply(timeToLast, axis=1)

            logging.debug("len(last10PointsDistances) = %d, len(last5MinsDistances) = %d" %
                          (len(last10PointsDistances), len(last5MinsDistances)))
            logging.debug("last5MinsTimes.max() = %s, time_threshold = %s" %
                          (last5MinTimes.max() if len(last5MinTimes) > 0 else np.nan, self.time_threshold))

            if self.has_trip_ended(prevPoint, currPoint, last10PointsDistances, last5MinsDistances, last5MinTimes):
                (ended_before_this, last_trip_end_point) = self.get_last_trip_end_point(filtered_points_df,
                                                                                       last10Points_df, last5MinsPoints_df)
                segmentation_points.append((curr_trip_start_point, last_trip_end_point))
                logging.info("Found trip end at %s" % last_trip_end_point.fmt_time)
                # We have processed everything up to the trip end by marking it as a completed trip
                self.last_ts_processed = currPoint.ts
                if ended_before_this:
                    # in this case, we end a trip at the previous point, and the next trip starts at this
                    # point, not the next one
                    just_ended = False
                    prevPoint = currPoint
                    curr_trip_start_point = currPoint
                    logging.debug("Setting new trip start point %s with idx %s" %
                                  (currPoint, currPoint.idx))
                else:
                    # We end a trip at the current point, and the next trip starts at the next point
                    just_ended = True
                    prevPoint = None
            else:
                prevPoint = currPoint

        # A "stopped tracking" transition (transition == 2) after the last
        # seen point also closes a trip that is still in progress.
        if not just_ended and len(transition_df) > 0:
            stopped_moving_after_last = transition_df[(transition_df.ts > currPoint.ts) & (transition_df.transition == 2)]
            if len(stopped_moving_after_last) > 0:
                (unused, last_trip_end_point) = self.get_last_trip_end_point(filtered_points_df,
                                                                             last10Points_df, None)
                segmentation_points.append((curr_trip_start_point, last_trip_end_point))
                logging.debug("Found trip end at %s" % last_trip_end_point.fmt_time)
                # We have processed everything up to the trip end by marking it as a completed trip
                self.last_ts_processed = currPoint.ts

        return segmentation_points

    def continue_just_ended(self, idx, currPoint, filtered_points_df):
        """
        Normally, since the logic here and the
        logic on the phone are the same, if we have detected a trip
        end, any points after this are part of the new trip.

        However, in some circumstances, notably in my data from 27th
        August, there appears to be a mismatch and we get a couple of
        points past the end that we detected here. So let's look for
        points that are within the distance filter, and are at a
        delta of a minute, and join them to the just ended trip instead of using them to
        start the new trip

        :param idx: Index of the current point
        :param currPoint: current point
        :param filtered_points_df: dataframe of filtered points
        :return: True if we should continue the just ended trip, False otherwise
        """
        if idx == 0:
            return False
        else:
            prev_point = ad.AttrDict(filtered_points_df.iloc[idx - 1])
            logging.debug("Comparing with prev_point = %s" % prev_point)
            if pf.calDistance(prev_point, currPoint) < self.distance_threshold and \
                    currPoint.ts - prev_point.ts <= 60:
                logging.info("Points %s and %s are within the distance filter and only 1 min apart so part of the same trip" %
                             (prev_point, currPoint))
                return True
            else:
                return False

    def has_trip_ended(self, prev_point, curr_point, last10PointsDistances, last5MinsDistances, last5MinTimes):
        """Return True if a trip end should be declared at curr_point."""
        # Another mismatch between phone and server. Phone stops tracking too soon,
        # so the distance is still greater than the threshold at the end of the trip.
        # But then the next point is a long time away, so we can split again (similar to a distance filter)
        if prev_point is None:
            logging.debug("prev_point is None, continuing trip")
        else:
            timeDelta = curr_point.ts - prev_point.ts
            distDelta = pf.calDistance(prev_point, curr_point)
            if timeDelta > 0:
                speedDelta = distDelta / timeDelta
            else:
                speedDelta = np.nan
            speedThreshold = float(self.distance_threshold) / self.time_threshold

            if (timeDelta > 2 * self.time_threshold and   # We have been here for a while
                    speedDelta < speedThreshold):         # we haven't moved very much
                logging.debug("prev_point.ts = %s, curr_point.ts = %s, threshold = %s, large gap = %s, ending trip" %
                              (prev_point.ts, curr_point.ts, self.time_threshold, curr_point.ts - prev_point.ts))
                return True
            else:
                logging.debug("prev_point.ts = %s, curr_point.ts = %s, time gap = %s (vs %s), distance_gap = %s (vs %s), speed_gap = %s (vs %s) continuing trip" %
                              (prev_point.ts, curr_point.ts,
                               timeDelta, self.time_threshold,
                               distDelta, self.distance_threshold,
                               speedDelta, speedThreshold))

        # The -30 is a fuzz factor intended to compensate for older clients
        # where data collection stopped after 5 mins, so that we never actually
        # see 5 mins of data
        if (len(last10PointsDistances) < self.point_threshold - 1 or
                len(last5MinsDistances) == 0 or
                last5MinTimes.max() < self.time_threshold - 30):
            logging.debug("Too few points to make a decision, continuing")
            return False

        # Normal end-of-trip case
        logging.debug("last5MinsDistances.max() = %s, last10PointsDistance.max() = %s" %
                      (last5MinsDistances.max(), last10PointsDistances.max()))
        if (last5MinsDistances.max() < self.distance_threshold and
                last10PointsDistances.max() < self.distance_threshold):
            return True
        # Explicit falsy return; the original fell off the end and
        # returned None, which callers treated as "trip continues".
        return False

    def get_last_trip_end_point(self, filtered_points_df, last10Points_df, last5MinsPoints_df):
        """Pick the point at which the just-detected trip actually ended.

        :return: (ended_before_this, last_trip_end_point) where
            ended_before_this is True when the end point precedes the
            current point (so the current point starts the next trip).
        """
        ended_before_this = last5MinsPoints_df is None or len(last5MinsPoints_df) == 0
        if ended_before_this:
            logging.debug("trip end transition, so last 10 points are %s" % last10Points_df.index)
            last10PointsMedian = np.median(last10Points_df.index)
            last_trip_end_index = int(last10PointsMedian)
            logging.debug("last5MinsPoints not found, last_trip_end_index = %s" % last_trip_end_index)
        else:
            last10PointsMedian = np.median(last10Points_df.index)
            last5MinsPointsMedian = np.median(last5MinsPoints_df.index)
            last_trip_end_index = int(min(last5MinsPointsMedian, last10PointsMedian))
            logging.debug("last5MinsPoints and last10PointsMedian found, last_trip_end_index = %s" % last_trip_end_index)
        # logging.debug("last5MinPoints.median = %s (%s), last10Points_df = %s (%s), sel index = %s" %
        #               (np.median(last5MinsPoints_df.index), last5MinsPoints_df.index,
        #                np.median(last10Points_df.index), last10Points_df.index,
        #                last_trip_end_index))
        last_trip_end_point_row = filtered_points_df.iloc[last_trip_end_index]
        last_trip_end_point = ad.AttrDict(filtered_points_df.iloc[last_trip_end_index])
        logging.debug("Appending last_trip_end_point %s with index %s " %
                      (last_trip_end_point, last_trip_end_point_row.name))
        return (ended_before_this, last_trip_end_point)
|
|
# pylint: disable=E0401
# stdlib
from functools import partial
import logging
import time
import unittest
# 3rd
from mock import Mock, patch
# project
from tests.checks.common import Fixtures
from utils.timeout import TimeoutException
# Module-level logger, passed to the WMISampler instances created in tests.
log = logging.getLogger(__name__)

# Placeholders for names re-bound in TestUnitWMISampler.setUp after the WMI
# modules have been mocked and `checks.libs.wmi.sampler` reloaded.
WMISampler = None
ProviderArchitecture = None
def load_fixture(f, args=None):
    """
    Build a WMI query result from a file and given parameters.
    """
    def parse(line):
        """
        Extract a property name, value and the qualifiers from a fixture line.

        Return (property name, property value, property qualifiers)
        """
        counter_type = ""
        try:
            name, value, counter_type = line.split(" ")
        except ValueError:
            name, value = line.split(" ")

        qualifiers = (
            [Mock(Name='CounterType', Value=int(counter_type))]
            if counter_type else []
        )
        return name, value, qualifiers

    # Build from file
    props = []
    for line in Fixtures.read_file(f).splitlines():
        name, value, qualifiers = parse(line)
        props.append(Mock(Name=name, Value=value, Qualifiers_=qualifiers))

    # Append extra information
    extras = args or []
    if not isinstance(extras, list):
        extras = [extras]
    for name, value in extras:
        props.append(Mock(Name=name, Value=value, Qualifiers_=[]))

    return [Mock(Properties_=props)]
class Counter(object):
    """
    Minimal mutable integer counter.

    Supports in-place increment (`c += n`), equality against plain
    numbers, string conversion and resetting back to zero.
    """
    def __init__(self):
        self.value = 0

    def __iadd__(self, increment):
        self.value = self.value + increment
        return self

    def __eq__(self, other):
        return other == self.value

    def __str__(self):
        return str(self.value)

    def reset(self):
        """Set the counter back to zero."""
        self.value = 0
class SWbemServices(object):
    """
    SWbemServices a.k.a. (mocked) WMI connection.

    Save connection parameters so it can be tested.
    """
    # `ExecQuery` metadata: shared across instances so tests can assert
    # on call counts and simulate slow queries.
    _exec_query_call_count = Counter()
    _exec_query_run_time = 0

    # Class attr to save the last wmi query and flags
    _last_wmi_query = None
    _last_wmi_flags = None

    def __init__(self, wmi_conn_args):
        super(SWbemServices, self).__init__()
        # (args, kwargs) tuple forwarded from Dispatch.ConnectServer.
        self._wmi_conn_args = wmi_conn_args

    @classmethod
    def reset(cls):
        """
        Dirty patch to reset `SWbemServices.ExecQuery.call_count` and
        `SWbemServices._exec_query_run_time` to 0, and the wmi query params
        """
        cls._exec_query_call_count.reset()
        cls._exec_query_run_time = 0
        cls._last_wmi_query = None
        cls._last_wmi_flags = None

    @classmethod
    def get_last_wmi_query(cls):
        """
        Return the last WMI query submitted via the WMI connection.
        """
        return cls._last_wmi_query

    @classmethod
    def get_last_wmi_flags(cls):
        """
        Return the last WMI flags submitted via the WMI connection.
        """
        return cls._last_wmi_flags

    def get_conn_args(self):
        """
        Return parameters used to set up the WMI connection.
        """
        return self._wmi_conn_args

    def ExecQuery(self, query, query_language, flags):
        """
        Mocked `SWbemServices.ExecQuery` method.

        Matches the incoming WQL `query` (and sometimes `flags`) against
        the literal queries the checks are expected to issue, and returns
        the corresponding fixture-backed result list (empty when nothing
        matches).
        """
        # Comply with `ExecQuery` metadata
        self._exec_query_call_count += 1
        time.sleep(self._exec_query_run_time)

        # Save last passed parameters
        SWbemServices._last_wmi_query = query
        SWbemServices._last_wmi_flags = flags

        # Mock a result
        results = []

        if query in [
            "Select AvgDiskBytesPerWrite,FreeMegabytes from Win32_PerfFormattedData_PerfDisk_LogicalDisk",  # noqa
            "Select AvgDiskBytesPerWrite,FreeMegabytes,Name from Win32_PerfFormattedData_PerfDisk_LogicalDisk"  # noqa
        ]:
            results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "C:"))
            results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "D:"))

        if query == "Select CounterRawCount,CounterCounter,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System":  # noqa
            # Mock a previous and a current sample (flags distinguishes the
            # two calls made by the raw sampler).
            sample_file = "win32_perfrawdata_perfos_system_previous" if flags == 131120\
                else "win32_perfrawdata_perfos_system_current"
            results += load_fixture(sample_file, ("Name", "C:"))
            results += load_fixture(sample_file, ("Name", "D:"))

        if query == "Select UnknownCounter,MissingProperty,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System":  # noqa
            results += load_fixture("win32_perfrawdata_perfos_system_unknown", ("Name", "C:"))

        if query in [
            "Select NonDigit,FreeMegabytes from Win32_PerfFormattedData_PerfDisk_LogicalDisk",
            "Select FreeMegabytes,NonDigit from Win32_PerfFormattedData_PerfDisk_LogicalDisk",
        ]:  # noqa
            results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", [("Name", "C:"), ("NonDigit", "Foo")])  # noqa

        if query == "Select IOReadBytesPerSec,IDProcess from Win32_PerfFormattedData_PerfProc_Process WHERE ( Name = 'chrome' )" \
                or query == "Select IOReadBytesPerSec,UnknownProperty from Win32_PerfFormattedData_PerfProc_Process WHERE ( Name = 'chrome' )":  # noqa
            results += load_fixture("win32_perfformatteddata_perfproc_process")

        if query == "Select IOReadBytesPerSec,ResultNotMatchingAnyTargetProperty from Win32_PerfFormattedData_PerfProc_Process WHERE ( Name = 'chrome' )":  # noqa
            results += load_fixture("win32_perfformatteddata_perfproc_process_alt")

        if query == "Select CommandLine from Win32_Process WHERE ( Handle = '4036' )" \
                or query == "Select UnknownProperty from Win32_Process WHERE ( Handle = '4036' )":
            results += load_fixture("win32_process")

        if query == ("Select ServiceUptime,TotalBytesSent,TotalBytesReceived,TotalBytesTransferred,CurrentConnections,TotalFilesSent,TotalFilesReceived,"  # noqa
                     "TotalConnectionAttemptsAllInstances,TotalGetRequests,TotalPostRequests,TotalHeadRequests,TotalPutRequests,TotalDeleteRequests,"  # noqa
                     "TotalOptionsRequests,TotalTraceRequests,TotalNotFoundErrors,TotalLockedErrors,TotalAnonymousUsers,TotalNonAnonymousUsers,TotalCGIRequests,"  # noqa
                     "TotalISAPIExtensionRequests from Win32_PerfFormattedData_W3SVC_WebService WHERE ( Name = 'Failing site' ) OR ( Name = 'Working site' ) OR ( Name = 'Default Web Site' )"):  # noqa
            results += load_fixture("win32_perfformatteddata_w3svc_webservice", ("Name", "Default Web Site"))  # noqa
            results += load_fixture("win32_perfformatteddata_w3svc_webservice", ("Name", "Working site"))  # noqa

        if query == ("Select ServiceUptime,TotalBytesSent,TotalBytesReceived,TotalBytesTransferred,CurrentConnections,TotalFilesSent,TotalFilesReceived,"  # noqa
                     "TotalConnectionAttemptsAllInstances,TotalGetRequests,TotalPostRequests,TotalHeadRequests,TotalPutRequests,TotalDeleteRequests,"  # noqa
                     "TotalOptionsRequests,TotalTraceRequests,TotalNotFoundErrors,TotalLockedErrors,TotalAnonymousUsers,TotalNonAnonymousUsers,TotalCGIRequests,"  # noqa
                     "TotalISAPIExtensionRequests from Win32_PerfFormattedData_W3SVC_WebService WHERE ( Name = '_Total' )"):  # noqa
            results += load_fixture("win32_perfformatteddata_w3svc_webservice", ("Name", "_Total"))  # noqa

        if query == ("Select * from Win32_PerfFormattedData_W3SVC_WebService WHERE ( Name = 'Failing site' ) OR ( Name = 'Working site' ) OR ( Name = 'Default Web Site' )"):  # noqa
            results += load_fixture("win32_perfformatteddata_w3svc_webservice_2008", ("Name", "Default Web Site"))  # noqa
            results += load_fixture("win32_perfformatteddata_w3svc_webservice_2008", ("Name", "Working Site"))  # noqa

        if query == ("Select Name,State from Win32_Service WHERE ( Name = 'WSService' ) OR ( Name = 'WinHttpAutoProxySvc' )"):  # noqa
            results += load_fixture("win32_service_up", ("Name", "WinHttpAutoProxySvc"))
            results += load_fixture("win32_service_down", ("Name", "WSService"))

        if query == ("Select EventCode,SourceName,TimeGenerated,Type,InsertionStrings,Message,Logfile from Win32_NTLogEvent WHERE ( ( SourceName = 'MSSQLSERVER' ) "  # noqa
                     "AND ( Type = 'Error' OR Type = 'Warning' ) AND TimeGenerated >= '20151224113047.000000-480' )"):  # noqa
            results += load_fixture("win32_ntlogevent")

        return results

    # Expose the shared counter under the name tests use
    # (`SWbemServices.ExecQuery.call_count`).
    ExecQuery.call_count = _exec_query_call_count
class Dispatch(object):
    """
    Mock for win32com.client Dispatch class.
    """
    # Shared counter, exposed below as `ConnectServer.call_count`.
    _connect_call_count = Counter()

    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def reset(cls):
        """
        FIXME - Dirty patch to reset `ConnectServer.call_count` to 0.
        """
        cls._connect_call_count.reset()

    def Add(self, *args, **kwargs):
        """
        Add context information.
        """
        pass

    def ConnectServer(self, *args, **kwargs):
        """
        Return a WMI connection, a.k.a. a SWbemServices object.
        """
        Dispatch._connect_call_count += 1
        return SWbemServices((args, kwargs))

    ConnectServer.call_count = _connect_call_count
def to_time(wmi_ts):
    """Ignore the input and return a fixed, arbitrary time struct."""
    return 2015, 12, 24, 11, 30, 47, 0, 0
def from_time(year=0, month=0, day=0, hours=0, minutes=0,
              seconds=0, microseconds=0, timezone=0):
    """Ignore all arguments and return a fixed, arbitrary WMI date string."""
    return "20151224113047.000000-480"
class TestCommonWMI(unittest.TestCase):
    """
    Common toolbox for WMI unit testing.

    Mocks the win32 module stack in setUp and provides assertion helpers
    for WMI connections, queries, objects and samplers.
    """

    def setUp(self):
        """
        Mock WMI related Python packages, so it can be tested on any environment.
        """
        self.patcher = patch.dict('sys.modules', {
            'pywintypes': Mock(),
            'pythoncom': Mock(),
            'win32com': Mock(),
            'win32com.client': Mock(Dispatch=Dispatch),
        })
        self.patcher.start()

    def tearDown(self):
        """
        Undo the module mocks and reset the mock call counters.
        """
        # Stop the `sys.modules` patch started in setUp; the original code
        # never stopped it, leaking the mocked modules across tests
        # (mock docs: every started patcher must be stopped).
        self.patcher.stop()
        # Reset counters
        Dispatch.reset()
        SWbemServices.reset()

    def assertWMIConn(self, wmi_sampler, param=None):
        """
        Helper, assertion on the `wmi_sampler`'s WMI connection(s):
        * `param`: parameters used to establish the connection
        """
        if param:
            connection = wmi_sampler.get_connection()
            wmi_conn_args, wmi_conn_kwargs = connection.get_conn_args()
            if isinstance(param, tuple):
                # (key, value) pair: must appear among the keyword args.
                key, value = param
                self.assertIn(key, wmi_conn_kwargs)
                self.assertEqual(wmi_conn_kwargs[key], value)
            else:
                # Plain value: must appear among the positional args.
                self.assertIn(param, wmi_conn_args)

    def assertWMIQuery(self, query=None, flags=None):
        """
        Helper, assert that the given WMI query and flags were submitted.
        """
        if query:
            last_wmi_query = SWbemServices.get_last_wmi_query()
            self.assertEqual(last_wmi_query, query)

        if flags:
            last_wmi_flags = SWbemServices.get_last_wmi_flags()
            self.assertEqual(last_wmi_flags, flags)

    def assertWMIObject(self, wmi_obj, properties):
        """
        Assert the WMI object integrity, i.e. contains the given properties.

        `properties` is a list of property names or (name, value) tuples.
        """
        for prop_and_value in properties:
            prop = prop_and_value[0] if isinstance(prop_and_value, tuple) else prop_and_value
            value = prop_and_value[1] if isinstance(prop_and_value, tuple) else None

            self.assertIn(prop, wmi_obj)

            if value is None:
                continue

            self.assertEqual(wmi_obj[prop], value)

    def assertWMISampler(self, wmi_sampler, properties, count=None):
        """
        Assert WMI objects' integrity among the WMI sampler.
        """
        self.assertEqual(len(wmi_sampler), count)

        for wmi_obj in wmi_sampler:
            self.assertWMIObject(wmi_obj, properties)

    def assertIn(self, first, second):
        """
        Assert `first` in `second`.

        Note: needs to be defined for Python 2.6
        """
        self.assertTrue(first in second, "{0} not in {1}".format(first, second))

    def assertNotIn(self, first, second):
        """
        Assert `first` is not in `second`.

        Note: needs to be defined for Python 2.6
        """
        self.assertTrue(first not in second, "{0} in {1}".format(first, second))

    def assertInPartial(self, first, second):
        """
        Assert `first` has a key in `second` where it's a prefix.

        Note: needs to be defined for Python 2.6
        """
        self.assertTrue(any(key for key in second if key.startswith(first)), "{0} not in {1}".format(first, second))

    def getProp(self, dict, prefix):
        """
        Get Property from dictionary `dict` starting with `prefix`.

        Note: needs to be defined for Python 2.6
        """
        for key in dict:
            if key.startswith(prefix):
                return dict[key]

        return None
class TestUnitWMISampler(TestCommonWMI):
    """
    Unit tests for WMISampler.

    Fix: the deprecated ``assertEquals`` alias is replaced by ``assertEqual``
    everywhere (same behavior, no DeprecationWarning).
    """
    def setUp(self):
        TestCommonWMI.setUp(self)
        global WMISampler
        global ProviderArchitecture
        # Reload to apply the mocking if the module was already loaded
        from checks.libs.wmi import sampler
        reload(sampler)
        # Bind the logger as the first positional argument once and for all.
        WMISampler = partial(sampler.WMISampler, log)
        ProviderArchitecture = sampler.ProviderArchitecture

    def test_wmi_connection(self):
        """
        Establish a WMI connection to the specified host/namespace, with the right credentials.
        """
        wmi_sampler = WMISampler(
            "Win32_PerfRawData_PerfOS_System",
            ["ProcessorQueueLength"],
            host="myhost",
            namespace="some/namespace",
            username="datadog",
            password="password",
            provider=32,
        )
        # Request a connection but do nothing
        wmi_sampler.get_connection()
        # Connection was established with the right parameters
        self.assertWMIConn(wmi_sampler, param="myhost")
        self.assertWMIConn(wmi_sampler, param="some/namespace")

    def test_wmi_provider_architecture(self):
        """
        Validate and set a WMI Provider Architecture.
        """
        # No provider given, default
        wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"])
        self.assertEqual(wmi_sampler.provider, ProviderArchitecture.DEFAULT)
        # Invalid provider, default
        wmi_sampler1 = WMISampler(
            "Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], provider="foo"
        )
        wmi_sampler2 = WMISampler(
            "Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], provider=123
        )
        self.assertEqual(wmi_sampler1.provider, ProviderArchitecture.DEFAULT)
        self.assertEqual(wmi_sampler2.provider, ProviderArchitecture.DEFAULT)
        # Valid providers (both int and str forms are accepted)
        wmi_sampler32 = WMISampler(
            "Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], provider=32
        )
        wmi_sampler64 = WMISampler(
            "Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], provider="64"
        )
        self.assertEqual(wmi_sampler32.provider, ProviderArchitecture._32BIT)
        self.assertEqual(wmi_sampler64.provider, ProviderArchitecture._64BIT)

    def test_no_wmi_connection_pooling(self):
        """
        WMI connections are not be shared among WMISampler objects.
        """
        from win32com.client import Dispatch  # noqa
        wmi_sampler_1 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"])
        wmi_sampler_2 = WMISampler("Win32_OperatingSystem", ["TotalVisibleMemorySize"])
        wmi_sampler_3 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], host="myhost")  # noqa
        wmi_sampler_1.sample()
        wmi_sampler_2.sample()
        # 3 conns have been opened, 2 for the raw sampler and 1 for the other sampler
        self.assertEqual(Dispatch.ConnectServer.call_count, 3, Dispatch.ConnectServer.call_count)
        wmi_sampler_3.sample()
        # 5 conns now
        self.assertEqual(Dispatch.ConnectServer.call_count, 5, Dispatch.ConnectServer.call_count)

    def test_wql_filtering(self):
        """
        Format the filters to a comprehensive WQL `WHERE` clause.
        """
        from checks.libs.wmi import sampler
        format_filter = sampler.WMISampler._format_filter
        # Check `_format_filter` logic
        no_filters = []
        filters = [{'Name': "SomeName", 'Id': "SomeId"}]
        self.assertEqual("", format_filter(no_filters))
        self.assertEqual(" WHERE ( Name = 'SomeName' AND Id = 'SomeId' )",
                         format_filter(filters))

    def test_wql_multiquery_filtering(self):
        """
        Format the filters with multiple properties per instance to a comprehensive WQL `WHERE` clause.
        """
        from checks.libs.wmi import sampler
        format_filter = sampler.WMISampler._format_filter
        # Check `_format_filter` logic
        no_filters = []
        filters = [{'Name': "SomeName", 'Property1': "foo"}, {'Name': "OtherName", 'Property1': "bar"}]
        self.assertEqual("", format_filter(no_filters))
        self.assertEqual(" WHERE ( Property1 = 'bar' AND Name = 'OtherName' ) OR"
                         " ( Property1 = 'foo' AND Name = 'SomeName' )",
                         format_filter(filters))

    def test_wql_empty_list(self):
        """
        Format filters to a comprehensive WQL `WHERE` clause skipping empty lists.
        """
        from checks.libs.wmi import sampler
        format_filter = sampler.WMISampler._format_filter
        filters = []
        query = {}
        query['User'] = ('=', 'luser')
        query['SourceName'] = ('=', 'MSSQL')
        query['EventCode'] = []
        query['SomethingEmpty'] = []
        query['MoreNothing'] = []
        filters.append(query)
        self.assertEqual(" WHERE ( SourceName = 'MSSQL' AND User = 'luser' )",
                         format_filter(filters))

    def test_wql_filtering_op_adv(self):
        """
        Format the filters to a comprehensive WQL `WHERE` clause w/ mixed filter containing regular and operator modified properties.
        """
        from checks.libs.wmi import sampler
        format_filter = sampler.WMISampler._format_filter
        # Check `_format_filter` logic
        filters = [{'Name': "Foo%"}, {'Name': "Bar%", 'Id': ('>=', "SomeId")}, {'Name': "Zulu"}]
        self.assertEqual(" WHERE ( Name = 'Zulu' ) OR ( Name LIKE 'Bar%' AND Id >= 'SomeId' ) OR ( Name LIKE 'Foo%' )",
                         format_filter(filters))

    def test_wql_eventlog_filtering(self):
        """
        Format filters with the eventlog expected form to a comprehensive WQL `WHERE` clause.
        """
        from checks.libs.wmi import sampler
        from datetime import datetime
        from checks.wmi_check import from_time
        format_filter = sampler.WMISampler._format_filter
        filters = []
        query = {}
        and_props = ['mEssage']
        ltypes = ["Error", "Warning"]
        source_names = ["MSSQLSERVER", "IIS"]
        log_files = ["System", "Security"]
        event_codes = [302, 404, 501]
        # Filters starting with '-' are negated (NOT LIKE), others are LIKE.
        message_filters = ["-foo", "%bar%", "%zen%"]
        last_ts = datetime(2016, 1, 1, 15, 8, 24, 78915)
        query['TimeGenerated'] = ('>=', from_time(last_ts))
        query['Type'] = ('=', 'footype')
        query['User'] = ('=', 'luser')
        query['SourceName'] = ('=', 'MSSQL')
        query['LogFile'] = ('=', 'thelogfile')
        query['Type'] = []
        for ltype in ltypes:
            query['Type'].append(('=', ltype))
        query['SourceName'] = []
        for source_name in source_names:
            query['SourceName'].append(('=', source_name))
        query['LogFile'] = []
        for log_file in log_files:
            query['LogFile'].append(('=', log_file))
        query['EventCode'] = []
        for code in event_codes:
            query['EventCode'].append(('=', code))
        query['NOT Message'] = []
        query['Message'] = []
        for filt in message_filters:
            if filt[0] == '-':
                query['NOT Message'].append(('LIKE', filt[1:]))
            else:
                query['Message'].append(('LIKE', filt))
        filters.append(query)
        # NOTE(review): the TimeGenerated literal below looks garbled
        # ('...078915**********.******+') - confirm against from_time()'s
        # actual WMI datetime output format.
        self.assertEqual(" WHERE ( NOT Message LIKE 'foo' AND ( EventCode = '302' OR EventCode = '404' OR EventCode = '501' ) "
                         "AND ( SourceName = 'MSSQLSERVER' OR SourceName = 'IIS' ) AND TimeGenerated >= '2016-01-01 15:08:24.078915**********.******+' "
                         "AND User = 'luser' AND Message LIKE '%bar%' AND Message LIKE '%zen%' AND ( LogFile = 'System' OR LogFile = 'Security' ) "
                         "AND ( Type = 'Error' OR Type = 'Warning' ) )",
                         format_filter(filters, and_props))

    def test_wql_filtering_inclusive(self):
        """
        Format the filters to a comprehensive and inclusive WQL `WHERE` clause.
        """
        from checks.libs.wmi import sampler
        format_filter = sampler.WMISampler._format_filter
        # Check `_format_filter` logic
        filters = [{'Name': "SomeName"}, {'Id': "SomeId"}]
        self.assertEqual(" WHERE ( Id = 'SomeId' ) OR ( Name = 'SomeName' )",
                         format_filter(filters, True))

    def test_wmi_query(self):
        """
        Query WMI using WMI Query Language (WQL).
        """
        # No filters
        wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                                 ["AvgDiskBytesPerWrite", "FreeMegabytes"])
        wmi_sampler.sample()
        self.assertWMIQuery(
            "Select AvgDiskBytesPerWrite,FreeMegabytes"
            " from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
        )
        # Single filter
        wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                                 ["AvgDiskBytesPerWrite", "FreeMegabytes"],
                                 filters=[{'Name': "C:"}])
        wmi_sampler.sample()
        self.assertWMIQuery(
            "Select AvgDiskBytesPerWrite,FreeMegabytes"
            " from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
            " WHERE ( Name = 'C:' )"
        )
        # Multiple filters
        wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                                 ["AvgDiskBytesPerWrite", "FreeMegabytes"],
                                 filters=[{'Name': "C:", 'Id': "123"}])
        wmi_sampler.sample()
        self.assertWMIQuery(
            "Select AvgDiskBytesPerWrite,FreeMegabytes"
            " from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
            " WHERE ( Name = 'C:' AND Id = '123' )"
        )

    def test_wmi_parser(self):
        """
        Parse WMI objects from WMI query results.
        """
        wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                                 ["AvgDiskBytesPerWrite", "FreeMegabytes"])
        wmi_sampler.sample()
        # Assert `results`
        expected_results = [
            {
                'freemegabytes': 19742.0,
                'name': 'C:',
                'avgdiskbytesperwrite': 1536.0
            }, {
                'freemegabytes': 19742.0,
                'name': 'D:',
                'avgdiskbytesperwrite': 1536.0
            }
        ]
        self.assertEqual(wmi_sampler, expected_results, wmi_sampler)

    def test_wmi_sampler_iterator_getter(self):
        """
        Iterate/Get on the WMISampler object iterates/gets on its current sample.
        """
        wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                                 ["AvgDiskBytesPerWrite", "FreeMegabytes"])
        wmi_sampler.sample()
        self.assertEqual(len(wmi_sampler), 2)
        # Using an iterator
        for wmi_obj in wmi_sampler:
            self.assertWMIObject(wmi_obj, ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])
        # Using an accessor
        for index in xrange(0, 2):
            self.assertWMIObject(wmi_sampler[index], ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])

    def test_wmi_sampler_timeout(self):
        """
        Gracefully handle WMI query timeouts.
        """
        from checks.libs.wmi.sampler import WMISampler
        logger = Mock()
        # Create a sampler that timeouts
        wmi_sampler = WMISampler(logger, "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
                                 ["AvgDiskBytesPerWrite", "FreeMegabytes"],
                                 timeout_duration=0.1)
        SWbemServices._exec_query_run_time = 0.11
        # `TimeoutException` exception is raised, DEBUG message logged
        self.assertRaises(TimeoutException, wmi_sampler.sample)
        self.assertTrue(wmi_sampler._sampling)
        self.assertTrue(logger.debug.called)
        # Cannot iterate on data
        self.assertRaises(TypeError, lambda: len(wmi_sampler))
        self.assertRaises(TypeError, lambda: sum(1 for _ in wmi_sampler))
        # Recover from timeout at next iteration
        wmi_sampler.sample()
        self.assertFalse(wmi_sampler._sampling)
        # The existing query was retrieved
        self.assertEqual(SWbemServices.ExecQuery.call_count, 1, SWbemServices.ExecQuery.call_count)
        # Data is populated
        self.assertEqual(len(wmi_sampler), 2)
        self.assertEqual(sum(1 for _ in wmi_sampler), 2)

    def test_raw_perf_properties(self):
        """
        Extend the list of properties to query for RAW Performance classes.
        """
        # Formatted Performance class
        wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfOS_System", ["ProcessorQueueLength"])
        self.assertEqual(len(wmi_sampler.property_names), 1)
        # Raw Performance class
        wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
        self.assertEqual(len(wmi_sampler.property_names), 4)

    def test_raw_initial_sampling(self):
        """
        Query for initial sample for RAW Performance classes.
        """
        wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
        wmi_sampler.sample()
        # 2 queries should have been made: one for initialization, one for sampling
        self.assertEqual(SWbemServices.ExecQuery.call_count, 2, SWbemServices.ExecQuery.call_count)
        # Repeat
        wmi_sampler.sample()
        self.assertEqual(SWbemServices.ExecQuery.call_count, 3, SWbemServices.ExecQuery.call_count)

    def test_raw_cache_qualifiers(self):
        """
        Cache the qualifiers on the first query against RAW Performance classes.
        """
        # Append `flag_use_amended_qualifiers` flag on the first query
        wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
        wmi_raw_sampler._query()
        self.assertWMIQuery(flags=131120)
        wmi_raw_sampler._query()
        self.assertWMIQuery(flags=48)
        # Qualifiers are cached
        self.assertTrue(wmi_raw_sampler._property_counter_types)
        self.assertIn('CounterRawCount', wmi_raw_sampler._property_counter_types)
        self.assertIn('CounterCounter', wmi_raw_sampler._property_counter_types)

    def test_raw_properties_formatting(self):
        """
        WMI Object's RAW data are returned formatted.
        """
        wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"])  # noqa
        wmi_raw_sampler.sample()
        self.assertWMISampler(
            wmi_raw_sampler,
            [
                ("CounterRawCount", 500), ("CounterCounter", 50),
                "Timestamp_Sys100NS", "Frequency_Sys100NS", "name"
            ],
            count=2
        )

    def test_raw_properties_fallback(self):
        """
        Print a warning on RAW Performance classes if the calculator is undefined.
        Returns the original RAW value.
        """
        from checks.libs.wmi.sampler import WMISampler
        logger = Mock()
        wmi_raw_sampler = WMISampler(logger, "Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"])  # noqa
        wmi_raw_sampler.sample()
        self.assertWMISampler(
            wmi_raw_sampler,
            [
                ("UnknownCounter", 999), "Timestamp_Sys100NS", "Frequency_Sys100NS", "Name"
            ],
            count=1
        )
        self.assertTrue(logger.warning.called)

    def test_missing_property(self):
        """
        Do not raise on missing properties but backfill with empty values.
        """
        wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"])  # noqa
        wmi_raw_sampler.sample()
        self.assertWMISampler(wmi_raw_sampler, ["MissingProperty"], count=1)
class TestIntegrationWMI(unittest.TestCase):
    """Integration tests for WMISampler (placeholder: no cases implemented yet)."""
|
|
import sys
import re
import os
import json
import MarkdownPP
################################################################################
### @brief length of the swagger definition namespace prefix ('#/definitions/')
################################################################################
defLen = len('#/definitions/')
################################################################################
### @brief facility to remove leading and trailing html-linebreaks
################################################################################
removeTrailingBR = re.compile("<br>$")
removeLeadingBR = re.compile("^<br>")
def brTrim(text):
    """Strip surrounding blanks plus one leading and one trailing '<br>'."""
    trimmed = text.strip(' ')
    trimmed = removeTrailingBR.sub("", trimmed)
    return removeLeadingBR.sub("", trimmed)
# Parsed swagger JSON document (loaded in __main__ before any processing).
swagger = None
# Collected docublocks: index 0 = plain @startDocuBlock, 1 = @startDocuBlockInline.
dokuBlocks = [{},{}]
# Per-block parsing state set by replaceCode(): current swagger verb entry,
# plus the route and HTTP verb being processed.
thisVerb = {}
route = ''
verb = ''
def getReference(name, source, verb):
    # Resolve a swagger '$ref' (e.g. '#/definitions/Foo') to the bare
    # definition name, validating that the definition actually exists.
    # `source` is the referencing definition, `verb` the HTTP verb (or None)
    # - both only used to build a helpful error message.
    try:
        ref = name['$ref'][defLen:]
    except Exception as x:
        print >>sys.stderr, "No reference in: "
        print >>sys.stderr, name
        raise
    if not ref in swagger['definitions']:
        # Pick the offending source file: from the current route if a verb
        # was given, otherwise from the referencing definition itself.
        fn = ''
        if verb:
            fn = swagger['paths'][route][verb]['x-filename']
        else:
            fn = swagger['definitions'][source]['x-filename']
        print >> sys.stderr, json.dumps(swagger['definitions'], indent=4, separators=(', ',': '), sort_keys=True)
        raise Exception("invalid reference: " + ref + " in " + fn)
    return ref
def unwrapPostJson(reference, layer):
    # Recursively render the properties of the swagger definition `reference`
    # as nested HTML <li>/<ul> markup; `layer` controls the indentation depth
    # of the generated text.
    global swagger
    rc = ''
    for param in swagger['definitions'][reference]['properties'].keys():
        thisParam = swagger['definitions'][reference]['properties'][param]
        # NOTE(review): `required` is computed but never used below - confirm
        # whether required/optional should be rendered.
        required = ('required' in swagger['definitions'][reference] and
                    param in swagger['definitions'][reference]['required'])
        if '$ref' in thisParam:
            # Sub-structure reference: inline its description and recurse.
            subStructRef = getReference(thisParam, reference, None)
            rc += "<li><strong>" + param + "</strong>: "
            rc += swagger['definitions'][subStructRef]['description'] + "<ul class=\"swagger-list\">"
            rc += unwrapPostJson(subStructRef, layer + 1)
            # NOTE(review): closing tag order looks swapped (</li></ul>
            # instead of </ul></li>) - confirm against rendered output.
            rc += "</li></ul>"
        elif thisParam['type'] == 'object':
            rc += ' ' * layer + "<li><strong>" + param + "</strong>: " + brTrim(thisParam['description']) + "</li>"
        elif swagger['definitions'][reference]['properties'][param]['type'] == 'array':
            rc += ' ' * layer + "<li><strong>" + param + "</strong>: " + brTrim(thisParam['description'])
            if 'type' in thisParam['items']:
                rc += " of type " + thisParam['items']['type']#
            else:
                if len(thisParam['items']) == 0:
                    rc += "anonymous json object"
                else:
                    try:
                        subStructRef = getReference(thisParam['items'], reference, None)
                    except:
                        # NOTE(review): failure is swallowed, leaving
                        # subStructRef unbound (or stale from a previous
                        # iteration) for the recursion below - confirm intent.
                        print >>sys.stderr, "while analyzing: " + param
                        print >>sys.stderr, thisParam
                    rc += "\n<ul class=\"swagger-list\">"
                    rc += unwrapPostJson(subStructRef, layer + 1)
                    rc += "</ul>"
            rc += '</li>'
        else:
            # Plain scalar property: emit its description as-is.
            rc += ' ' * layer + "<li><strong>" + param + "</strong>: " + thisParam['description'] + '</li>'
    return rc
def getRestBodyParam():
    # Render the markdown for the body parameter of the current verb
    # (module-global `thisVerb`); the expanded JSON structure comes from
    # unwrapPostJson().
    rc = "\n**Body Parameters**\n"
    addText = ''
    for nParam in range(0, len(thisVerb['parameters'])):
        if thisVerb['parameters'][nParam]['in'] == 'body':
            # NOTE(review): descOffset is read but unused; the lookup acts as
            # a presence check (a missing key raises KeyError) - confirm.
            descOffset = thisVerb['parameters'][nParam]['x-description-offset']
            addText = ''
            if 'additionalProperties' in thisVerb['parameters'][nParam]['schema']:
                addText = "free style json body"
            else:
                addText = "<ul class=\"swagger-list\">" + unwrapPostJson(
                    getReference(thisVerb['parameters'][nParam]['schema'], route, verb),0) + "</ul>"
    # Only the last body parameter found contributes (swagger permits at most
    # one 'in: body' parameter per operation).
    rc += addText
    return rc
def getRestReplyBodyParam(param):
    # Render the reply body schema of response code `param` (e.g. '200')
    # for the current verb as an HTML list.
    rc = "\n**Reply Body**\n<ul>"
    try:
        rc += unwrapPostJson(getReference(thisVerb['responses'][param]['schema'], route, verb), 0)
    except Exception:
        # Dump the whole verb entry to help locate the broken response schema.
        print "failed to search " + param + " in: "
        print json.dumps(thisVerb, indent=4, separators=(', ',': '), sort_keys=True)
        raise
    return rc + "</ul>\n"
# Mapping of simple @REST... tokens to their markdown replacement.
# Values are either literal replacement strings or callables invoked by
# SimpleRepl() - optionally with the {parameter} embedded in the token.
SIMPL_REPL_DICT = {
    "@RESTDESCRIPTION" : "",
    "@RESTURLPARAMETERS" : "\n**URL Parameters**\n",
    "@RESTQUERYPARAMETERS" : "\n**Query Parameters**\n",
    "@RESTHEADERPARAMETERS" : "\n**Header Parameters**\n",
    "@RESTRETURNCODES" : "\n**Return Codes**\n",
    "@PARAMS" : "\n**Parameters**\n",
    "@RESTPARAMS" : "",
    "@RESTURLPARAMS" : "\n**URL Parameters**\n",
    "@RESTQUERYPARAMS" : "\n**Query Parameters**\n",
    "@RESTBODYPARAM" : getRestBodyParam,
    "@RESTREPLYBODY" : getRestReplyBodyParam,
    "@RESTQUERYPARAM" : "@RESTPARAM",
    "@RESTURLPARAM" : "@RESTPARAM",
    "@PARAM" : "@RESTPARAM",
    "@RESTHEADERPARAM" : "@RESTPARAM",
    "@EXAMPLES" : "\n**Examples**\n",
    "@RESTPARAMETERS" : ""
}
# One alternation matching every token handled by SimpleRepl(); compiled with
# re.X so the per-token comments below are ignored by the regex engine.
SIMPLE_RX = re.compile(
    r'''
@RESTDESCRIPTION| # -> <empty>
@RESTURLPARAMETERS| # -> \n**URL Parameters**\n
@RESTQUERYPARAMETERS| # -> \n**Query Parameters**\n
@RESTHEADERPARAMETERS| # -> \n**Header Parameters**\n
@RESTBODYPARAM| # -> call post body param
@RESTRETURNCODES| # -> \n**Return Codes**\n
@PARAMS| # -> \n**Parameters**\n
@RESTPARAMS| # -> <empty>
@RESTURLPARAMS| # -> <empty>
@RESTQUERYPARAMS| # -> <empty>
@PARAM| # -> @RESTPARAM
@RESTURLPARAM| # -> @RESTPARAM
@RESTQUERYPARAM| # -> @RESTPARAM
@RESTHEADERPARAM| # -> @RESTPARAM
@EXAMPLES| # -> \n**Examples**\n
@RESTPARAMETERS| # -> <empty>
@RESTREPLYBODY\{(.*)\} # -> call body function
''', re.X)
def SimpleRepl(match):
    """Replace one simple @REST... token (a SIMPLE_RX match) with markdown.

    Replacement values in SIMPL_REPL_DICT are either literal strings or
    callables; tokens carrying a ``{parameter}`` (e.g. ``@RESTREPLYBODY{200}``)
    fall back to looking up the bare token and passing the parameter along.

    Fixes: ``== None`` comparisons replaced by ``is None``; the string-type
    check uses ``isinstance`` instead of ``type(n) == type('')``.
    """
    m = match.group(0)
    #print 'xxxxx ' + m
    try:
        n = SIMPL_REPL_DICT[m]
        if n is None:
            raise Exception("failed to find regex while searching for: " + m)
        else:
            if isinstance(n, str):
                return n
            else:
                return n()
    except Exception:
        # Verbatim lookup failed (or the handler raised): the token may carry
        # a {parameter} - retry with the bare token and the extracted value.
        pos = m.find('{')
        if pos > 0:
            newMatch = m[:pos]
            param = m[pos + 1 :].rstrip(' }')
            try:
                n = SIMPL_REPL_DICT[newMatch]
                if n is None:
                    raise Exception("failed to find regex while searching for: " +
                                    newMatch + " extracted from: " + m)
                else:
                    if isinstance(n, str):
                        return n
                    else:
                        return n(param)
            except Exception as x:
                #raise Exception("failed to find regex while searching for: " +
                #                newMatch + " extracted from: " + m)
                raise
        else:
            raise Exception("failed to find regex while searching for: " + m)
# First-pass (pattern, replacement) pairs applied to every docublock by
# replaceCode(), in order.
RX = [
    # strip HTML comments
    (re.compile(r"<!--(\s*.+\s)-->"), ""),
    # remove the placeholder BR's again
    (re.compile(r"<br />\n"), "\n"),
    # multi line bullet lists should become one
    (re.compile(r"\n\n-"), "\n-"),
    #HTTP API changing code
    # unwrap multi-line-briefs: (up to 3 lines supported by now ;-)
    (re.compile(r"@brief(.+)\n(.+)\n(.+)\n\n"), r"@brief\g<1> \g<2> \g<3>\n\n"),
    (re.compile(r"@brief(.+)\n(.+)\n\n"), r"@brief\g<1> \g<2>\n\n"),
    # if there is an @brief above a RESTHEADER, swap the sequence
    (re.compile(r"@brief(.+\n*)\n\n@RESTHEADER{([#\s\w\/\_{}-]*),([\s\w-]*)}"), r"###\g<3>\n\g<1>\n\n`\g<2>`"),
    # else simply put it into the text
    (re.compile(r"@brief(.+)"), r"\g<1>"),
    # there should be no RESTHEADER without brief, so we will fail offensively if by not doing
    #(re.compile(r"@RESTHEADER{([\s\w\/\_{}-]*),([\s\w-]*)}"), r"###\g<2>\n`\g<1>`"),
    # Error codes replace
    (re.compile(r"(####)#+"), r""),
    # (re.compile(r"- (\w+):\s*@LIT{(.+)}"), r"\n*\g<1>* - **\g<2>**:"),
    (re.compile(r"(.+),(\d+),\"(.+)\",\"(.+)\""), r"\n*\g<2>* - **\g<3>**: \g<4>"),
    (re.compile(r"TODOSWAGGER.*"),r"")
]
# (re.compile(r"@RESTPARAM{([\s\w-]*),([\s\w\_\|-]*),\s*(\w+)}"), r"* *\g<1>*:"),
# (re.compile(r"@RESTRETURNCODE{(.*)}"), r"* *\g<1>*:"),
# (re.compile(r"@RESTBODYPARAMS{(.*)}"), r"*(\g<1>)*"),
# Second-pass replacements, applied by replaceCode() after SIMPLE_RX.
RX2 = [
    # parameters - extract their type and whether mandatory or not.
    (re.compile(r"@RESTPARAM{(\s*[\w\-]*)\s*,\s*([\w\_\|-]*)\s*,\s*(required|optional)}"), r"* *\g<1>* (\g<3>):"),
    (re.compile(r"@RESTALLBODYPARAM{(\s*[\w\-]*)\s*,\s*([\w\_\|-]*)\s*,\s*(required|optional)}"), r"**Post Body**\n *\g<1>* (\g<3>):"),
    (re.compile(r"@RESTRETURNCODE{(.*)}"), r"* *\g<1>*:")
]
# Marker patterns used by replaceCode() to locate REST documentation tokens.
match_RESTHEADER = re.compile(r"@RESTHEADER\{(.*)\}")
match_RESTRETURNCODE = re.compile(r"@RESTRETURNCODE\{(.*)\}")
have_RESTBODYPARAM = re.compile(r"@RESTBODYPARAM")
have_RESTREPLYBODY = re.compile(r"@RESTREPLYBODY")
have_RESTSTRUCT = re.compile(r"@RESTSTRUCT")
# Collapse runs of blank lines into a single blank line.
remove_MULTICR = re.compile(r'\n\n\n*')
def _mkdir_recursive(path):
sub_path = os.path.dirname(path)
if not os.path.exists(sub_path):
_mkdir_recursive(sub_path)
if not os.path.exists(path):
os.mkdir(path)
def replaceCode(lines, blockName):
    # Transform one raw docublock (`lines`, a single string) into markdown.
    # If the block documents a REST endpoint (@RESTHEADER found), the matching
    # swagger verb entry is loaded into the module globals thisVerb/route/verb
    # so the token handlers (getRestBodyParam etc.) can use it.
    global swagger, thisVerb, route, verb
    thisVerb = {}
    foundRest = False
    # first find the header:
    headerMatch = match_RESTHEADER.search(lines)
    if headerMatch and headerMatch.lastindex > 0:
        foundRest = True
        try:
            # Header group looks like 'VERB /route,summary' - split off the pair.
            (verb,route) = headerMatch.group(1).split(',')[0].split(' ')
            verb = verb.lower()
        except:
            print >> sys.stderr, "failed to parse header from: " + headerMatch.group(1) + " while analysing " + blockName
            raise
        try:
            thisVerb = swagger['paths'][route][verb]
        except:
            print >> sys.stderr, "failed to locate route in the swagger json: [" + verb + " " + route + "]" + " while analysing " + blockName
            print >> sys.stderr, lines
            raise
    # First-pass regex cleanup (applies to REST and non-REST blocks alike).
    for (oneRX, repl) in RX:
        lines = oneRX.sub(repl, lines)
    if foundRest:
        rcCode = None
        foundRestBodyParam = False
        foundRestReplyBodyParam = False
        # Walk the block line by line with a manual cursor `r`, blanking out
        # duplicated parameter sections (their content comes from swagger).
        lineR = lines.split('\n')
        l = len(lineR)
        r = 0
        while (r < l):
            # remove all but the first RESTBODYPARAM:
            if have_RESTBODYPARAM.search(lineR[r]):
                if foundRestBodyParam:
                    lineR[r] = ''
                else:
                    lineR[r] = '@RESTBODYPARAM'
                foundRestBodyParam = True
                r+=1
                # Blank the continuation lines of this section (until a line
                # of length <= 1, i.e. empty or a single character).
                while (len(lineR[r]) > 1):
                    lineR[r] = ''
                    r+=1
            m = match_RESTRETURNCODE.search(lineR[r])
            if m and m.lastindex > 0:
                # Remember the last seen return code for RESTREPLYBODY below.
                rcCode = m.group(1)
            # remove all but the first RESTREPLYBODY:
            if have_RESTREPLYBODY.search(lineR[r]):
                if foundRestReplyBodyParam != rcCode:
                    # First reply body for this return code: keep a normalized
                    # token carrying the code.
                    lineR[r] = '@RESTREPLYBODY{' + rcCode + '}\n'
                else:
                    lineR[r] = ''
                foundRestReplyBodyParam = rcCode
                r+=1
                while (len(lineR[r]) > 1):
                    lineR[r] = ''
                    r+=1
                m = match_RESTRETURNCODE.search(lineR[r])
                if m and m.lastindex > 0:
                    rcCode = m.group(1)
            # remove all RESTSTRUCTS - they're referenced anyways:
            if have_RESTSTRUCT.search(lineR[r]):
                while (len(lineR[r]) > 1):
                    lineR[r] = ''
                    r+=1
            r+=1
        lines = "\n".join(lineR)
    # Expand the simple @REST... tokens, then apply the second regex pass.
    lines = SIMPLE_RX.sub(SimpleRepl, lines)
    for (oneRX, repl) in RX2:
        lines = oneRX.sub(repl, lines)
    lines = remove_MULTICR.sub("\n\n", lines)
    #print lines
    return lines
def replaceCodeIndex(lines):
    """Strip HTML comments from index-page content; other markup passes through."""
    html_comment = re.compile(r"<!--(\s*.+\s)-->")
    #HTTP API changing code
    #lines = re.sub(r"@brief(.+)",r"\g<1>", lines)
    #lines = re.sub(r"@RESTHEADER{([\s\w\/\_{}-]*),([\s\w-]*)}", r"###\g<2>\n`\g<1>`", lines)
    return html_comment.sub("", lines)
# Final whole-file replacements: turn @anchor markers into HTML anchors.
# Fix: the replacement is now a raw string - previously the non-raw literal
# contained the invalid escape sequence '\g' (a DeprecationWarning/SyntaxWarning
# on modern Pythons).
RXFinal = [
    (re.compile(r"@anchor (.*)"), r'<a name="\g<1>">#</a>')
]
def replaceCodeFullFile(lines):
    """Apply the final regex replacements (RXFinal) to the whole file text."""
    for (oneRX, repl) in RXFinal:
        lines = oneRX.sub(repl, lines)
    return lines
################################################################################
# main loop over all files
################################################################################
def walk_on_files(inDirPath, outDirPath):
    # Walk the input tree, run every *.mdpp file through MarkdownPP into a
    # parallel *.md file under outDirPath, then post-process the result.
    for root, dirs, files in os.walk(inDirPath):
        for file in files:
            if file.endswith(".mdpp"):
                inFileFull = os.path.join(root, file)
                outFileFull = os.path.join(outDirPath, re.sub(r'mdpp$', 'md', inFileFull))
                print "%s -> %s" % (inFileFull, outFileFull)
                _mkdir_recursive(os.path.join(outDirPath, root))
                mdpp = open(inFileFull, "r")
                md = open(outFileFull, "w")
                MarkdownPP.MarkdownPP(input=mdpp, output=md, modules=MarkdownPP.modules.keys())
                mdpp.close()
                md.close()
                # NOTE(review): `md` is already closed here; findStartCode()
                # re-opens the file by path and ignores its fd argument.
                findStartCode(md, outFileFull)
def findStartCode(fd,full_path):
    # Post-process one generated .md file in place: expand every
    # @startDocuBlockInline / @startDocuBlock marker with the corresponding
    # docublock text, then apply the final whole-file replacements.
    # NOTE(review): the `fd` argument is unused - the file is re-read by path.
    inFD = open(full_path, "r")
    textFile =inFD.read()
    inFD.close()
    #print "-" * 80
    #print textFile
    matchInline = re.findall(r'@startDocuBlockInline\s*(\w+)', textFile)
    if matchInline:
        for find in matchInline:
            #print "7"*80
            print full_path + " " + find
            textFile = replaceTextInline(textFile, full_path, find)
            #print textFile
    match = re.findall(r'@startDocuBlock\s*(\w+)', textFile)
    if match:
        for find in match:
            #print "8"*80
            textFile = replaceText(textFile, full_path, find)
            #print textFile
    try:
        textFile = replaceCodeFullFile(textFile)
    except:
        print >>sys.stderr, "while parsing :\n" + textFile
        raise
    #print "9" * 80
    #print textFile
    # Write the expanded text back over the original file.
    outFD = open(full_path, "w")
    outFD.truncate()
    outFD.write(textFile)
    outFD.close()
def replaceText(text, pathOfFile, searchText):
    ''' reads the mdpp and generates the md '''
    # Substitute one @startDocuBlock <searchText> marker with the stored
    # docublock body (dokuBlocks[0]); exits hard if the block is unknown.
    global dokuBlocks
    if not searchText in dokuBlocks[0]:
        print >> sys.stderr, "Failed to locate the docublock '%s' for replacing it into the file '%s'\n have:" % (searchText, pathOfFile)
        print >> sys.stderr, dokuBlocks[0].keys()
        print >> sys.stderr, '*' * 80
        print >> sys.stderr, text
        exit(1)
    #print dokuBlocks[0][searchText]
    rc= re.sub("@startDocuBlock\s+"+ searchText + "(?:\s+|$)", dokuBlocks[0][searchText], text)
    return rc
def replaceTextInline(text, pathOfFile, searchText):
    ''' reads the mdpp and generates the md '''
    # Replace a whole @startDocuBlockInline ... @endDocuBlock <name> region
    # with the processed block body (dokuBlocks[1]); exits hard on errors.
    global dokuBlocks
    if not searchText in dokuBlocks[1]:
        print >> sys.stderr, "Failed to locate the inline docublock '%s' for replacing it into the file '%s'\n have:" % (searchText, pathOfFile)
        print dokuBlocks[1].keys()
        print '*' * 80
        print text
        exit(1)
    rePattern = r'(?s)\s*@startDocuBlockInline\s+'+ searchText +'.*@endDocuBlock\s' + searchText
    # (?s) is equivalent to flags=re.DOTALL but works in Python 2.6
    match = re.search(rePattern, text)
    if (match == None):
        print >> sys.stderr, "failed to match with '%s' for %s in file %s in: \n%s" % (rePattern, searchText, pathOfFile, text)
        exit(1)
    subtext = match.group(0)
    # Guard against the greedy '.*' swallowing a second block.
    if (len(re.findall('@startDocuBlock', subtext)) > 1):
        print >> sys.stderr, "failed to snap with '%s' on end docublock for %s in %s our match is:\n%s" % (rePattern, searchText, pathOfFile, subtext)
        exit(1)
    return re.sub(rePattern, dokuBlocks[1][searchText], text)
################################################################################
# Read the docublocks into memory
################################################################################
# Accumulator state for the docublock reader below.
thisBlock = ""
thisBlockName = ""
thisBlockType = 0  # index into dokuBlocks: 0 = plain, 1 = inline
# Reader states: scanning for @startDocuBlock vs. collecting until @endDocuBlock.
STATE_SEARCH_START = 0
STATE_SEARCH_END = 1
# Extracts the block name following a start marker.
SEARCH_START = re.compile(r" *start[0-9a-zA-Z]*\s\s*([0-9a-zA-Z_ ]*)\s*$")
def readStartLine(line):
    # Reader state handler: if `line` opens a docublock, record its name and
    # type (inline vs. plain) and switch to collection mode.
    global thisBlockName, thisBlockType, thisBlock, dokuBlocks
    if ("@startDocuBlock" in line):
        if "@startDocuBlockInline" in line:
            thisBlockType = 1
        else:
            thisBlockType = 0
        try:
            thisBlockName = SEARCH_START.search(line).group(1).strip()
        except:
            print >> sys.stderr, "failed to read startDocuBlock: [" + line + "]"
            exit(1)
        dokuBlocks[thisBlockType][thisBlockName] = ""
        return STATE_SEARCH_END
    return STATE_SEARCH_START
def readNextLine(line):
    """Collect one raw line into the docublock opened by readStartLine().

    Returns the next reader state: back to START once @endDocuBlock is seen,
    otherwise stay in END and keep accumulating.
    """
    global thisBlockName, thisBlockType, thisBlock, dokuBlocks
    finished = '@endDocuBlock' in line
    if not finished:
        dokuBlocks[thisBlockType][thisBlockName] += line
    return STATE_SEARCH_START if finished else STATE_SEARCH_END
def loadDokuBlocks():
    # Read 'allComments.txt' with a two-state scanner (readStartLine /
    # readNextLine) to fill dokuBlocks, then run every collected block
    # through replaceCode().
    state = STATE_SEARCH_START
    f=open("allComments.txt", 'rU')
    count = 0  # NOTE(review): unused
    for line in f.readlines():
        if state == STATE_SEARCH_START:
            state = readStartLine(line)
        elif state == STATE_SEARCH_END:
            state = readNextLine(line)
    #if state == STATE_SEARCH_START:
    #    print dokuBlocks[thisBlockType].keys()
    for oneBlock in dokuBlocks[0]:
        try:
            dokuBlocks[0][oneBlock] = replaceCode(dokuBlocks[0][oneBlock], oneBlock)
        except:
            print >>sys.stderr, "while parsing :\n" + oneBlock
            raise
    for oneBlock in dokuBlocks[1]:
        try:
            dokuBlocks[1][oneBlock] = replaceCode(dokuBlocks[1][oneBlock], oneBlock)
        except:
            print >>sys.stderr, "while parsing :\n" + oneBlock
            raise
if __name__ == '__main__':
if len(sys.argv) < 2:
print("usage: input-directory output-directory")
exit(1)
inDir = sys.argv[1]
outDir = sys.argv[2]
swaggerJson = sys.argv[3]
f=open(swaggerJson, 'rU')
swagger= json.load(f)
f.close()
loadDokuBlocks()
print "loaded %d / %d docu blocks" % (len(dokuBlocks[0]), len(dokuBlocks[1]))
#print dokuBlocks[0].keys()
walk_on_files(inDir, outDir)
|
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a sitelinks feed and associates it with a campaign.
To add sitelinks using the simpler ExtensionSetting services, see:
add_sitelinks.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CampaignFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Api: AdWordsOnly
"""
__author__ = 'Joseph DiLallo'
import re
import uuid
from googleads import adwords
from googleads import errors
# See the Placeholder reference page for a list of all the placeholder types and
# fields.
# https://developers.google.com/adwords/api/docs/appendix/placeholders.html
PLACEHOLDER_SITELINKS = '1'
# Field IDs within the sitelinks placeholder type.
PLACEHOLDER_FIELD_SITELINK_LINK_TEXT = '1'
PLACEHOLDER_FIELD_SITELINK_FINAL_URLS = '5'
PLACEHOLDER_FIELD_LINE_1_TEXT = '3'
PLACEHOLDER_FIELD_LINE_2_TEXT = '4'
# Placeholder: replace with a real campaign ID before running.
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
  """Create a sitelinks feed, its items and mapping, then attach the feed to
  `campaign_id` with a mobile-only matching function."""
  # Initialize appropriate service.
  feed_service = client.GetService('FeedService', version='v201502')
  feed_item_service = client.GetService('FeedItemService', version='v201502')
  feed_mapping_service = client.GetService(
      'FeedMappingService', version='v201502')
  campaign_feed_service = client.GetService(
      'CampaignFeedService', version='v201502')
  # Collects the IDs produced by each step for use in the following steps.
  sitelinks_data = {}
  # Create site links feed first.
  site_links_feed = {
      'name': 'Feed For Site Links #%s' % uuid.uuid4(),
      'attributes': [
          {'type': 'STRING', 'name': 'Link Text'},
          {'type': 'URL_LIST', 'name': 'Link Final URLs'},
          {'type': 'STRING', 'name': 'Line 1 Description'},
          {'type': 'STRING', 'name': 'Line 2 Description'}
      ]
  }
  response = feed_service.mutate([
      {'operator': 'ADD', 'operand': site_links_feed}
  ])
  if 'value' in response:
    feed = response['value'][0]
    # Attribute IDs come back in the same order the attributes were defined.
    link_text_feed_attribute_id = feed['attributes'][0]['id']
    final_url_feed_attribute_id = feed['attributes'][1]['id']
    line_1_feed_attribute_id = feed['attributes'][2]['id']
    line_2_feed_attribute_id = feed['attributes'][3]['id']
    print ('Feed with name \'%s\' and ID \'%s\' was added with' %
           (feed['name'], feed['id']))
    print ('\tText attribute ID \'%s\' and Final URL attribute ID \'%s\'.' %
           (link_text_feed_attribute_id, final_url_feed_attribute_id))
    print ('\tLine 1 attribute ID \'%s\' and Line 2 attribute ID \'%s\'.' %
           (line_1_feed_attribute_id, line_2_feed_attribute_id))
    sitelinks_data['feedId'] = feed['id']
    sitelinks_data['linkTextFeedId'] = link_text_feed_attribute_id
    sitelinks_data['finalUrlFeedId'] = final_url_feed_attribute_id
    sitelinks_data['line1FeedId'] = line_1_feed_attribute_id
    sitelinks_data['line2FeedId'] = line_2_feed_attribute_id
  else:
    raise errors.GoogleAdsError('No feeds were added.')
  # Create site links feed items.
  items_data = [
      {'text': 'Home', 'finalUrls': 'http://www.example.com',
       'line1': 'Home line 1', 'line2': 'Home line 2'},
      {'text': 'Stores', 'finalUrls': 'http://www.example.com/stores',
       'line1': 'Stores line 1', 'line2': 'Stores line 2'},
      {'text': 'On Sale', 'finalUrls': 'http://www.example.com/sale',
       'line1': 'On Sale line 1', 'line2': 'On Sale line 2'},
      {'text': 'Support', 'finalUrls': 'http://www.example.com/support',
       'line1': 'Support line 1', 'line2': 'Support line 2'},
      {'text': 'Products', 'finalUrls': 'http://www.example.com/products',
       'line1': 'Products line 1', 'line2': 'Products line 2'},
      {'text': 'About', 'finalUrls': 'http://www.example.com/about',
       'line1': 'About line 1', 'line2': 'About line 2'}
  ]
  feed_items = []
  for item in items_data:
    feed_items.append({
        'feedId': sitelinks_data['feedId'],
        'attributeValues': [
            {
                'feedAttributeId': sitelinks_data['linkTextFeedId'],
                'stringValue': item['text']
            },
            {
                'feedAttributeId': sitelinks_data['finalUrlFeedId'],
                'stringValues': [item['finalUrls']]
            },
            {
                'feedAttributeId': sitelinks_data['line1FeedId'],
                'stringValue': item['line1']
            },
            {
                'feedAttributeId': sitelinks_data['line2FeedId'],
                'stringValue': item['line2']
            }
        ],
        # Optional: use the 'startTime' and 'endTime' keys to specify the time
        # period for the feed to deliver. The example below will make the feed
        # start now and stop in one month.
        # Make sure you specify the datetime in the customer's time zone. You
        # can retrieve this from customer['dateTimeZone'].
        #
        # ['startTime']: datetime.datetime.now().strftime('%Y%m%d %H%M%S')
        # ['endTime']: (datetime.datetime.now() +
        #               relativedelta(months=1)).strftime('%Y%m%d %H%M%S')
        # Optional: use the 'scheduling' key to specify time and days of the
        # week for feed to deliver. This is a Beta feature.
    })
  feed_items_operations = [{'operator': 'ADD', 'operand': item} for item
                           in feed_items]
  response = feed_item_service.mutate(feed_items_operations)
  if 'value' in response:
    sitelinks_data['feedItemIds'] = []
    for feed_item in response['value']:
      print 'Feed item with ID %s was added.' % feed_item['feedItemId']
      sitelinks_data['feedItemIds'].append(feed_item['feedItemId'])
  else:
    raise errors.GoogleAdsError('No feed items were added.')
  # Create site links feed mapping.
  feed_mapping = {
      'placeholderType': PLACEHOLDER_SITELINKS,
      'feedId': sitelinks_data['feedId'],
      'attributeFieldMappings': [
          {
              'feedAttributeId': sitelinks_data['linkTextFeedId'],
              'fieldId': PLACEHOLDER_FIELD_SITELINK_LINK_TEXT
          },
          {
              'feedAttributeId': sitelinks_data['finalUrlFeedId'],
              'fieldId': PLACEHOLDER_FIELD_SITELINK_FINAL_URLS
          },
          {
              'feedAttributeId': sitelinks_data['line1FeedId'],
              'fieldId': PLACEHOLDER_FIELD_LINE_1_TEXT
          },
          {
              'feedAttributeId': sitelinks_data['line2FeedId'],
              'fieldId': PLACEHOLDER_FIELD_LINE_2_TEXT
          }
      ]
  }
  response = feed_mapping_service.mutate([
      {'operator': 'ADD', 'operand': feed_mapping}
  ])
  if 'value' in response:
    feed_mapping = response['value'][0]
    print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
           ' with ID %s.' %
           (feed_mapping['feedMappingId'], feed_mapping['placeholderType'],
            feed_mapping['feedId']))
  else:
    raise errors.GoogleAdsError('No feed mappings were added.')
  # Construct a matching function that associates the sitelink feeditems to the
  # campaign, and set the device preference to Mobile. For more details, see the
  # matching function guide:
  # https://developers.google.com/adwords/api/docs/guides/feed-matching-functions
  matching_function_string = (
      'AND(IN(FEED_ITEM_ID, {%s}), EQUALS(CONTEXT.DEVICE, \'Mobile\'))' %
      re.sub(r'\[|\]|L', '', str(sitelinks_data['feedItemIds'])))
  campaign_feed = {
      'feedId': sitelinks_data['feedId'],
      'campaignId': campaign_id,
      'matchingFunction': {'functionString': matching_function_string},
      # Specifying placeholder types on the CampaignFeed allows the same feed
      # to be used for different placeholders in different Campaigns.
      'placeholderTypes': [PLACEHOLDER_SITELINKS]
  }
  response = campaign_feed_service.mutate([
      {'operator': 'ADD', 'operand': campaign_feed}
  ])
  if 'value' in response:
    campaign_feed = response['value'][0]
    print ('Campaign with ID %s was associated with feed with ID %s.' %
           (campaign_feed['campaignId'], campaign_feed['feedId']))
  else:
    raise errors.GoogleAdsError('No campaign feeds were added.')
if __name__ == '__main__':
  # Initialize client object. Credentials/properties come from the
  # 'googleads.yaml' file in the home directory (see module docstring).
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  # CAMPAIGN_ID is a placeholder constant - replace it before running.
  main(adwords_client, CAMPAIGN_ID)
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.webmisc
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for misc. web stuff.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
default, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
from pygments.util import unirange
from pygments.lexers.css import _indentation, _starts_block
from pygments.lexers.html import HtmlLexer
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.ruby import RubyLexer
__all__ = ['DuelLexer', 'SlimLexer', 'XQueryLexer', 'QmlLexer', 'CirruLexer']
class DuelLexer(RegexLexer):
    """
    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
    See http://duelengine.org/.
    See http://jsonml.org/jbst/.
    .. versionadded:: 1.4
    """
    name = 'Duel'
    aliases = ['duel', 'jbst', 'jsonml+bst']
    filenames = ['*.duel', '*.jbst']
    mimetypes = ['text/x-duel', 'text/x-jbst']
    # DOTALL so the non-greedy '.*?' groups can span newlines inside
    # multi-line <% ... %> blocks.
    flags = re.DOTALL
    tokens = {
        'root': [
            # <% ... %> code blocks (optionally prefixed @=#!:) delegate
            # their contents to the JavaScript lexer.
            (r'(<%[@=#!:]?)(.*?)(%>)',
             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
            # <%$ Kind : key %> resource/extension expressions.
            (r'(<%\$)(.*?)(:)(.*?)(%>)',
             bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
            # <%-- ... --%> server-side comments.
            (r'(<%--)(.*?)(--%>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)',
             bygroups(using(HtmlLexer),
                      using(JavascriptLexer), using(HtmlLexer))),
            # Plain markup: stop before the next '<' so the rules above get a
            # chance to match; the final rule mops up any trailing text.
            (r'(.+?)(?=<)', using(HtmlLexer)),
            (r'.+', using(HtmlLexer)),
        ],
    }
class XQueryLexer(ExtendedRegexLexer):
    """
    An XQuery lexer, parsing a stream and outputting the tokens needed to
    highlight xquery code.
    .. versionadded:: 1.4
    """
    name = 'XQuery'
    aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
    filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
    mimetypes = ['text/xquery', 'application/xquery']
    # Auxiliary stack of state names shared by the callbacks below.  XQuery
    # needs to remember which lexer state to resume once a nested construct
    # (direct XML constructor, enclosed {...} expression, kindtest, ...)
    # closes -- more context than the plain pygments state stack carries.
    # NOTE(review): this is a class-level (shared) list, so concurrent use of
    # several XQueryLexer instances would share it -- confirm upstream intent.
    xquery_parse_state = []
    # FIX UNICODE LATER
    # ncnamestartchar = (
    # ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
    # ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
    # ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
    # ur"[\u10000-\uEFFFF]"
    # )
    # ASCII-only approximation of the XML NCName productions.
    ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
    # FIX UNICODE LATER
    # ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
    # ur"[\u203F-\u2040]")
    ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
    ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
    # Processing-instruction target: NCName-like, with letter ranges chosen so
    # the reserved target "xml" cannot be spelled.
    pitarget_namestartchar = r"(?:[A-KN-WYZ]|_|:|[a-kn-wyz])"
    pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
    pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
    prefixedname = "%s:%s" % (ncname, ncname)
    unprefixedname = ncname
    qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
    entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
    charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
    # String literals: a doubled quote character is the XQuery escape.
    stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
    stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
    # FIX UNICODE LATER
    # elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
    # quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
    # ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
    # aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_`|~]'
    # CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
    # aposattrcontentchar
    # x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
    flags = re.DOTALL | re.MULTILINE | re.UNICODE
    # --- state-transition callbacks -------------------------------------
    # Each callback yields the matched token(s), then manipulates both the
    # pygments context stack (ctx.stack) and the side stack
    # (lexer.xquery_parse_state) to model XQuery's context-sensitive grammar.
    def punctuation_root_callback(lexer, match, ctx):
        yield match.start(), Punctuation, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()
    def operator_root_callback(lexer, match, ctx):
        yield match.start(), Operator, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()
    def popstate_tag_callback(lexer, match, ctx):
        # On tag close, resume whatever state was recorded when it opened.
        yield match.start(), Name.Tag, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()
    def popstate_xmlcomment_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()
    def popstate_kindtest_callback(lexer, match, ctx):
        # Closing ')' of a kindtest; group(2) may hold an occurrence
        # indicator ([?*+]) that belongs to the enclosing sequence type.
        yield match.start(), Punctuation, match.group(1)
        next_state = lexer.xquery_parse_state.pop()
        if next_state == 'occurrenceindicator':
            if re.match("[?*+]+", match.group(2)):
                yield match.start(), Punctuation, match.group(2)
                ctx.stack.append('operator')
                ctx.pos = match.end()
            else:
                ctx.stack.append('operator')
                ctx.pos = match.end(1)
        else:
            ctx.stack.append(next_state)
            ctx.pos = match.end(1)
    def popstate_callback(lexer, match, ctx):
        yield match.start(), Punctuation, match.group(1)
        # if we have run out of our state stack, pop whatever is on the pygments
        # state stack
        if len(lexer.xquery_parse_state) == 0:
            ctx.stack.pop()
        elif len(ctx.stack) > 1:
            ctx.stack.append(lexer.xquery_parse_state.pop())
        else:
            # i don't know if i'll need this, but in case, default back to root
            ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_element_content_starttag_callback(lexer, match, ctx):
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('element_content')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_cdata_section_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        # NOTE(review): appends the bound method ctx.state.pop itself, not a
        # state name (compare the '...element_content...' callbacks above).
        # Looks suspicious -- confirm against upstream pygments.
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.pos = match.end()
    def pushstate_starttag_callback(lexer, match, ctx):
        yield match.start(), Name.Tag, match.group(1)
        # NOTE(review): same ctx.state.pop pattern as above -- confirm.
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_operator_order_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_map_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_root_validate(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_root_validate_withmode(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Keyword, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_operator_cdata_section_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_kindtest_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('kindtest')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtestforpi')
        ctx.pos = match.end()
    def pushstate_operator_kindtest_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
        yield match.start(), Name.Tag, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('occurrenceindicator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_operator_starttag_callback(lexer, match, ctx):
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_operator_root_callback(lexer, match, ctx):
        yield match.start(), Punctuation, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_operator_root_construct_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_root_callback(lexer, match, ctx):
        # Entering an enclosed '{...}' expression: remember the current state
        # so popstate_callback can restore it at the matching '}'.
        yield match.start(), Punctuation, match.group(1)
        cur_state = ctx.stack.pop()
        lexer.xquery_parse_state.append(cur_state)
        ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_operator_attribute_callback(lexer, match, ctx):
        yield match.start(), Name.Attribute, match.group(1)
        ctx.stack.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    # --- token tables ---------------------------------------------------
    # Rule order within each state is significant: earlier rules win.
    tokens = {
        'comment': [
            # xquery comments -- '(:' nests, hence '#push'/'#pop'.
            (r'(:\))', Comment, '#pop'),
            (r'(\(:)', Comment, '#push'),
            (r'[^:)]', Comment),
            # Catch-all for ':' or ')' not forming a delimiter; only
            # reachable for those two characters given the rule above.
            (r'([^:)]|:|\))', Comment),
        ],
        'whitespace': [
            (r'\s+', Text),
        ],
        'operator': [
            include('whitespace'),
            (r'(\})', popstate_callback),
            (r'\(:', Comment, 'comment'),
            (r'(\{)', pushstate_root_callback),
            (r'then|else|external|at|div|except', Keyword, 'root'),
            (r'order by', Keyword, 'root'),
            (r'group by', Keyword, 'root'),
            (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
            (r'and|or', Operator.Word, 'root'),
            (r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
             Operator.Word, 'root'),
            (r'return|satisfies|to|union|where|count|preserve\s+strip',
             Keyword, 'root'),
            (r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=|!)',
             operator_root_callback),
            (r'(::|:|;|\[|//|/|,)',
             punctuation_root_callback),
            (r'(castable|cast)(\s+)(as)\b',
             bygroups(Keyword, Text, Keyword), 'singletype'),
            (r'(instance)(\s+)(of)\b',
             bygroups(Keyword, Text, Keyword), 'itemtype'),
            (r'(treat)(\s+)(as)\b',
             bygroups(Keyword, Text, Keyword), 'itemtype'),
            (r'(case)(\s+)(' + stringdouble + ')',
             bygroups(Keyword, Text, String.Double), 'itemtype'),
            (r'(case)(\s+)(' + stringsingle + ')',
             bygroups(Keyword, Text, String.Single), 'itemtype'),
            (r'(case|as)\b', Keyword, 'itemtype'),
            (r'(\))(\s*)(as)',
             bygroups(Punctuation, Text, Keyword), 'itemtype'),
            (r'\$', Name.Variable, 'varname'),
            (r'(for|let|previous|next)(\s+)(\$)',
             bygroups(Keyword, Text, Name.Variable), 'varname'),
            (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
             bygroups(Keyword, Text, Keyword, Text, Keyword, Text, Name.Variable),
             'varname'),
            # (r'\)|\?|\]', Punctuation, '#push'),
            (r'\)|\?|\]', Punctuation),
            (r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
            (r'ascending|descending|default', Keyword, '#push'),
            (r'(allowing)(\s+)(empty)', bygroups(Keyword, Text, Keyword)),
            (r'external', Keyword),
            (r'(start|when|end)', Keyword, 'root'),
            (r'(only)(\s+)(end)', bygroups(Keyword, Text, Keyword), 'root'),
            (r'collation', Keyword, 'uritooperator'),
            # eXist specific XQUF
            (r'(into|following|preceding|with)', Keyword, 'root'),
            # support for current context on rhs of Simple Map Operator
            (r'\.', Operator),
            # finally catch all string literals and stay in operator state
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
        ],
        'uritooperator': [
            (stringdouble, String.Double, '#pop'),
            (stringsingle, String.Single, '#pop'),
        ],
        'namespacedecl': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
            (r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r',', Punctuation),
            (r'=', Operator),
            (r';', Punctuation, 'root'),
            (ncname, Name.Namespace),
        ],
        'namespacekeyword': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (stringdouble, String.Double, 'namespacedecl'),
            (stringsingle, String.Single, 'namespacedecl'),
            (r'inherit|no-inherit', Keyword, 'root'),
            (r'namespace', Keyword, 'namespacedecl'),
            (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
            (r'preserve|no-preserve', Keyword),
            (r',', Punctuation),
        ],
        'annotationname': [
            (r'\(:', Comment, 'comment'),
            (qname, Name.Decorator),
            (r'(\()(' + stringdouble + ')', bygroups(Punctuation, String.Double)),
            (r'(\()(' + stringsingle + ')', bygroups(Punctuation, String.Single)),
            (r'(\,)(\s+)(' + stringdouble + ')',
             bygroups(Punctuation, Text, String.Double)),
            (r'(\,)(\s+)(' + stringsingle + ')',
             bygroups(Punctuation, Text, String.Single)),
            (r'\)', Punctuation),
            (r'(\s+)(\%)', bygroups(Text, Name.Decorator), 'annotationname'),
            (r'(\s+)(variable)(\s+)(\$)',
             bygroups(Text, Keyword.Declaration, Text, Name.Variable), 'varname'),
            (r'(\s+)(function)(\s+)',
             bygroups(Text, Keyword.Declaration, Text), 'root')
        ],
        'varname': [
            (r'\(:', Comment, 'comment'),
            (r'(' + qname + r')(\()?', bygroups(Name, Punctuation), 'operator'),
        ],
        'singletype': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (ncname + r'(:\*)', Name.Variable, 'operator'),
            (qname, Name.Variable, 'operator'),
        ],
        'itemtype': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'\$', Name.Variable, 'varname'),
            (r'(void)(\s*)(\()(\s*)(\))',
             bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
            (r'(element|attribute|schema-element|schema-attribute|comment|text|'
             r'node|binary|document-node|empty-sequence)(\s*)(\()',
             pushstate_occurrenceindicator_kindtest_callback),
            # Marklogic specific type?
            (r'(processing-instruction)(\s*)(\()',
             bygroups(Keyword, Text, Punctuation),
             ('occurrenceindicator', 'kindtestforpi')),
            (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
             bygroups(Keyword, Text, Punctuation, Text, Punctuation),
             'occurrenceindicator'),
            (r'(\(\#)(\s*)', bygroups(Punctuation, Text), 'pragma'),
            (r';', Punctuation, '#pop'),
            (r'then|else', Keyword, '#pop'),
            (r'(at)(\s+)(' + stringdouble + ')',
             bygroups(Keyword, Text, String.Double), 'namespacedecl'),
            (r'(at)(\s+)(' + stringsingle + ')',
             bygroups(Keyword, Text, String.Single), 'namespacedecl'),
            (r'except|intersect|in|is|return|satisfies|to|union|where|count',
             Keyword, 'root'),
            (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
            (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'),
            (r'external|at', Keyword, 'root'),
            (r'(stable)(\s+)(order)(\s+)(by)',
             bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
            (r'(castable|cast)(\s+)(as)',
             bygroups(Keyword, Text, Keyword), 'singletype'),
            (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
            (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
            (r'(case)(\s+)(' + stringdouble + ')',
             bygroups(Keyword, Text, String.Double), 'itemtype'),
            (r'(case)(\s+)(' + stringsingle + ')',
             bygroups(Keyword, Text, String.Single), 'itemtype'),
            (r'case|as', Keyword, 'itemtype'),
            (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
            (ncname + r':\*', Keyword.Type, 'operator'),
            (r'(function|map|array)(\()', bygroups(Keyword.Type, Punctuation)),
            (qname, Keyword.Type, 'occurrenceindicator'),
        ],
        'kindtest': [
            (r'\(:', Comment, 'comment'),
            (r'\{', Punctuation, 'root'),
            (r'(\))([*+?]?)', popstate_kindtest_callback),
            (r'\*', Name, 'closekindtest'),
            (qname, Name, 'closekindtest'),
            (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
        ],
        'kindtestforpi': [
            (r'\(:', Comment, 'comment'),
            (r'\)', Punctuation, '#pop'),
            (ncname, Name.Variable),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
        ],
        'closekindtest': [
            (r'\(:', Comment, 'comment'),
            (r'(\))', popstate_callback),
            (r',', Punctuation),
            (r'(\{)', pushstate_operator_root_callback),
            (r'\?', Punctuation),
        ],
        'xml_comment': [
            (r'(-->)', popstate_xmlcomment_callback),
            (r'[^-]{1,2}', Literal),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        'processing_instruction': [
            (r'\s+', Text, 'processing_instruction_content'),
            (r'\?>', String.Doc, '#pop'),
            (pitarget, Name),
        ],
        'processing_instruction_content': [
            (r'\?>', String.Doc, '#pop'),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        'cdata_section': [
            (r']]>', String.Doc, '#pop'),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
        ],
        'start_tag': [
            include('whitespace'),
            (r'(/>)', popstate_tag_callback),
            (r'>', Name.Tag, 'element_content'),
            (r'"', Punctuation, 'quot_attribute_content'),
            (r"'", Punctuation, 'apos_attribute_content'),
            (r'=', Operator),
            (qname, Name.Tag),
        ],
        'quot_attribute_content': [
            (r'"', Punctuation, 'start_tag'),
            (r'(\{)', pushstate_root_callback),
            (r'""', Name.Attribute),
            (quotattrcontentchar, Name.Attribute),
            (entityref, Name.Attribute),
            (charref, Name.Attribute),
            (r'\{\{|\}\}', Name.Attribute),
        ],
        'apos_attribute_content': [
            (r"'", Punctuation, 'start_tag'),
            (r'\{', Punctuation, 'root'),
            (r"''", Name.Attribute),
            (aposattrcontentchar, Name.Attribute),
            (entityref, Name.Attribute),
            (charref, Name.Attribute),
            (r'\{\{|\}\}', Name.Attribute),
        ],
        'element_content': [
            (r'</', Name.Tag, 'end_tag'),
            (r'(\{)', pushstate_root_callback),
            (r'(<!--)', pushstate_element_content_xmlcomment_callback),
            (r'(<\?)', pushstate_element_content_processing_instruction_callback),
            (r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
            (r'(<)', pushstate_element_content_starttag_callback),
            (elementcontentchar, Literal),
            (entityref, Literal),
            (charref, Literal),
            (r'\{\{|\}\}', Literal),
        ],
        'end_tag': [
            include('whitespace'),
            (r'(>)', popstate_tag_callback),
            (qname, Name.Tag),
        ],
        'xmlspace_decl': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'preserve|strip', Keyword, '#pop'),
        ],
        'declareordering': [
            (r'\(:', Comment, 'comment'),
            include('whitespace'),
            (r'ordered|unordered', Keyword, '#pop'),
        ],
        'xqueryversion': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (stringdouble, String.Double),
            (stringsingle, String.Single),
            (r'encoding', Keyword),
            (r';', Punctuation, '#pop'),
        ],
        'pragma': [
            (qname, Name.Variable, 'pragmacontents'),
        ],
        'pragmacontents': [
            (r'#\)', Punctuation, 'operator'),
            (u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
             unirange(0x10000, 0x10ffff), Literal),
            (r'(\s+)', Text),
        ],
        'occurrenceindicator': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            (r'\*|\?|\+', Operator, 'operator'),
            (r':=', Operator, 'root'),
            default('operator'),
        ],
        'option': [
            include('whitespace'),
            (qname, Name.Variable, '#pop'),
        ],
        'qname_braren': [
            include('whitespace'),
            (r'(\{)', pushstate_operator_root_callback),
            (r'(\()', Punctuation, 'root'),
        ],
        'element_qname': [
            (qname, Name.Variable, 'root'),
        ],
        'attribute_qname': [
            (qname, Name.Variable, 'root'),
        ],
        'root': [
            include('whitespace'),
            (r'\(:', Comment, 'comment'),
            # handle operator state
            # order on numbers matters - handle most complex first
            (r'\d+(\.\d*)?[eE][+-]?\d+', Number.Float, 'operator'),
            (r'(\.\d+)[eE][+-]?\d+', Number.Float, 'operator'),
            (r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'),
            (r'(\d+)', Number.Integer, 'operator'),
            (r'(\.\.|\.|\))', Punctuation, 'operator'),
            (r'(declare)(\s+)(construction)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
            (r'(declare)(\s+)(default)(\s+)(order)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
            (r'(declare)(\s+)(context)(\s+)(item)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
            (ncname + r':\*', Name, 'operator'),
            (r'\*:'+ncname, Name.Tag, 'operator'),
            (r'\*', Name.Tag, 'operator'),
            (stringdouble, String.Double, 'operator'),
            (stringsingle, String.Single, 'operator'),
            (r'(\}|\])', popstate_callback),
            # NAMESPACE DECL
            (r'(declare)(\s+)(default)(\s+)(collation)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration)),
            (r'(module|declare)(\s+)(namespace)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'namespacedecl'),
            (r'(declare)(\s+)(base-uri)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'namespacedecl'),
            # NAMESPACE KEYWORD
            (r'(declare)(\s+)(default)(\s+)(element|function)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration),
             'namespacekeyword'),
            (r'(import)(\s+)(schema|module)',
             bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
            (r'(declare)(\s+)(copy-namespaces)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'namespacekeyword'),
            # VARNAMEs
            (r'(for|let|some|every)(\s+)(\$)',
             bygroups(Keyword, Text, Name.Variable), 'varname'),
            (r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
             bygroups(Keyword, Text, Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
            (r'\$', Name.Variable, 'varname'),
            (r'(declare)(\s+)(variable)(\s+)(\$)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Name.Variable), 'varname'),
            # ANNOTATED GLOBAL VARIABLES AND FUNCTIONS
            (r'(declare)(\s+)(\%)', bygroups(Keyword.Declaration, Text, Name.Decorator), 'annotationname'),
            # ITEMTYPE
            (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
            (r'(element|attribute|schema-element|schema-attribute|comment|'
             r'text|node|document-node|empty-sequence)(\s+)(\()',
             pushstate_operator_kindtest_callback),
            (r'(processing-instruction)(\s+)(\()',
             pushstate_operator_kindtestforpi_callback),
            (r'(<!--)', pushstate_operator_xmlcomment_callback),
            (r'(<\?)', pushstate_operator_processing_instruction_callback),
            (r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
            # (r'</', Name.Tag, 'end_tag'),
            (r'(<)', pushstate_operator_starttag_callback),
            (r'(declare)(\s+)(boundary-space)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'xmlspace_decl'),
            (r'(validate)(\s+)(lax|strict)',
             pushstate_operator_root_validate_withmode),
            (r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
            (r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
            (r'(switch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
            (r'(element|attribute|namespace)(\s*)(\{)',
             pushstate_operator_root_construct_callback),
            (r'(document|text|processing-instruction|comment)(\s*)(\{)',
             pushstate_operator_root_construct_callback),
            # ATTRIBUTE
            (r'(attribute)(\s+)(?=' + qname + r')',
             bygroups(Keyword, Text), 'attribute_qname'),
            # ELEMENT
            (r'(element)(\s+)(?=' + qname + r')',
             bygroups(Keyword, Text), 'element_qname'),
            # PROCESSING_INSTRUCTION
            (r'(processing-instruction|namespace)(\s+)(' + ncname + r')(\s*)(\{)',
             bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
             'operator'),
            (r'(declare|define)(\s+)(function)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration)),
            (r'(\{|\[)', pushstate_operator_root_callback),
            (r'(unordered|ordered)(\s*)(\{)',
             pushstate_operator_order_callback),
            (r'(map|array)(\s*)(\{)',
             pushstate_operator_map_callback),
            (r'(declare)(\s+)(ordering)',
             bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'declareordering'),
            (r'(xquery)(\s+)(version)',
             bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
            (r'(\(#)(\s*)', bygroups(Punctuation, Text), 'pragma'),
            # sometimes return can occur in root state
            (r'return', Keyword),
            (r'(declare)(\s+)(option)', bygroups(Keyword.Declaration, Text, Keyword.Declaration),
             'option'),
            # URI LITERALS - single and double quoted
            # NOTE(review): these two rules pair a 3-group pattern with a
            # single token type rather than bygroups -- compare the 'at'
            # rules in 'namespacedecl'; confirm against upstream.
            (r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
            (r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
            (r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
             bygroups(Keyword, Punctuation)),
            (r'(descendant|following-sibling|following|parent|preceding-sibling'
             r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
            (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
            (r'then|else', Keyword),
            # eXist specific XQUF
            (r'(update)(\s*)(insert|delete|replace|value|rename)', bygroups(Keyword, Text, Keyword)),
            (r'(into|following|preceding|with)', Keyword),
            # Marklogic specific
            (r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
            (r'(catch)(\s*)(\()(\$)',
             bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
            (r'(@'+qname+')', Name.Attribute, 'operator'),
            (r'(@'+ncname+')', Name.Attribute, 'operator'),
            (r'@\*:'+ncname, Name.Attribute, 'operator'),
            (r'@\*', Name.Attribute, 'operator'),
            (r'(@)', Name.Attribute, 'operator'),
            (r'//|/|\+|-|;|,|\(|\)', Punctuation),
            # STANDALONE QNAMES
            (qname + r'(?=\s*\{)', Name.Tag, 'qname_braren'),
            (qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
            (r'(' + qname + ')(#)([0-9]+)', bygroups(Name.Function, Keyword.Type, Number.Integer)),
            (qname, Name.Tag, 'operator'),
        ]
    }
class QmlLexer(RegexLexer):
    """
    For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.
    .. versionadded:: 1.6
    """
    # QML is based on javascript, so much of this is taken from the
    # JavascriptLexer above.
    name = 'QML'
    aliases = ['qml', 'qbs']
    filenames = ['*.qml', '*.qbs']
    mimetypes = ['application/x-qml', 'application/x-qt.qbs+qml']
    # pasted from JavascriptLexer, with some additions
    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered after tokens that can be followed by a regex literal, to
        # disambiguate '/' as regex-start vs. division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        # Recovery state: skip to end of line after a malformed regex.
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            # QML insertions
            (r'\bid\s*:\s*[A-Za-z][\w.]*', Keyword.Declaration,
             'slashstartsregex'),
            (r'\b[A-Za-z][\w.]*\s*:', Keyword, 'slashstartsregex'),
            # the rest from JavascriptLexer
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class CirruLexer(RegexLexer):
    r"""
    Syntax rules of Cirru can be found at:
    http://cirru.org/
    * using ``()`` for expressions, but restricted in a same line
    * using ``""`` for strings, with ``\`` for escaping chars
    * using ``$`` as folding operator
    * using ``,`` as unfolding operator
    * using indentations for nested blocks
    .. versionadded:: 2.0
    """
    name = 'Cirru'
    aliases = ['cirru']
    filenames = ['*.cirru']
    mimetypes = ['text/x-cirru']
    flags = re.MULTILINE
    tokens = {
        'string': [
            (r'[^"\\\n]', String),
            (r'\\', String.Escape, 'escape'),
            (r'"', String, '#pop'),
        ],
        # Single escaped character after a backslash.
        'escape': [
            (r'.', String.Escape, '#pop'),
        ],
        # First token of an expression is highlighted as the function name.
        'function': [
            (r'\,', Operator, '#pop'),
            (r'[^\s"()]+', Name.Function, '#pop'),
            (r'\)', Operator, '#pop'),
            (r'(?=\n)', Text, '#pop'),
            (r'\(', Operator, '#push'),
            (r'"', String, ('#pop', 'string')),
            (r'[ ]+', Text.Whitespace),
        ],
        'line': [
            (r'(?<!\w)\$(?!\w)', Operator, 'function'),
            (r'\(', Operator, 'function'),
            (r'\)', Operator),
            (r'\n', Text, '#pop'),
            (r'"', String, 'string'),
            (r'[ ]+', Text.Whitespace),
            (r'[+-]?[\d.]+\b', Number),
            (r'[^\s"()]+', Name.Variable)
        ],
        'root': [
            (r'^\n+', Text.Whitespace),
            # Every non-blank line starts in 'function' (for its head token),
            # then falls back to 'line' for the rest.
            default(('line', 'function')),
        ]
    }
class SlimLexer(ExtendedRegexLexer):
    """
    For Slim markup.
    .. versionadded:: 2.0
    """
    name = 'Slim'
    aliases = ['slim']
    filenames = ['*.slim']
    mimetypes = ['text/x-slim']
    flags = re.IGNORECASE
    # "Any char" for Slim: either an explicit ' |'-continued newline (when the
    # next line is also pipe-continued) or a single character.
    _dot = r'(?: \|\n(?=.* \|)|.)'
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            # _indentation (imported from the css/haml lexer helpers) tracks
            # the indent level to delimit nested blocks.
            (r'[ \t]*', _indentation),
        ],
        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],
        'eval-or-plain': [
            # '=' / '==' lines embed Ruby output expressions.
            (r'([ \t]*==?)(.*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            (r'[ \t]+[\w:-]+(?==)', Name.Attribute, 'html-attributes'),
            default('plain'),
        ],
        'content': [
            include('css'),
            (r'[\w:-]+:[ \t]*\n', Text, 'plain'),
            # '-' lines embed Ruby control code.
            (r'(-)(.*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            (r'\|' + _dot + r'*\n', _starts_block(Text, 'plain'), '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment.Preproc, 'slim-comment-block'), '#pop'),
            (r'[\w:-]+', Name.Tag, 'tag'),
            include('eval-or-plain'),
        ],
        'tag': [
            include('css'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            (r'[ \t]+\n', Punctuation, '#pop:2'),
            include('eval-or-plain'),
        ],
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            # Ruby string interpolation #{...} inside plain text.
            (r'(#\{)(.*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'=', Punctuation),
            (r'"[^"]+"', using(RubyLexer), 'tag'),
            (r'\'[^\']+\'', using(RubyLexer), 'tag'),
            (r'\w+', Text, 'tag'),
        ],
        'slim-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
    }
|
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import print_function
import curses
import sys
import signal
import processInput
import usageStrings
import output
import logger
from charCodeMapping import CODE_TO_CHAR
from colorPrinter import ColorPrinter
def signal_handler(sig, frame):
    """Exit cleanly on SIGINT instead of printing a KeyboardInterrupt trace.

    From http://stackoverflow.com/a/1112350/948126.

    Args:
        sig: the signal number delivered by the runtime (unused).
        frame: the current stack frame (unused).
    """
    # Parameters renamed from (signal, frame): the original first parameter
    # shadowed the imported `signal` module. The runtime invokes handlers
    # positionally, so the rename is fully caller-compatible.
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
CHROME_MIN_X = 5
CHROME_MIN_Y = 0
SELECT_MODE = 'SELECT'
COMMAND_MODE = 'COMMAND_MODE'
SHORT_NAV_USAGE = '[f|A] selection, [down|j|up|k|space|b] navigation, [enter] open, [c] command mode'
SHORT_COMMAND_USAGE = 'command examples: | git add | git checkout HEAD~1 -- | mv $F ../here/ |'
SHORT_COMMAND_PROMPT = 'Type a command below! Files will be appended or replace $F'
SHORT_COMMAND_PROMPT2 = 'Enter a blank line to go back to the selection process'
SHORT_FILES_HEADER = 'Files you have selected:'
INVISIBLE_CURSOR = 0
BLOCK_CURSOR = 2
class HelperChrome(object):
    """Draws the usage/help "chrome" around the picker UI.

    On wide terminals (> 200 columns) the usage text is drawn in a
    right-hand sidebar; on narrow terminals it is drawn as a two-line
    bar along the bottom of the screen.
    """
    def __init__(self, printer, screenControl):
        # printer: ColorPrinter used for all drawing.
        # screenControl: the Controller, queried for dimensions/boundaries.
        self.printer = printer
        self.screenControl = screenControl
        self.WIDTH = 50
        if self.getIsSidebarMode():
            logger.addEvent('init_wide_mode')
        else:
            logger.addEvent('init_narrow_mode')
    def output(self, mode):
        """Draw all chrome for the given mode (SELECT_MODE/COMMAND_MODE)."""
        self.mode = mode
        # Each step is attempted independently: curses raises curses.error
        # when drawing outside the screen (tiny terminals), and partial
        # chrome is better than crashing.
        for func in [self.outputSide, self.outputBottom, self.toggleCursor]:
            try:
                func()
            except curses.error:
                pass
    def toggleCursor(self):
        # Hide the cursor while selecting; show a block cursor while the
        # user is typing a command.
        if self.mode == SELECT_MODE:
            curses.curs_set(INVISIBLE_CURSOR)
        else:
            curses.curs_set(BLOCK_CURSOR)
    def reduceMaxY(self, maxy):
        """Return usable height after reserving rows for the bottom bar."""
        if self.getIsSidebarMode():
            return maxy
        return maxy - 4
    def reduceMaxX(self, maxx):
        """Return usable width after reserving columns for the sidebar."""
        if not self.getIsSidebarMode():
            return maxx
        return maxx - self.WIDTH
    def getMinX(self):
        # Command mode uses the full width; otherwise respect the gutter.
        if self.mode == COMMAND_MODE:
            return 0
        return self.screenControl.getChromeBoundaries()[0]
    def getMinY(self):
        return self.screenControl.getChromeBoundaries()[1]
    def getIsSidebarMode(self):
        # Wide terminals get a sidebar instead of a bottom bar.
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        return maxx > 200
    def outputSide(self):
        """Draw the usage text and its vertical border on the right side."""
        if not self.getIsSidebarMode():
            return
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        borderX = maxx - self.WIDTH
        if (self.mode == COMMAND_MODE):
            borderX = len(SHORT_COMMAND_PROMPT) + 20
        usageLines = usageStrings.USAGE_PAGE.split('\n')
        if self.mode == COMMAND_MODE:
            usageLines = usageStrings.USAGE_COMMAND.split('\n')
        for index, usageLine in enumerate(usageLines):
            self.printer.addstr(self.getMinY() + index, borderX + 2, usageLine)
        for y in range(self.getMinY(), maxy):
            self.printer.addstr(y, borderX, '|')
    def outputBottom(self):
        """Draw the two-line usage bar along the bottom of the screen."""
        if self.getIsSidebarMode():
            return
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        borderY = maxy - 2
        # first output text since we might throw an exception during border
        usageStr = SHORT_NAV_USAGE if self.mode == SELECT_MODE else SHORT_COMMAND_USAGE
        borderStr = '_' * (maxx - self.getMinX() - 0)
        self.printer.addstr(borderY, self.getMinX(), borderStr)
        self.printer.addstr(borderY + 1, self.getMinX(), usageStr)
class ScrollBar(object):
    """Draws a scrollbar in the left gutter when content overflows."""
    def __init__(self, printer, lines, screenControl):
        self.printer = printer
        self.screenControl = screenControl
        self.numLines = len(lines)
        # Fractions (0..1) of the content that are currently visible;
        # kept up to date by calcBoxFractions().
        self.boxStartFraction = 0.0
        self.boxStopFraction = 0.0
        self.calcBoxFractions()
        # see if we are activated
        self.activated = True
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        if (self.numLines < maxy):
            self.activated = False
            logger.addEvent('no_scrollbar')
        else:
            logger.addEvent('needed_scrollbar')
    def getIsActivated(self):
        return self.activated
    def calcBoxFractions(self):
        # what we can see is basically the fraction of our screen over
        # total num lines
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        fracDisplayed = min(1.0, (maxy / float(self.numLines)))
        # scrollOffset is <= 0 (content shifted up), hence the negation.
        self.boxStartFraction = -self.screenControl.getScrollOffset() / float(
            self.numLines)
        self.boxStopFraction = self.boxStartFraction + fracDisplayed
    def output(self):
        """Draw the scrollbar; each part independently tolerates curses errors."""
        if not self.activated:
            return
        for func in [self.outputCaps, self.outputBase, self.outputBox,
                     self.outputBorder]:
            try:
                func()
            except curses.error:
                pass
    def getMinY(self):
        # +1 leaves room for the top cap drawn by outputCaps().
        return self.screenControl.getChromeBoundaries()[1] + 1
    def getX(self):
        return 0
    def outputBorder(self):
        # Clear a one-column strip separating the bar from the content.
        x = self.getX() + 4
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        for y in range(0, maxy):
            self.printer.addstr(y, x, ' ')
    def outputBox(self):
        """Draw the box representing the currently visible region."""
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        topY = maxy - 2
        minY = self.getMinY()
        diff = topY - minY
        x = self.getX()
        boxStartY = int(diff * self.boxStartFraction) + minY
        boxStopY = int(diff * self.boxStopFraction) + minY
        self.printer.addstr(boxStartY, x, '/-\\')
        for y in range(boxStartY + 1, boxStopY):
            self.printer.addstr(y, x, '|-|')
        self.printer.addstr(boxStopY, x, '\-/')
    def outputCaps(self):
        # Top and bottom end caps of the scroll track.
        x = self.getX()
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        for y in [self.getMinY() - 1, maxy - 1]:
            self.printer.addstr(y, x, '===')
    def outputBase(self):
        # The dotted track drawn between the caps.
        x = self.getX()
        (maxy, maxx) = self.screenControl.getScreenDimensions()
        for y in range(self.getMinY(), maxy - 1):
            self.printer.addstr(y, x, ' . ')
class Controller(object):
    """Main picker controller: renders matched lines and runs the key loop.

    Owns the curses screen, hover/selection state for the matched lines,
    the scroll offset, and dispatch between SELECT and COMMAND modes.
    """
    def __init__(self, flags, stdscr, lineObjs, cursesAPI):
        self.stdscr = stdscr
        self.cursesAPI = cursesAPI
        self.cursesAPI.useDefaultColors()
        self.colorPrinter = ColorPrinter(self.stdscr, cursesAPI)
        self.flags = flags
        self.lineObjs = lineObjs
        self.hoverIndex = 0
        self.scrollOffset = 0
        self.scrollBar = ScrollBar(self.colorPrinter, lineObjs, self)
        self.helperChrome = HelperChrome(self.colorPrinter, self)
        (self.oldmaxy, self.oldmaxx) = self.getScreenDimensions()
        self.mode = SELECT_MODE
        self.simpleLines = []
        self.lineMatches = []
        # lets loop through and split
        for lineObj in self.lineObjs.values():
            lineObj.controller = self
            if (lineObj.isSimple()):
                self.simpleLines.append(lineObj)
            else:
                self.lineMatches.append(lineObj)
        self.numLines = len(lineObjs.keys())
        self.numMatches = len(self.lineMatches)
        self.setHover(self.hoverIndex, True)
        # the scroll offset might not start off
        # at 0 if our first real match is WAY
        # down the screen -- so lets init it to
        # a valid value after we have all our line objects
        self.updateScrollOffset()
        logger.addEvent('init')
    def getScrollOffset(self):
        return self.scrollOffset
    def getScreenDimensions(self):
        return self.stdscr.getmaxyx()
    def getChromeBoundaries(self):
        (maxy, maxx) = self.stdscr.getmaxyx()
        minx = CHROME_MIN_X if self.scrollBar.getIsActivated() else 0
        maxy = self.helperChrome.reduceMaxY(maxy)
        maxx = self.helperChrome.reduceMaxX(maxx)
        # format of (MINX, MINY, MAXX, MAXY)
        return (minx, CHROME_MIN_Y, maxx, maxy)
    def getViewportHeight(self):
        (minx, miny, maxx, maxy) = self.getChromeBoundaries()
        return maxy - miny
    def setHover(self, index, val):
        self.lineMatches[index].setHover(val)
    def toggleSelect(self):
        self.dirtyHoverIndex()
        self.lineMatches[self.hoverIndex].toggleSelect()
    def toggleSelectAll(self):
        """Toggle selection for one match per distinct file."""
        files = set()
        for line in self.lineMatches:
            if line.getFile() not in files:
                files.add(line.getFile())
                line.toggleSelect()
        self.dirtyLines()
    def setSelect(self, val):
        self.lineMatches[self.hoverIndex].setSelect(val)
    def control(self):
        """Run the blocking input loop until an action exits the program."""
        # we start out by printing everything we need to
        self.printAll()
        self.resetDirty()
        self.moveCursor()
        while True:
            inKey = self.getKey()
            self.checkResize()
            self.processInput(inKey)
            self.processDirty()
            self.resetDirty()
            self.moveCursor()
            self.stdscr.refresh()
    def checkResize(self):
        """Repaint everything when the terminal dimensions change."""
        (maxy, maxx) = self.getScreenDimensions()
        # BUGFIX: was `maxy is not self.oldmaxy or maxx is not self.oldmaxx`.
        # Identity comparison on ints only works inside CPython's small-int
        # cache (<= 256), so wide terminals repainted on every keypress.
        if (maxy != self.oldmaxy or maxx != self.oldmaxx):
            # we resized so print all!
            self.printAll()
            self.resetDirty()
            self.stdscr.refresh()
            logger.addEvent('resize')
        (self.oldmaxy, self.oldmaxx) = self.getScreenDimensions()
    def updateScrollOffset(self):
        """
        yay scrolling logic! we will start simple here
        and basically just center the viewport to current
        matched line
        """
        windowHeight = self.getViewportHeight()
        halfHeight = int(round(windowHeight / 2.0))
        # important, we need to get the real SCREEN position
        # of the hover index, not its index within our matches
        hovered = self.lineMatches[self.hoverIndex]
        desiredTopRow = hovered.getScreenIndex() - halfHeight
        oldOffset = self.scrollOffset
        desiredTopRow = max(desiredTopRow, 0)
        newOffset = -desiredTopRow
        # lets add in some leeway -- dont bother repositioning
        # if the old offset is within 1/2 of the window height
        # of our desired (unless we absolutely have to)
        if abs(newOffset -
               oldOffset) > halfHeight / 2 or self.hoverIndex + oldOffset < 0:
            # need to reassign now we have gone too far
            self.scrollOffset = newOffset
        # BUGFIX: was `oldOffset is not self.scrollOffset` -- int identity
        # comparison again; use value comparison.
        if oldOffset != self.scrollOffset:
            self.dirtyLines()
        # also update our scroll bar
        self.scrollBar.calcBoxFractions()
    def pageDown(self):
        pageHeight = (int)(self.getViewportHeight() * 0.5)
        self.moveIndex(pageHeight)
    def pageUp(self):
        pageHeight = (int)(self.getViewportHeight() * 0.5)
        self.moveIndex(-pageHeight)
    def moveIndex(self, delta):
        # wrap around the ends of the match list
        newIndex = (self.hoverIndex + delta) % self.numMatches
        self.jumpToIndex(newIndex)
    def jumpToIndex(self, newIndex):
        self.setHover(self.hoverIndex, False)
        self.dirtyHoverIndex()
        self.hoverIndex = newIndex
        self.setHover(self.hoverIndex, True)
        self.dirtyHoverIndex()
        self.updateScrollOffset()
    def processInput(self, key):
        """Dispatch a single (already translated) key press to an action."""
        if key == 'UP' or key == 'k':
            self.moveIndex(-1)
        elif key == 'DOWN' or key == 'j':
            self.moveIndex(1)
        elif key == 'c':
            self.beginEnterCommand()
        elif key == ' ' or key == 'PAGE_DOWN':
            self.pageDown()
        elif key == 'b' or key == 'PAGE_UP':
            self.pageUp()
        elif key == 'g':
            self.jumpToIndex(0)
        elif key == 'G':
            self.jumpToIndex(self.numMatches - 1)
        elif key == 'f':
            self.toggleSelect()
        elif key == 'A':
            self.toggleSelectAll()
        elif key == 'ENTER':
            self.onEnter()
        elif key == 'q':
            output.outputNothing()
            # this will get the appropriate selection and save it to a file for reuse
            # before exiting the program
            self.getFilesToUse()
            self.cursesAPI.exit()
    def getFilesToUse(self):
        """Return selected line objects, falling back to the hovered one."""
        # if we have select files, those, otherwise hovered
        toUse = self.getSelectedFiles()
        if not toUse:
            toUse = self.getHoveredFiles()
        # save the selection we are using
        if self.cursesAPI.allowFileOutput():
            output.outputSelection(toUse)
        return toUse
    def getSelectedFiles(self):
        return [lineObj for (index, lineObj) in enumerate(self.lineMatches)
                if lineObj.getSelected()]
    def getHoveredFiles(self):
        return [lineObj for (index, lineObj) in enumerate(self.lineMatches)
                if index == self.hoverIndex]
    def showAndGetCommand(self):
        """Draw the command prompt screen and read a command from the user."""
        fileObjs = self.getFilesToUse()
        files = [fileObj.getFile() for fileObj in fileObjs]
        (maxy, maxx) = self.getScreenDimensions()
        halfHeight = int(round(maxy / 2) - len(files) / 2.0)
        borderLine = '=' * len(SHORT_COMMAND_PROMPT)
        promptLine = '.' * len(SHORT_COMMAND_PROMPT)
        # from helper chrome code
        maxFileLength = maxx - 5
        if self.helperChrome.getIsSidebarMode():
            # need to be shorter to not go into side bar
            maxFileLength = len(SHORT_COMMAND_PROMPT) + 18
        # first lets print all the files
        startHeight = halfHeight - 1 - len(files)
        try:
            self.stdscr.addstr(startHeight - 3, 0, borderLine)
            self.stdscr.addstr(startHeight - 2, 0, SHORT_FILES_HEADER)
            self.stdscr.addstr(startHeight - 1, 0, borderLine)
            for index, file in enumerate(files):
                self.stdscr.addstr(startHeight + index, 0,
                                   file[0:maxFileLength])
        except curses.error:
            pass
        # first print prompt
        try:
            self.stdscr.addstr(halfHeight, 0, SHORT_COMMAND_PROMPT)
            self.stdscr.addstr(halfHeight + 1, 0, SHORT_COMMAND_PROMPT2)
        except curses.error:
            pass
        # then line to distinguish and prompt line
        try:
            self.stdscr.addstr(halfHeight - 1, 0, borderLine)
            self.stdscr.addstr(halfHeight + 2, 0, borderLine)
            self.stdscr.addstr(halfHeight + 3, 0, promptLine)
        except curses.error:
            pass
        self.stdscr.refresh()
        self.cursesAPI.echo()
        maxX = int(round(maxx - 1))
        command = self.stdscr.getstr(halfHeight + 3, 0, maxX)
        return command
    def beginEnterCommand(self):
        """Switch to COMMAND_MODE and execute the command the user enters."""
        self.stdscr.clear()
        # first check if they are trying to enter command mode
        # but already have a command...
        if len(self.flags.getPresetCommand()):
            self.helperChrome.output(self.mode)
            (_, minY, _, maxY) = self.getChromeBoundaries()
            # BUGFIX: was `/ 2`, which yields a float under Python 3 and makes
            # addstr() in printProvidedCommandWarning raise TypeError.
            yStart = (maxY + minY) // 2 - 3
            self.printProvidedCommandWarning(yStart)
            self.getKey()
            self.mode = SELECT_MODE
            self.dirtyLines()
            return
        self.mode = COMMAND_MODE
        self.helperChrome.output(self.mode)
        logger.addEvent('enter_command_mode')
        command = self.showAndGetCommand()
        if len(command) == 0:
            # go back to selection mode and repaint
            self.mode = SELECT_MODE
            self.cursesAPI.noecho()
            self.dirtyLines()
            logger.addEvent('exit_command_mode')
            return
        lineObjs = self.getFilesToUse()
        output.execComposedCommand(command, lineObjs)
        sys.exit(0)
    def onEnter(self):
        """Open the selected (or hovered) files, or run the preset command."""
        lineObjs = self.getFilesToUse()
        if not lineObjs:
            # nothing selected, assume we want hovered
            lineObjs = self.getHoveredFiles()
        logger.addEvent('selected_num_files', len(lineObjs))
        # commands passed from the command line get used immediately
        presetCommand = self.flags.getPresetCommand()
        if len(presetCommand) > 0:
            output.execComposedCommand(presetCommand, lineObjs)
        else:
            output.editFiles(lineObjs)
        sys.exit(0)
    def resetDirty(self):
        # reset all dirty state for our components
        self.linesDirty = False
        self.dirtyIndexes = []
    def dirtyHoverIndex(self):
        self.dirtyIndexes.append(self.hoverIndex)
    def dirtyLines(self):
        self.linesDirty = True
    def processDirty(self):
        """Repaint only what changed since the last loop iteration."""
        if self.linesDirty:
            self.printAll()
        for index in self.dirtyIndexes:
            self.lineMatches[index].output(self.colorPrinter)
        if self.helperChrome.getIsSidebarMode():
            # need to output since lines can override
            # the sidebar stuff
            self.printChrome()
    def printAll(self):
        self.stdscr.clear()
        self.printLines()
        self.printScroll()
        self.printChrome()
    def printLines(self):
        for lineObj in self.lineObjs.values():
            lineObj.output(self.colorPrinter)
    def printScroll(self):
        self.scrollBar.output()
    def printProvidedCommandWarning(self, yStart):
        """Explain that command mode is unavailable with a preset command."""
        self.colorPrinter.setAttributes(
            curses.COLOR_WHITE, curses.COLOR_RED, 0)
        self.stdscr.addstr(yStart, 0, 'Oh no! You already provided a command so ' +
                           'you cannot enter command mode.')
        self.stdscr.attrset(0)
        self.stdscr.addstr(
            yStart + 1, 0, 'The command you provided was "%s" ' % self.flags.getPresetCommand())
        self.stdscr.addstr(
            yStart + 2, 0, 'Press any key to go back to selecting files.')
    def printChrome(self):
        self.helperChrome.output(self.mode)
    def moveCursor(self):
        x = CHROME_MIN_X if self.scrollBar.getIsActivated() else 0
        y = self.lineMatches[
            self.hoverIndex].getScreenIndex() + self.scrollOffset
        self.stdscr.move(y, x)
    def getKey(self):
        """Read one keypress and translate it via CODE_TO_CHAR ('' if unknown)."""
        charCode = self.stdscr.getch()
        return CODE_TO_CHAR.get(charCode, '')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reshape bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
# Public symbols exported by this module.
__all__ = [
    "Reshape",
]
@deprecation.deprecated(
    "2018-10-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def _static_ndims_from_shape(shape):
  """Return the static length of the shape vector `shape` (may be None)."""
  return shape.shape.with_rank_at_least(1)[0].value
@deprecation.deprecated(
    "2018-10-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def _ndims_from_shape(shape):
  """Return the length of the shape vector `shape` as a scalar int Tensor."""
  return array_ops.shape(shape)[0]
class Reshape(bijector.Bijector):
  """Reshapes the `event_shape` of a `Tensor`.

  The semantics generally follow that of `tf.reshape()`, with
  a few differences:

  * The user must provide both the input and output shape, so that
    the transformation can be inverted. If an input shape is not
    specified, the default assumes a vector-shaped input, i.e.,
    event_shape_in = (-1,).
  * The `Reshape` bijector automatically broadcasts over the leftmost
    dimensions of its input (`sample_shape` and `batch_shape`); only
    the rightmost `event_ndims_in` dimensions are reshaped. The
    number of dimensions to reshape is inferred from the provided
    `event_shape_in` (`event_ndims_in = len(event_shape_in)`).

  Example usage:

  ```python
  import tensorflow_probability as tfp
  tfb = tfp.bijectors
  r = tfb.Reshape(event_shape_out=[1, -1])
  r.forward([3., 4.])  # shape [2]
  # ==> [[3., 4.]]  # shape [1, 2]
  r.forward([[1., 2.], [3., 4.]])  # shape [2, 2]
  # ==> [[[1., 2.]],
  #      [[3., 4.]]]  # shape [2, 1, 2]
  r.inverse([[3., 4.]])  # shape [1,2]
  # ==> [3., 4.]  # shape [2]
  r.forward_log_det_jacobian(any_value)
  # ==> 0.
  r.inverse_log_det_jacobian(any_value)
  # ==> 0.
  ```
  """
  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self, event_shape_out, event_shape_in=(-1,),
               validate_args=False, name=None):
    """Creates a `Reshape` bijector.

    Args:
      event_shape_out: An `int`-like vector-shaped `Tensor`
        representing the event shape of the transformed output.
      event_shape_in: An optional `int`-like vector-shape `Tensor`
        representing the event shape of the input. This is required in
        order to define inverse operations; the default of (-1,)
        assumes a vector-shaped input.
      validate_args: Python `bool` indicating whether arguments should
        be checked for correctness.
      name: Python `str`, name given to ops managed by this object.

    Raises:
      TypeError: if either `event_shape_in` or `event_shape_out` has
        non-integer `dtype`.
      ValueError: if either of `event_shape_in` or `event_shape_out`
        has non-vector shape (`rank > 1`), or if their sizes do not
        match.
    """
    with ops.name_scope(name, "reshape",
                        values=[event_shape_out, event_shape_in]):
      event_shape_out = ops.convert_to_tensor(event_shape_out,
                                              name="event_shape_out",
                                              preferred_dtype=dtypes.int32)
      event_shape_in = ops.convert_to_tensor(event_shape_in,
                                             name="event_shape_in",
                                             preferred_dtype=dtypes.int32)
      # Shape validity checks: static violations raise immediately; dynamic
      # ones become assert ops attached to every transformation.
      assertions = []
      assertions.extend(self._maybe_check_valid_shape(
          event_shape_out, validate_args))
      assertions.extend(self._maybe_check_valid_shape(
          event_shape_in, validate_args))
      self._assertions = assertions
      self._event_shape_in = event_shape_in
      self._event_shape_out = event_shape_out
      super(Reshape, self).__init__(
          forward_min_event_ndims=0,
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name or "reshape")
  def _maybe_check_valid_shape(self, shape, validate_args):
    """Check that a shape Tensor is int-type and otherwise sane.

    Returns a (possibly empty) list of assert ops for checks that could
    not be performed statically.
    """
    if not shape.dtype.is_integer:
      raise TypeError("{} dtype ({}) should be `int`-like.".format(
          shape, shape.dtype.name))
    assertions = []
    ndims = array_ops.rank(shape)
    ndims_ = tensor_util.constant_value(ndims)
    if ndims_ is not None and ndims_ > 1:
      raise ValueError("`{}` rank ({}) should be <= 1.".format(
          shape, ndims_))
    elif validate_args:
      assertions.append(check_ops.assert_less_equal(
          ndims, 1, message="`{}` rank should be <= 1.".format(shape)))
    shape_ = tensor_util.constant_value_as_shape(shape)
    if shape_.is_fully_defined():
      es = np.int32(shape_.as_list())
      if sum(es == -1) > 1:
        raise ValueError(
            "`{}` must have at most one `-1` (given {})"
            .format(shape, es))
      if np.any(es < -1):
        raise ValueError(
            "`{}` elements must be either positive integers or `-1`"
            "(given {})."
            .format(shape, es))
    elif validate_args:
      assertions.extend([
          check_ops.assert_less_equal(
              math_ops.reduce_sum(
                  math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)),
              1,
              message="`{}` elements must have at most one `-1`."
              .format(shape)),
          check_ops.assert_greater_equal(
              shape, -1,
              message="`{}` elements must be either positive integers or `-1`."
              .format(shape)),
      ])
    return assertions
  def _reshape_helper(self, x, event_shape_in, event_shape_out):
    """Reshape only the event_shape of an input `Tensor`."""
    event_ndims_in_ = _static_ndims_from_shape(event_shape_in)
    event_ndims_in = _ndims_from_shape(event_shape_in)
    x_ndims_, x_ndims = x.shape.ndims, array_ops.rank(x)
    assertions = []
    # Ensure x.event_shape is compatible with event_shape_in.
    if (event_ndims_in_ is not None
        and x_ndims_ is not None
        and x.shape.with_rank_at_least(event_ndims_in_)[
            x_ndims_-event_ndims_in_:].is_fully_defined()):
      x_event_shape_, x_event_shape = [  # pylint: disable=unbalanced-tuple-unpacking
          np.int32(x.shape[x_ndims_-event_ndims_in_:])]*2
    else:
      x_event_shape_, x_event_shape = (
          None, array_ops.shape(x)[x_ndims-event_ndims_in:])
    event_shape_in_ = tensor_util.constant_value(event_shape_in)
    if x_event_shape_ is not None and event_shape_in_ is not None:
      # Compare the shape dimensions that are fully specified in the
      # input (i.e., for which event_shape_in is not -1). If x_event_shape
      # matches along all of these dimensions, it is compatible with
      # the desired input shape and any further mismatches (i.e.,
      # imcompatibility with the desired *output* shape) will be
      # caught inside of array_ops.reshape() below.
      x_event_shape_specified_ = x_event_shape_[event_shape_in_ >= 0]
      event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0]
      if not np.equal(x_event_shape_specified_,
                      event_shape_in_specified_).all():
        raise ValueError(
            "Input `event_shape` does not match `event_shape_in` ({} vs {}).".
            format(x_event_shape_, event_shape_in_))
    elif self.validate_args:
      # Similarly to the static case, we compare the shape dimensions
      # that are fully specified in the input. We extract these
      # dimensions using boolean_mask(), which requires that the mask
      # have known ndims. We can assume that shape Tensors always have
      # ndims==1 (this assumption is verified inside of
      # _maybe_check_valid_shape), so the reshape operation is just a
      # no-op that formally encodes this fact to make boolean_mask()
      # happy.
      event_shape_mask = array_ops.reshape(event_shape_in >= 0, [-1])
      x_event_shape_specified = array_ops.boolean_mask(x_event_shape,
                                                       event_shape_mask)
      event_shape_in_specified = array_ops.boolean_mask(event_shape_in,
                                                        event_shape_mask)
      assertions.append(check_ops.assert_equal(
          x_event_shape_specified, event_shape_in_specified,
          message="Input `event_shape` does not match `event_shape_in`."))
    if assertions:
      x = control_flow_ops.with_dependencies(assertions, x)
    # get the parts of shape(x) that will not change
    sample_and_batch_shape = array_ops.shape(x)
    ndims = (x.shape.ndims if x.shape.ndims is not None
             else array_ops.rank(x))
    sample_and_batch_shape = sample_and_batch_shape[
        :(ndims - math_ops.abs(event_ndims_in))]
    if (event_ndims_in_ is not None
        and x_ndims_ is not None
        and event_ndims_in_ == x_ndims_):
      # Hack to allow forward/inverse_event_shape to do shape
      # inference by calling this helper method with a dummy Tensor of
      # shape event_shape_in. In this special case,
      # sample_and_batch_shape will be empty so we can preserve static
      # shape information by avoiding the concat operation below
      # (which would be a no-op).
      new_shape = event_shape_out
    else:
      new_shape = array_ops.concat(
          [sample_and_batch_shape, event_shape_out], axis=0)
    return array_ops.reshape(x, new_shape)
  def _forward(self, x):
    # Reshape the rightmost event dims from event_shape_in to
    # event_shape_out, broadcasting over sample/batch dims.
    with ops.control_dependencies(self._assertions):
      return self._reshape_helper(x,
                                  self._event_shape_in,
                                  self._event_shape_out)
  def _inverse(self, y):
    # The inverse is the same reshape with the shapes swapped.
    with ops.control_dependencies(self._assertions):
      return self._reshape_helper(y,
                                  self._event_shape_out,
                                  self._event_shape_in)
  def _inverse_log_det_jacobian(self, y):
    # Reshaping is volume-preserving, so the log-det-Jacobian is 0.
    with ops.control_dependencies(self._assertions):
      return constant_op.constant(0., dtype=y.dtype)
  def _forward_log_det_jacobian(self, x):
    # Reshaping is volume-preserving, so the log-det-Jacobian is 0.
    with ops.control_dependencies(self._assertions):
      return constant_op.constant(0., dtype=x.dtype)
  def _forward_event_shape(self, input_shape):
    # NOTE: this method and the other *_event_shape* methods
    # compute shape by explicit transformation of a dummy
    # variable. This approach is not generally recommended because it
    # bloats the graph and could in general trigger side effects.
    #
    # In this particular case of the Reshape bijector, the
    # forward and inverse transforms have no side effects, and we
    # believe the reduction in code complexity from delegating the
    # heavy lifting to tf.reshape() is worth the added graph ops.
    # However, you should think hard before implementing this approach
    # in other Bijectors; it is strongly preferred to compute
    # shapes explicitly whenever it's feasible to do so.
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
      dummy_reshaped = self.forward(dummy)
      return dummy_reshaped.shape
  def _inverse_event_shape(self, output_shape):
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
      dummy_reshaped = self.inverse(dummy)
      return dummy_reshaped.shape
  def _forward_event_shape_tensor(self, input_shape):
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
      dummy_reshaped = self.forward(dummy)
      return array_ops.shape(dummy_reshaped)
  def _inverse_event_shape_tensor(self, output_shape):
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
      dummy_reshaped = self.inverse(dummy)
      return array_ops.shape(dummy_reshaped)
|
|
# coding: utf-8
# imports
import os
import re
import itertools
from time import gmtime, strftime, localtime, time
from PIL import Image, ImageFile
# django imports
from django.core.files.storage import default_storage
from django.utils.encoding import smart_str
# filebrowser imports
from filebrowser.settings import (
ADMIN_VERSIONS, VERSIONS_BASEDIR, DEBUG, URL_FILEBROWSER_MEDIA,
PATH_FILEBROWSER_MEDIA, URL_TINYMCE, PATH_TINYMCE, EXTENSIONS,
SELECT_FORMATS, VERSIONS, ADMIN_THUMBNAIL, PREVIEW_VERSION,
MAX_UPLOAD_SIZE, CONVERT_FILENAME, STRICT_PIL, IMAGE_MAXBLOCK
)
from filebrowser.conf import fb_settings
import sys
# Interpreter version tuple, used to choose Python-2/3 code paths below.
_ver = sys.version_info
def url_to_path(value):
    """
    Convert a URL into a server path.

    ``value`` may be relative to MEDIA_URL or a full URL that includes
    MEDIA_URL; the returned path is relative to MEDIA_ROOT.
    """
    pattern = r'^({0})'.format(fb_settings.MEDIA_URL)
    return re.sub(pattern, '', value)
def path_to_url(value):
    """
    Convert a server path into a URL.

    ``value`` must be a path relative to MEDIA_ROOT; the returned URL
    is relative to MEDIA_ROOT (prefixed with MEDIA_URL).
    """
    pattern = r'^({0})'.format(fb_settings.MEDIA_ROOT)
    stripped = re.sub(pattern, '', value)
    return url_join(fb_settings.MEDIA_URL, stripped)
def dir_from_url(value):
    """
    Get the relative server directory from a URL.

    The URL may be absolute (including MEDIA_URL) or relative to
    MEDIA_URL; the MEDIA_URL and DIRECTORY prefixes are stripped in turn.
    """
    for prefix in (fb_settings.MEDIA_URL, fb_settings.DIRECTORY):
        value = re.sub(r'^({0})'.format(prefix), '', value)
    return os.path.split(value)[0]
def get_version_path(value, version_prefix):
    """
    Construct the PATH to an Image version.
    Value has to be server-path, relative to MEDIA_ROOT.
    version_filename = filename + version_prefix + ext
    Returns a path relative to MEDIA_ROOT, or None when the original
    file does not exist on disk.
    """
    if os.path.isfile(smart_str(os.path.join(fb_settings.MEDIA_ROOT, value))):
        path, filename = os.path.split(value)
        filename, ext = os.path.splitext(filename)
        # check if this file is a version of an other file
        # to return filename_<version>.ext instead of
        # filename_<version>_<version>.ext
        tmp = filename.split("_")
        if tmp[len(tmp)-1] in ADMIN_VERSIONS:
            # it seems like the "original"
            # is actually a version of an other original
            # so we strip the suffix (aka. version_perfix)
            new_filename = filename.replace("_" + tmp[len(tmp)-1], "")
            # check if the version exists when we use the new_filename
            if os.path.isfile(smart_str(os.path.join(fb_settings.MEDIA_ROOT, path, new_filename + "_" + version_prefix + ext))):
                # our "original" filename seem to be
                # filename_<version> construct
                # so we replace it with the new_filename
                filename = new_filename
        # if a VERSIONS_BASEDIR is set we need to strip it
        # from the path or we get
        # a <VERSIONS_BASEDIR>/<VERSIONS_BASEDIR>/... construct
        if VERSIONS_BASEDIR != "":
            path = path.replace(VERSIONS_BASEDIR + "/", "")
        version_filename = filename + "_" + version_prefix + ext
        return os.path.join(VERSIONS_BASEDIR, path, version_filename)
    else:
        return None
def sort_by_attr(seq, attr):
    """
    Sort the sequence of objects by object's attribute

    Arguments:
    seq  - the list or any sequence (including immutable one)
           of objects to sort.
    attr - the name of attribute to sort by

    Returns:
    the sorted list of objects, or the sequence unchanged when the
    attribute values are not mutually comparable.

    Note: the previous implementation's Python 3 branch used the
    Python-2-only ``map(None, ...)`` idiom, which raises TypeError in
    Python 3 -- so the function silently returned the sequence
    *unsorted*.  ``sorted`` with an ``attrgetter`` key gives the same
    stable, attribute-only comparison (objects themselves are never
    compared) on both Python versions.
    """
    import operator
    try:
        return sorted(seq, key=operator.attrgetter(attr))
    except TypeError:
        # Attribute values are not comparable with each other; preserve
        # the previous best-effort behaviour of returning the input.
        return seq
def url_join(*args):
    """
    Join URL fragments into a single URL.

    Keeps the scheme of the first fragment (http/https) or falls back
    to a leading "/".  Backslashes are normalised to slashes, empty
    segments are dropped, and the trailing slash is removed when the
    last fragment looks like a filename (has an extension).
    """
    first = args[0]
    if first.startswith("http://"):
        result = "http://"
    elif first.startswith("https://"):
        result = "https://"
    else:
        result = "/"
    for fragment in args:
        for piece in fragment.replace("\\", "/").split("/"):
            # Skip empty segments and scheme leftovers from splitting.
            if piece not in ("", "http:", "https:"):
                result += piece + "/"
    # remove trailing slash for filenames
    if os.path.splitext(args[-1])[1]:
        result = result.rstrip("/")
    return result
def get_path(path):
    """
    Validate a directory path relative to the FileBrowser DIRECTORY.

    Returns the path unchanged when it is relative, does not start with
    a dot, and names an existing directory; otherwise returns None.
    """
    target = os.path.join(fb_settings.MEDIA_ROOT, fb_settings.DIRECTORY, path)
    invalid = (path.startswith('.')
               or os.path.isabs(path)
               or not os.path.isdir(target))
    return None if invalid else path
def get_file(path, filename):
    """
    Validate a file (or directory) name inside the FileBrowser DIRECTORY.

    Returns ``filename`` when the combined path exists on disk as either
    a file or a directory; otherwise returns None.
    """
    target = smart_str(os.path.join(
        fb_settings.MEDIA_ROOT,
        fb_settings.DIRECTORY,
        path,
        filename
    ))
    if os.path.isfile(target) or os.path.isdir(target):
        return filename
    return None
def get_breadcrumbs(query, path):
    """
    Build breadcrumb entries for ``path``.

    Each entry is a ``[name, cumulative_path]`` pair, one per path
    component.  ``query`` is unused but kept for interface compatibility.
    """
    crumbs = []
    accumulated = ""
    for part in (path.split(os.sep) if path else []):
        accumulated = os.path.join(accumulated, part)
        crumbs.append([part, accumulated])
    return crumbs
def get_filterdate(filter_date, date_time):
    """
    Evaluate a date filter against a Unix timestamp.

    ``filter_date`` is one of 'today', 'thismonth', 'thisyear',
    'past7days' or '' (no filter).  Returns the string 'true' when the
    timestamp matches (or no filter is given), '' otherwise.
    """
    year, month, day = (int(strftime(fmt, gmtime(date_time)))
                        for fmt in ("%Y", "%m", "%d"))
    now = localtime()
    if filter_date == 'today' and (year, month, day) == (now[0], now[1], now[2]):
        return 'true'
    if filter_date == 'thismonth' and date_time >= time() - 2592000:
        return 'true'
    if filter_date == 'thisyear' and year == now[0]:
        return 'true'
    if filter_date == 'past7days' and date_time >= time() - 604800:
        return 'true'
    if filter_date == '':
        return 'true'
    return ''
def get_settings_var():
    """
    Get settings variables used for FileBrowser listing.
    Returns a dict of setting names to values, suitable for inclusion
    in a template context.
    """
    settings_var = {
        # Main
        'DEBUG': DEBUG,
        'MEDIA_ROOT': fb_settings.MEDIA_ROOT,
        'MEDIA_URL': fb_settings.MEDIA_URL,
        'DIRECTORY': fb_settings.DIRECTORY,
        # FileBrowser
        'URL_FILEBROWSER_MEDIA': URL_FILEBROWSER_MEDIA,
        'PATH_FILEBROWSER_MEDIA': PATH_FILEBROWSER_MEDIA,
        # TinyMCE
        'URL_TINYMCE': URL_TINYMCE,
        'PATH_TINYMCE': PATH_TINYMCE,
        # Extensions/Formats (for FileBrowseField)
        'EXTENSIONS': EXTENSIONS,
        'SELECT_FORMATS': SELECT_FORMATS,
        # Versions
        'VERSIONS_BASEDIR': VERSIONS_BASEDIR,
        'VERSIONS': VERSIONS,
        'ADMIN_VERSIONS': ADMIN_VERSIONS,
        'ADMIN_THUMBNAIL': ADMIN_THUMBNAIL,
        'PREVIEW_VERSION': PREVIEW_VERSION,
        # FileBrowser Options
        'MAX_UPLOAD_SIZE': MAX_UPLOAD_SIZE,
        # Convert Filenames
        'CONVERT_FILENAME': CONVERT_FILENAME,
    }
    return settings_var
def handle_file_upload(path, file):
    """
    Store an uploaded file under *path* via Django's default storage.

    Returns the name the storage backend actually saved the file under
    (the backend may rename it to avoid collisions).
    """
    target = os.path.join(path, file.name)
    return default_storage.save(target, file)
def get_file_type(filename):
    """
    Map *filename* to a type key from the EXTENSIONS setting.

    Matching is case-insensitive on the file extension. If the extension
    is listed under several type keys, the last matching key (in
    EXTENSIONS iteration order) wins — mirroring the historical
    behaviour. Returns '' when nothing matches.
    """
    extension = os.path.splitext(filename)[1].lower()
    matches = [
        type_key
        for type_key, extensions in EXTENSIONS.items()
        for candidate in extensions
        if extension == candidate.lower()
    ]
    return matches[-1] if matches else ''
def is_selectable(filename, selecttype):
    """
    Return every SELECT_FORMATS key whose extension list contains the
    extension of *filename* (case-insensitive).

    ``selecttype`` is accepted for interface compatibility but unused.
    """
    extension = os.path.splitext(filename)[1].lower()
    return [
        fmt
        for fmt, extensions in SELECT_FORMATS.items()
        for candidate in extensions
        if extension == candidate.lower()
    ]
def version_generator(value, version_prefix, force=None):
    """
    Generate a resized/cropped version of an image.

    value: server path of the source image, relative to MEDIA_ROOT.
    version_prefix: key into the VERSIONS setting (width/height/opts).
    force: accepted for interface compatibility; not used here.
    Returns the version's path relative to MEDIA_ROOT, or None on failure.
    """
    ImageFile.MAXBLOCK = IMAGE_MAXBLOCK  # default is 64k
    try:
        im = Image.open(smart_str(os.path.join(fb_settings.MEDIA_ROOT, value)))
        version_path = get_version_path(value, version_prefix)
        absolute_version_path = smart_str(
            os.path.join(fb_settings.MEDIA_ROOT, version_path)
        )
        version_dir = os.path.split(absolute_version_path)[0]
        if not os.path.isdir(version_dir):
            os.makedirs(version_dir)
            os.chmod(version_dir, 0o775)
        version = scale_and_crop(
            im,
            VERSIONS[version_prefix]['width'],
            VERSIONS[version_prefix]['height'],
            VERSIONS[version_prefix]['opts']
        )
        try:
            # optimize=True is rejected for some formats (e.g. GIF on
            # older PIL); retry below without it.
            version.save(
                absolute_version_path,
                quality=90,
                optimize=(os.path.splitext(version_path)[1].lower() != '.gif')
            )
        except IOError:
            version.save(absolute_version_path, quality=90)
        return version_path
    except Exception:
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt. Any real failure (missing file, broken image,
        # unwritable directory) still yields None as before.
        return None
def scale_and_crop(im, width, height, opts):
    """
    Scale a PIL image to (width, height) and optionally center-crop it.

    Either dimension may be falsy (0/None); the missing one is derived
    from the image's aspect ratio. opts is a container of option names:
    'crop' scales to cover the target box then crops the overflow;
    'upscale' permits enlarging images smaller than the target.
    Returns the (possibly new) PIL image.
    """
    x, y = [float(v) for v in im.size]
    # Target dimensions; derive the missing one from the aspect ratio.
    if width:
        xr = float(width)
    else:
        xr = float(x * height / y)
    if height:
        yr = float(height)
    else:
        yr = float(y * width / x)
    # 'crop' covers the box (max ratio); otherwise fit inside (min ratio).
    if 'crop' in opts:
        r = max(xr / x, yr / y)
    else:
        r = min(xr / x, yr / y)
    # Downscale always; upscale only when explicitly requested.
    if r < 1.0 or (r > 1.0 and 'upscale' in opts):
        im = im.resize((int(x * r), int(y * r)), resample=Image.ANTIALIAS)
    if 'crop' in opts:
        # Trim the overflow equally from both sides of each axis.
        x, y = [float(v) for v in im.size]
        ex, ey = (x - min(x, xr)) / 2, (y - min(y, yr)) / 2
        if ex or ey:
            im = im.crop((int(ex), int(ey), int(x - ex), int(y - ey)))
    return im
# Advertised option names, used by settings validation elsewhere.
scale_and_crop.valid_options = ('crop', 'upscale')
def convert_filename(value):
    """
    Normalise *value* (spaces -> underscores, lower-case) when the
    CONVERT_FILENAME setting is enabled; otherwise return it untouched.
    """
    if not CONVERT_FILENAME:
        return value
    return value.replace(" ", "_").lower()
def _template():
    """Return the template directory prefix, honouring SUIT_TEMPLATE."""
    return 'suit/' if fb_settings.SUIT_TEMPLATE else 'filebrowser/'
|
|
# 6.00 Problem Set 4
#
# Caesar Cipher Skeleton
#
import string
import random
import numbers
WORDLIST_FILENAME = "words.txt"
# -----------------------------------
# Helper code
# (you don't need to understand this helper code)
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    The whole dictionary is expected on the first line of
    WORDLIST_FILENAME, separated by spaces. Depending on the size of the
    word list, this function may take a while to finish.
    """
    print("Loading word list from file...")
    # Fix: the file handle was opened and never closed; use a context
    # manager so it is released even if reading fails.
    with open(WORDLIST_FILENAME, 'r') as inFile:
        # wordlist: list of strings
        wordlist = inFile.readline().split()
    print(" ", len(wordlist), "words loaded.")
    return wordlist
# Load the dictionary once at import time so the helpers below share it.
wordlist = load_words()
def is_word(wordlist, word):
    """
    Determine whether *word* is a valid dictionary word.

    The candidate is lower-cased and stripped of surrounding punctuation
    before the lookup, so 'Bat!' matches 'bat'.

    wordlist: list of words in the dictionary.
    word: a possible word.
    returns: True if the cleaned word is in wordlist.
    """
    cleaned = word.lower().strip(" !@#$%^&*()-_+={}[]|\:;'<>?,./\"")
    return cleaned in wordlist
def random_word(wordlist):
    """
    Pick one word from *wordlist* uniformly at random.

    wordlist: list of words
    returns: a single word from wordlist
    """
    return random.choice(wordlist)
def random_string(wordlist, n):
    """
    Build a string of *n* random dictionary words separated by spaces.

    wordlist: list of words
    returns: the space-joined string (empty for n == 0)
    """
    # random.choice inlined from random_word — identical behaviour.
    picks = [random.choice(wordlist) for _ in range(n)]
    return " ".join(picks)
def random_scrambled(wordlist, n):
    """
    Generates a test string by generating an n-word random string
    and encrypting it with a sequence of random shifts.

    wordlist: list of words
    n: number of random words to generate and scamble
    returns: a scrambled string of n random words

    NOTE:
        This function will ONLY work once you have completed your
        implementation of apply_shifts!
    """
    s = random_string(wordlist, n) + " "
    # A shift starts at each word boundary: s[i-1] == ' ' selects the
    # character right after every space. For i == 0, s[-1] is the
    # trailing space appended above, so position 0 always gets a shift.
    shifts = [(i, random.randint(0, 26)) for i in range(len(s)) if s[i-1] == ' ']
    # Drop the helper space again before returning.
    return apply_shifts(s, shifts)[:-1]
def get_fable_string():
    """
    Returns a fable in encrypted text, read from fable.txt.
    """
    with open("fable.txt", "r") as source:
        return str(source.read())
# (end of helper code)
# -----------------------------------
#
# Problem 1: Encryption
#
def build_coder(shift):
    """
    Build a Caesar-cipher substitution dict over a 27-character alphabet:
    a-z plus space, and A-Z plus space (case is preserved).

    Characters outside the alphabet simply have no entry. Because the
    space appears in both the lower-case and upper-case tables and the
    upper-case assignment happens last, ' ' maps through the UPPER-case
    alphabet.

    shift: int in [0, 27)
    returns: dict mapping each letter/space to its shifted counterpart
    """
    assert shift >= 0 and shift < 27, 'shift %s is not between 0 and 27' % shift
    #numbers.Integral used in case of long integers
    assert isinstance(shift, numbers.Integral), 'shift is not an integer'
    lower_alpha = string.ascii_lowercase + ' '
    upper_alpha = string.ascii_uppercase + ' '
    coder = {}
    for index in range(27):
        target = (index + shift) % 27
        coder[lower_alpha[index]] = lower_alpha[target]
        coder[upper_alpha[index]] = upper_alpha[target]
    return coder
#print(build_coder(3))
def apply_coder(text, coder):
    """
    Encode *text* by substituting each letter or space through *coder*.

    Characters that are neither alphabetic nor a space (digits,
    punctuation) pass through unchanged.

    text: string
    coder: dict mapping characters to their substitutes
    returns: the transformed string
    """
    assert type(text) is str, 'text is not a string'
    encoded = [
        coder[char] if (char.isalpha() or char == ' ') else char
        for char in text
    ]
    return ''.join(encoded)
#print(apply_coder(2, build_coder(4)))
#print(apply_coder("Lipps,D svph!", build_coder(23)))
def apply_shift(text, shift):
    """
    Caesar-shift *text* over the 27-character alphabet (letters plus
    space). Case is preserved; punctuation and digits are untouched;
    the space shifts like a 27th letter.

    text: string to apply the shift to
    shift: int in [0, 27)
    returns: the shifted string
    """
    assert type(text) is str, 'text is not a string'
    assert shift >= 0 and shift < 27, 'shift %s is not between 0 and 27' % shift
    coder = build_coder(shift)
    return apply_coder(text, coder)
# print(apply_shift('This is a test.', 8))
#
# Problem 2: Codebreaking.
#
def find_best_shift(wordlist, text):
    """
    Find a shift that decodes *text* into recognizable words.

    Tries every shift 0..26, splits each candidate decoding into
    alphabetic runs, and returns the first shift for which at least one
    run (lower-cased) is a dictionary word.

    wordlist: list of valid words
    text: the encrypted string
    returns: int shift in [0, 27), or None when no decoding matches
    """
    for shift in range(27):
        candidate = apply_shift(text, shift)
        # Split the decoding into alphabetic runs by hand so punctuation
        # acts as a separator.
        words = []
        current = ''
        for char in candidate:
            if char.isalpha():
                current += char
            else:
                words.append(current)
                current = ''
        # Fix: flush the trailing run; previously a final word with no
        # trailing separator was never collected, so a valid decoding
        # ending in a dictionary word could be missed.
        if current:
            words.append(current)
        for guess in words:
            if guess and guess.lower() in wordlist:
                return shift
    # Fix: explicit miss result (was an implicit None after debug prints;
    # the leftover print() diagnostics have been removed).
    return None
# s = apply_coder('Hello, world!', build_coder(8))
# print(find_best_shift(wordlist,s))
#
# Problem 3: Multi-level encryption.
#
def apply_shifts(text, shifts):
    """
    Applies a sequence of shifts to an input text.

    text: A string to apply the Ceasar shifts to
    shifts: A list of tuples containing the location each shift should
    begin and the shift offset. Each tuple is of the form (location,
    shift) The shifts are layered: each one is applied from its
    starting position all the way through the end of the string.

    returns: text after applying the shifts to the appropriate
    positions

    Example:
    >>> apply_shifts("Do Androids Dream of Electric Sheep?", [(0,6), (3, 18), (12, 16)])
    'JufYkaolfapxQdrnzmasmRyrpfdvpmEurrb?'
    """
    # Implemented (was a TODO stub returning None, which broke
    # random_scrambled above): each (start, shift) re-encrypts the
    # suffix beginning at `start`, layered on top of earlier shifts.
    result = text
    for start, shift in shifts:
        result = result[:start] + apply_shift(result[start:], shift)
    return result
#
# Problem 4: Multi-level decryption.
#
def find_best_shifts(wordlist, text):
    """
    Given a scrambled string, returns a shift key that will decode the text to
    words in wordlist, or None if there is no such key.

    Hint: Make use of the recursive function
    find_best_shifts_rec(wordlist, text, start)

    wordlist: list of words
    text: scambled text to try to find the words for
    returns: list of tuples. each tuple is (position in text, amount of shift)

    Examples:
    >>> s = random_scrambled(wordlist, 3)
    >>> s
    'eqorqukvqtbmultiform wyy ion'
    >>> shifts = find_best_shifts(wordlist, s)
    >>> shifts
    [(0, 25), (11, 2), (21, 5)]
    >>> apply_shifts(s, shifts)
    'compositor multiform accents'
    >>> s = apply_shifts("Do Androids Dream of Electric Sheep?", [(0,6), (3, 18), (12, 16)])
    >>> s
    'JufYkaolfapxQdrnzmasmRyrpfdvpmEurrb?'
    >>> shifts = find_best_shifts(wordlist, s)
    >>> print apply_shifts(s, shifts)
    Do Androids Dream of Electric Sheep?
    """
    # TODO: unimplemented skeleton stub — currently returns None
    # implicitly; expected to delegate to find_best_shifts_rec(wordlist,
    # text, 0).
def find_best_shifts_rec(wordlist, text, start):
    """
    Given a scrambled string and a starting position from which
    to decode, returns a shift key that will decode the text to
    words in wordlist, or None if there is no such key.

    Hint: You will find this function much easier to implement
    if you use recursion.

    wordlist: list of words
    text: scambled text to try to find the words for
    start: where to start looking at shifts
    returns: list of tuples. each tuple is (position in text, amount of shift)
    """
    ### TODO.
    # Unimplemented skeleton stub — currently returns None implicitly.
def decrypt_fable():
    """
    Using the methods you created in this problem set,
    decrypt the fable given by the function get_fable_string().
    Once you decrypt the message, be sure to include as a comment
    at the end of this problem set how the fable relates to your
    education at MIT.

    returns: string - fable in plain text
    """
    ### TODO.
    # Unimplemented skeleton stub — intended to combine get_fable_string(),
    # find_best_shifts() and apply_shifts(); currently returns None.
#What is the moral of the story?
#
#
#
#
#
|
|
# First we import parts of the frameworks we're using:
#
# Flask <http://flask.pocoo.org> is a simple framework for building web
# applications in Python. It handles basic things like parsing incoming
# HTTP requests and generating responses.
#
# Flask-RESTful <https://flask-restful.readthedocs.io/> is an add-on to Flask
# that makes it easier to build web applications that adhere to the REST
# architectural style.
from flask import (Flask, Response, request, render_template, make_response,
redirect)
from flask_restful import Api, Resource, reqparse, abort
# Next we import some standard Python libraries and functions:
#
# json <https://docs.python.org/3/library/json.html> for loading a JSON file
# from disk (our "database") into memory.
#
# random <https://docs.python.org/3/library/random.html> and string
# <https://docs.python.org/3/library/string.html> to help us generate
# unique IDs for help tickets from lowercase letters and digits.
#
# datetime <https://docs.python.org/3/library/datetime.html> to help us
# generate timestamps for help tickets.
#
# wraps <https://docs.python.org/3/library/functools.html#functools.wraps>
# is just a convenience function that will help us implement authentication.
import json
import random
import string
from datetime import datetime
from functools import wraps
# Define some constants for our priority levels.
# These are the values that the "priority" property can take on a help ticket.
PRIORITIES = ('closed', 'low', 'normal', 'high')
# Load data from disk.
# This simply loads the data from our "database," which is just a JSON file.
# NOTE: `data` is first bound to the open file handle, then immediately
# rebound to the parsed dict; after this block `data` is the in-memory store.
with open('data.jsonld') as data:
    data = json.load(data)
# The next three functions implement simple authentication.
# Check that username and password are OK; DON'T DO THIS FOR REAL
def check_auth(username, password):
    """Return True only for the single hard-coded admin credential pair."""
    return (username, password) == ('admin', 'secret')
# Issue an authentication challenge
def authenticate():
    """Build a 401 response challenging the client for HTTP Basic credentials."""
    challenge = {'WWW-Authenticate': 'Basic realm="helpdesk"'}
    return Response('Please authenticate yourself', 401, challenge)
# Decorator for methods that require authentication
def requires_auth(f):
    """Decorator: answer 401 unless the request carries valid Basic auth."""
    @wraps(f)
    def decorated(*args, **kwargs):
        credentials = request.authorization
        if credentials and check_auth(credentials.username, credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return decorated
# The following are three helper functions used in our resource classes.
# Generate a unique ID for a new help ticket.
# By default this will consist of six lowercase numbers and letters.
def generate_id(size=6, chars=string.ascii_lowercase + string.digits):
    """Return a random ID of *size* characters drawn (with repetition) from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
# Respond with 404 Not Found if no help ticket with the specified ID exists.
def error_if_helpticket_not_found(helpticket_id):
    """Abort the request with 404 when *helpticket_id* is not a known ticket."""
    if helpticket_id in data['helptickets']:
        return
    abort(404, message="No help ticket with ID: {}".format(helpticket_id))
# Filter and sort a list of helptickets.
def filter_and_sort_helptickets(query='', sort_by='time'):
    """
    Return (id, ticket) pairs whose title or description contains *query*,
    sorted descending by the *sort_by* property (default 'time').
    """
    # Returns True if the query string appears (case-insensitively) in
    # the help ticket's title or description.
    def matches_query(item):
        (helpticket_id, helpticket) = item
        text = helpticket['title'] + helpticket['description']
        # Fix: lower-case BOTH sides. Previously only the query was
        # lower-cased, so a term appearing capitalized in the ticket
        # text could never match.
        return query.lower() in text.lower()
    # Returns the help ticket's value for the sort property.
    def get_sort_value(item):
        (helpticket_id, helpticket) = item
        return helpticket[sort_by]
    filtered_helptickets = filter(matches_query, data['helptickets'].items())
    return sorted(filtered_helptickets, key=get_sort_value, reverse=True)
# Now we define three incoming HTTP request parsers using the Flask-RESTful
# framework <https://flask-restful.readthedocs.io/en/latest/reqparse.html>.
#
# The first (new_helpticket_parser) parses incoming POST requests and checks
# that they have the required values.
#
# The second (update_helpticket_parser) parses incoming PATCH requests and
# checks that they have the required values.
#
# The third (query_parser) parses incoming GET requests to get the parameters
# for sorting and filtering the list of help tickets.
# Helper function new_helpticket_parser. Raises an error if the string x
# is empty (has zero length).
def nonempty_string(x):
    """
    reqparse type helper: coerce *x* to str and reject empty strings.

    Raises ValueError when the coerced string is empty; reqparse turns
    that into a 400 response naming the offending argument.
    """
    s = str(x)
    # Fix: measure the coerced string. ``len(x)`` raised TypeError for
    # unsized inputs (e.g. numbers) instead of validating them.
    if len(s) == 0:
        raise ValueError('string is empty')
    return s
# Specify the data necessary to create a new help ticket.
# "from", "title", and "description" are all required values.
new_helpticket_parser = reqparse.RequestParser()
for arg in ['from', 'title', 'description']:
    new_helpticket_parser.add_argument(
        arg, type=nonempty_string, required=True,
        help="'{}' is a required value".format(arg))
# Specify the data necessary to update an existing help ticket.
# Only the priority and comments can be updated.
# NOTE(review): because 'priority' carries a default, a PATCH that omits
# it silently resets the ticket's priority to 'normal' — confirm intended.
update_helpticket_parser = reqparse.RequestParser()
update_helpticket_parser.add_argument(
    'priority', type=int, default=PRIORITIES.index('normal'))
update_helpticket_parser.add_argument(
    'comment', type=str, default='')
# Specify the parameters for filtering and sorting help tickets.
# See `filter_and_sort_helptickets` above.
query_parser = reqparse.RequestParser()
query_parser.add_argument(
    'query', type=str, default='')
query_parser.add_argument(
    'sort_by', type=str, choices=('priority', 'time'), default='time')
# Then we define a couple of helper functions for inserting data into HTML
# templates (found in the templates/ directory). See
# <http://flask.pocoo.org/docs/latest/quickstart/#rendering-templates>.
# Given the data for a help ticket, generate an HTML representation
# of that help ticket.
def render_helpticket_as_html(helpticket):
    """Render one help ticket through the microdata+RDFa template."""
    priority_choices = reversed(list(enumerate(PRIORITIES)))
    return render_template(
        'helpticket+microdata+rdfa.html',
        helpticket=helpticket,
        priorities=priority_choices)
# Given the data for a list of help tickets, generate an HTML representation
# of that list.
def render_helpticket_list_as_html(helptickets):
    """Render the ticket collection through the microdata+RDFa template."""
    return render_template(
        'helptickets+microdata+rdfa.html',
        priorities=PRIORITIES,
        helptickets=helptickets)
# Now we can start defining our resource classes. We define four classes:
# HelpTicket, HelpTicketAsJSON, HelpTicketList, and HelpTicketListAsJSON.
# All of them accept GET requests. HelpTicket also accepts PATCH requests,
# and HelpTicketList also accepts POST requests.
# Define our help ticket resource.
class HelpTicket(Resource):
    """HTML resource for a single help ticket: GET to view, PATCH to update."""

    def get(self, helpticket_id):
        """404 for unknown IDs, otherwise an HTML view of the ticket."""
        error_if_helpticket_not_found(helpticket_id)
        ticket = data['helptickets'][helpticket_id]
        return make_response(render_helpticket_as_html(ticket), 200)

    def patch(self, helpticket_id):
        """Apply a priority/comment update, then respond with fresh HTML."""
        error_if_helpticket_not_found(helpticket_id)
        ticket = data['helptickets'][helpticket_id]
        update = update_helpticket_parser.parse_args()
        ticket['priority'] = update['priority']
        # Only record the comment when it is non-blank (the original,
        # unstripped text is what gets stored).
        if update['comment'].strip():
            ticket.setdefault('comments', []).append(update['comment'])
        return make_response(render_helpticket_as_html(ticket), 200)
# Define a resource for getting a JSON representation of a help ticket.
class HelpTicketAsJSON(Resource):
    """JSON-LD resource for a single help ticket."""

    def get(self, helpticket_id):
        """404 for unknown IDs, otherwise the ticket as JSON-LD."""
        error_if_helpticket_not_found(helpticket_id)
        # Fix: work on a shallow copy. Previously '@context' was written
        # straight into the shared in-memory store, polluting every later
        # representation of this ticket (e.g. the whole-list JSON).
        helpticket = dict(data['helptickets'][helpticket_id])
        helpticket['@context'] = data['@context']
        return helpticket
# Define our help ticket list resource.
class HelpTicketList(Resource):
    """HTML resource for the ticket collection: GET to list, POST to create."""

    def get(self):
        """List tickets, honouring the query/sort_by request parameters."""
        params = query_parser.parse_args()
        tickets = filter_and_sort_helptickets(**params)
        return make_response(render_helpticket_list_as_html(tickets), 200)

    def post(self):
        """Create a ticket from the request body; answer 201 with the new list."""
        ticket = new_helpticket_parser.parse_args()
        ticket_id = generate_id()
        ticket['@id'] = 'request/' + ticket_id
        ticket['@type'] = 'helpdesk:HelpTicket'
        ticket['time'] = datetime.isoformat(datetime.now())
        ticket['priority'] = PRIORITIES.index('normal')
        data['helptickets'][ticket_id] = ticket
        refreshed = render_helpticket_list_as_html(filter_and_sort_helptickets())
        return make_response(refreshed, 201)
# Define a resource for getting a JSON representation of the help ticket list.
class HelpTicketListAsJSON(Resource):
    """JSON resource exposing the whole in-memory store (tickets plus @context)."""
    def get(self):
        return data
# After defining our resource classes, we define how URLs are assigned to
# resources by mapping resource classes to URL patterns.
app = Flask(__name__)
api = Api(app)
# HTML representations live at the bare paths; appending ".json" to the
# same path selects the JSON representation of the same resource.
api.add_resource(HelpTicketList, '/tickets')
api.add_resource(HelpTicketListAsJSON, '/tickets.json')
api.add_resource(HelpTicket, '/ticket/<string:helpticket_id>')
api.add_resource(HelpTicketAsJSON, '/ticket/<string:helpticket_id>.json')
# There is no resource mapped to the root path (/), so if a request comes in
# for that, redirect to the HelpTicketList resource.
@app.route('/')
def index():
    """Redirect bare-root requests to the ticket list (303 See Other)."""
    target = api.url_for(HelpTicketList)
    return redirect(target, code=303)
# Finally we add some headers to all of our HTTP responses which will allow
# JavaScript loaded from other domains and running in the browser to load
# representations of our resources (for security reasons, this is disabled
# by default.
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE'),
    )
    for name, value in cors_headers:
        response.headers.add(name, value)
    return response
# Now we can start the server.
# Now we can start the server.
# NOTE(review): host='0.0.0.0' binds every interface and debug=True is
# set — fine for a classroom demo, unsafe for anything public.
if __name__ == '__main__':
    app.run(
        host='0.0.0.0',
        port=5555,
        debug=True,
        use_debugger=False,
        use_reloader=False)
|
|
import socket
import struct
import sys
import time
import threading
from Queue import Queue
from msgpack import (
packb as packs,
unpackb as unpacks
)
__all__ = ['Agent']
class Agent(object):
    """
    Validate and package the metrics for graphdat.

    add() filters incoming metric objects and enqueues them; a daemon
    _SendToGraphdat thread drains the queue and ships the data.
    """
    # if the queue gets larger than this, stop adding metrics instead of blocking
    MAX_QUEUE_SIZE = 100
    # The queue will hold all of the messages to be sent to graphdat
    _queue = Queue()
    # The background worker push the data to graphdat
    _backgroundWorker = None
    def __init__(self, graphdat):
        if graphdat is None:
            raise TypeError(
                "the graphdat parameter should not be None")
        self.graphdat = graphdat
        self.log = self.graphdat.log
        # create the background worker thread if it is not running already
        # NOTE(review): this assignment binds an INSTANCE attribute that
        # shadows the class-level _backgroundWorker, so each Agent gets
        # its own worker — confirm that is intended.
        if not self._backgroundWorker or not self._backgroundWorker.isAlive():
            self._backgroundWorker = _SendToGraphdat(self.graphdat, self._queue)
            self._backgroundWorker.daemon = True
            self._backgroundWorker.start()
    def add(self, metrics):
        """
        Add metrics to your graphdat dashboard
        """
        # if we have no data, no worries, continue on
        if metrics is None:
            return
        # if its a string, we cant do anything with it and we shouldnt
        # be getting it in the first place, something is probably wrong
        if isinstance(metrics, str):
            raise TypeError(
                "the metrics should not be a string value")
        # if its a single metric, wrap it so we can iterate uniformly.
        # Fix: ``(metrics)`` is just a parenthesised expression, NOT a
        # tuple, so a single metric object used to reach the for-loop
        # unwrapped and blow up with TypeError.
        if not hasattr(metrics, "__iter__"):
            metrics = (metrics,)
        for metric in metrics:
            # Only HTTP metrics are supported
            if metric.source != 'HTTP':
                continue
            if not metric.route:
                self.log("graphdat could not get a the route from the trace")
                continue
            # send the metric to the queue as long as we have room
            if self._queue.qsize() < self.MAX_QUEUE_SIZE:
                self._queue.put(metric)
class _SendToGraphdat(threading.Thread):
    """
    Create a separate thread to pull from the queue
    and send the messages to graphdat.

    Messages are msgpack-encoded and pushed through either a file socket
    (when the graphdat config exposes ``socketFile``) or UDP.
    """
    # The heartbeat worker keeps the file socket open
    _heartbeatWorker = None
    def __init__(self, graphdat, queue):
        threading.Thread.__init__(self)
        # the graphdat instance and logger
        if graphdat is None:
            raise TypeError(
                "the graphdat parameter should not be None")
        self.graphdat = graphdat
        self.dump = graphdat.dump
        self.error = graphdat.error
        self.log = graphdat.log
        # the queue to pull the messages from
        if queue is None:
            raise TypeError(
                "the queue parameter should not be None")
        self.queue = queue
        # keep track of the last time we sent the data or a heartbeart
        self.lastSentData = time.time()
        # how we talk to the graphdat agent: presence of a socketFile
        # attribute selects the UNIX file socket transport, else UDP.
        if hasattr(self.graphdat, "socketFile"):
            self.transport = _FileSocket(self.graphdat)
        else:
            self.transport = _UDPSocket(self.graphdat)
        # if the transport requires a heartbeat, start it
        # NOTE(review): like Agent._backgroundWorker, this binds an
        # instance attribute shadowing the class-level _heartbeatWorker.
        if hasattr(self.transport, 'heartbeatInterval'):
            if not self._heartbeatWorker or not self._heartbeatWorker.isAlive():
                self._heartbeatWorker = _SendHeartbeat(self.graphdat, self, self.transport)
                self._heartbeatWorker.daemon = True
                self._heartbeatWorker.start()
    def run(self):
        # Infinite consumer loop: the thread runs as a daemon and dies
        # with the process.
        while True:
            # grab the next message (blocks until one is available)
            message = self.queue.get(block=True)
            # we have a message to send, the heart beat
            # can take a break
            self.lastSentData = time.time()
            # msgpack it
            message = packs(message)
            # send the message
            success = self.transport.send(message)
            # tell the queue we are done
            self.queue.task_done()
            if (success):
                self.log("Message sent")
                self.dump(unpacks(message, use_list=True))
            else:
                self.error("Sending metrics to Graphdat failed")
class _SendHeartbeat(threading.Thread):
    """
    Create a separate thread to send a heartbeat
    to keep the connection open

    Sends an empty message whenever no real payload has gone out for a
    full heartbeat interval.
    """
    def __init__(self, graphdat, sender, transport):
        threading.Thread.__init__(self)
        # NOTE(review): the ``graphdat`` argument is accepted but never
        # stored or used.
        self.sender = sender
        self.transport = transport
    def run(self):
        # Wake every interval; only emit a heartbeat when the sender has
        # been idle longer than the interval.
        while True:
            time.sleep(self.transport.heartbeatInterval)
            now = time.time()
            elapsed = now - self.sender.lastSentData
            if elapsed > self.transport.heartbeatInterval:
                self.transport.sendHeartbeat()
                self.sender.lastSentData = now
class _FileSocket(object):
    """
    Use a File socket to talk to the Graphdat Agent

    Wire format: a 4-byte big-endian length header, then the payload.
    A zero-length payload (header only) doubles as the heartbeat.
    NOTE: this module is Python 2 (``except X, e`` syntax below).
    """
    # the interval we should send heart beats to the file socket
    HEARTBEAT_INTERVAL = 30
    # How many attempts do we use to send to the file socket.
    SEND_ATTEMPTS = 3
    def __init__(self, graphdat,
                 heartbeatInterval=HEARTBEAT_INTERVAL,
                 sendAttempts=SEND_ATTEMPTS):
        self.error = graphdat.error
        self.log = graphdat.log
        # the location of the file socket
        self.socketFile = graphdat.socketFile
        # the file socket needs a hearbeat to stay open
        self.heartbeatInterval = heartbeatInterval
        # How many attempts do we use to send to the file socket.
        self.sendAttempts = sendAttempts
        # the file socket
        self.sock = None
        self.isOpen = False
    def __del__(self):
        # Best-effort close when the object is collected.
        self._disconnect()
    def send(self, message):
        """
        Send the metrics to graphdat

        Retries up to sendAttempts times, reconnecting after any I/O
        failure. Returns True when the whole payload was written.
        """
        sent = False
        length = len(message)
        # 4-byte big-endian length prefix expected by the agent.
        header = struct.pack(">i", length)
        for i in range(self.sendAttempts):
            # open the socket if we are not connected
            if not self.isOpen:
                self._connect()
            # if we are still not open, close the socket and try again
            if not self.isOpen:
                self._disconnect()
                continue
            try:
                # we send the header first, it tells the agent how long
                # the message we are sending is
                headerSent = self.sock.send(header)
                # was the header received, if not raise the error and try again
                if headerSent != len(header):
                    raise IOError("Socket connection was broken when sending message header")
                # empty messages are used as a heartbeat to keep the socket connection open
                # we only need to send the header
                if length == 0:
                    sent = True
                    break
                # send the message, looping because send() may write only
                # part of the buffer
                totalSent = 0
                while totalSent < length:
                    sent = self.sock.send(message[totalSent:])
                    if sent == 0:
                        raise IOError("Socket connection was broken while sending message")
                    totalSent += sent
                # success! (rebinds `sent` from byte count to a boolean)
                sent = (totalSent == length)
                break
            except IOError, msg:
                self.error("socket error")
                self.error(msg)
                self._disconnect()
            except Exception, msg:
                self.error("Unexpected error")
                self.error(msg)
                self._disconnect()
        return sent
    def sendHeartbeat(self):
        """
        Send a heart beat to the socket to let the agent know we are alive
        """
        # just send an empty message (length header of 0, no payload)
        self.send("")
    def _connect(self):
        # Open the UNIX-domain stream socket with a short timeout;
        # failures only flip isOpen so send() can retry.
        try:
            self.log("opening socket " + self.socketFile)
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.settimeout(3)
            self.sock.connect(self.socketFile)
            self.isOpen = True
        except Exception, msg:
            self.error(msg)
            self.isOpen = False
    def _disconnect(self):
        # Close and drop the socket; always mark the connection closed.
        try:
            self.log("closing socket %s" % self.socketFile)
            self.sock.close()
            self.sock = None
        except Exception, msg:
            self.error(msg)
        self.isOpen = False
class _UDPSocket(object):
"""
Use a UDP socket to talk to the Graphdat Agent
"""
def __init__(self, graphdat):
self.error = graphdat.error
self.host = graphdat.socketHost
self.port = graphdat.socketPort
self.sock = None
def send(self, message):
"""
Send the metrics to graphdat
"""
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.sendto(message, (self.host, self.port))
return True
except:
self.error("Unexpected error:", sys.exc_info()[0])
return False
|
|
# Copyright (c) 2010 Stephen Paul Weber. Based on work by Joao Prado Maia.
# Licensed under the ISC License
import MySQLdb
import time
from mimify import mime_encode_header, mime_decode_header
import re
import settings
import mime
import strutil
import os.path
try:
import html2text
except ImportError:
html2text = None # Optional, GPL
# patch by Andreas Wegmann <Andreas.Wegmann@VSA.de> to fix the handling of unusual encodings of messages
# NOTE(review): these patterns are plain (non-raw) strings; sequences such
# as "\?" rely on unknown escapes passing through unchanged. Newer Pythons
# emit SyntaxWarning for this — raw strings would behave identically.
q_quote_multiline = re.compile("=\?(.*?)\?[qQ]\?(.*?)\?=.*?=\?\\1\?[qQ]\?(.*?)\?=", re.M | re.S)
# we don't need to compile the regexps everytime..
doubleline_regexp = re.compile("^\.\.", re.M)
singleline_regexp = re.compile("^\.", re.M)
from_regexp = re.compile("^From:(.*)<(.*)>", re.M)
subject_regexp = re.compile("^Subject:(.*)", re.M)
references_regexp = re.compile("^References:(.*)<(.*)>", re.M)
lines_regexp = re.compile("^Lines:(.*)", re.M)
class Papercut_Storage:
    """
    Storage Backend interface for the Wordpress blog software

    This is the interface for Wordpress running on a MySQL database. For more information
    on the structure of the 'storage' package, please refer to the __init__.py
    available on the 'storage' sub-directory.

    NNTP article numbers are maintained in an auxiliary table
    (wp_newsgroup_meta) that maps Wordpress posts and comments to
    message IDs; see update_newsgroup_meta().
    """
    def __init__(self):
        # Connect to the Wordpress database and make sure the auxiliary
        # mapping table exists: it assigns monotonically increasing NNTP
        # article numbers (AUTO_INCREMENT) to posts and comments.
        self.conn = MySQLdb.connect(host=settings.dbhost, db=settings.dbname, user=settings.dbuser, passwd=settings.dbpass, charset='utf8', use_unicode=True)
        self.cursor = self.conn.cursor()
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS wp_newsgroup_meta(
                                  article_number BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
                                  message_id CHAR(255) UNIQUE NOT NULL,
                                  id BIGINT NOT NULL, tbl CHAR(50), newsgroup CHAR(255),
                                  CONSTRAINT UNIQUE INDEX id_table (id, tbl),
                                  INDEX newsgroup (newsgroup)
                               )""")
        self.update_newsgroup_meta()
    def get_message_body(self, headers):
        """Parses and returns the most appropriate message body possible.

        The function tries to extract the plaintext version of a MIME based
        message, and if it is not available then it returns the html version.
        Delegates entirely to the project-local ``mime`` helper module.
        """
        return mime.get_text_message(headers)
    def quote_string(self, text):
        """Quotes strings the MySQL way.

        NOTE(review): only single quotes are escaped; backslashes and
        other metacharacters pass through, so this is not a complete
        defence against SQL injection. Prefer parameterized queries
        (cursor.execute(sql, params)) over string interpolation.
        """
        return text.replace("'", "\\'")
    def group_exists(self, group_name):
        # Only a single hard-coded newsgroup is served for now.
        return (group_name == 'blog.singpolyma') # TODO
    def update_newsgroup_meta(self):
        """Register any new posts/comments in the article-number table.

        Inserts one wp_newsgroup_meta row for every published post and
        approved comment that does not already have one (the LEFT JOIN /
        isNULL(b.id) filter), ordered by timestamp so AUTO_INCREMENT
        article numbers follow chronological order. Message-IDs are
        synthesised as <post-N@host> / <comment-N@host>.
        """
        group = 'blog.singpolyma' # TODO
        meta_table = self.get_table_name(table_name='newsgroup_meta')
        posts_table = self.get_table_name(table_name='posts')
        comments_table = self.get_table_name(table_name='comments')
        # The template below is written against the default 'wp_' names
        # and then rewritten to the configured table names via replace().
        stmt = """ INSERT INTO wp_newsgroup_meta (id, tbl, message_id, newsgroup)
                   SELECT ID, tbl, message_id, '%s' FROM (
                   (SELECT
                        a.ID, 'wp_posts' AS tbl,
                        CONCAT('<post-', a.ID, '@%s>') AS message_id,
                        post_date_gmt AS datestamp
                    FROM
                        wp_posts a LEFT JOIN wp_newsgroup_meta b ON a.ID=b.id AND b.tbl='wp_posts'
                    WHERE
                        isNULL(b.id) AND post_type='post' AND post_status='publish'
                   ) UNION (
                    SELECT
                        comment_ID as ID, 'wp_comments' AS tbl,
                        CONCAT('<comment-', comment_ID, '@%s>') AS message_id,
                        comment_date_gmt AS datestamp
                    FROM
                        wp_posts c, wp_comments a LEFT JOIN wp_newsgroup_meta b ON comment_ID=b.id AND b.tbl='wp_comments'
                    WHERE
                        a.comment_post_ID=c.ID AND
                        isNULL(b.id) AND comment_approved='1' AND
                        post_type='post' AND post_status='publish'
                   )
                   ORDER BY datestamp) t
               """.replace('wp_posts', posts_table).replace('wp_comments', comments_table).replace('wp_newsgroup_meta', meta_table) % (group, settings.nntp_hostname, settings.nntp_hostname)
        self.cursor.execute(stmt)
def article_exists(self, group_name, style, range):
self.update_newsgroup_meta()
table_name = self.get_table_name(table_name='newsgroup_meta')
stmt = """
SELECT
COUNT(*) AS total
FROM
%s
WHERE
newsgroup='%s' AND """ % (table_name, group_name)
if style == 'range':
stmt = "%s AND article_number > %s" % (stmt, range[0])
if len(range) == 2:
stmt = "%s AND article_number < %s" % (stmt, range[1])
else:
stmt = "%s AND article_number = %s" % (stmt, range[0])
self.cursor.execute(stmt)
return self.cursor.fetchone()[0]
def get_first_article(self, group_name):
self.update_newsgroup_meta()
table_name = self.get_table_name(table_name='newsgroup_meta')
stmt = """
SELECT
IF(MIN(message_num) IS NULL, 0, MIN(message_num)) AS first_article
FROM
%s
WHERE
newsgroup='%s'""" % (table_name, group_name)
num_rows = self.cursor.execute(stmt)
return self.cursor.fetchone()[0]
    def get_group_stats(self, group_name):
        """Return (total, first, last, group_name) stats for a newsgroup.

        Queries the newsgroup_meta table for the article count and the
        min/max article numbers; NULL aggregates collapse to 0 for empty
        groups.
        """
        self.update_newsgroup_meta()
        table_name = self.get_table_name(table_name='newsgroup_meta')
        stmt = """
                SELECT
                   COUNT(article_number) AS total,
                   IF(MAX(article_number) IS NULL, 0, MAX(article_number)) AS maximum,
                   IF(MIN(article_number) IS NULL, 0, MIN(article_number)) AS minimum
                FROM
                    %s
                WHERE
                    newsgroup='%s'""" % (table_name, group_name)
        self.cursor.execute(stmt)
        total, maxi, mini = self.cursor.fetchone()
        # Note the reorder: callers receive (total, first, last, name).
        return (total, mini, maxi, group_name)
def get_table_name(self, group_name=None, table_name=None):
if not table_name:
table_name = 'posts'
return 'wp_' + table_name # TODO
    def get_message_id(self, msg_num, group, table=None):
        """Return the NNTP message-id for an article.

        When *table* is given, *msg_num* is interpreted as the WordPress row
        id in that table; otherwise it is the NNTP article number.
        """
        table_name = self.get_table_name(table_name='newsgroup_meta')
        # py2-era "and/or" ternary: match on the row id when a table is
        # named, otherwise on the article number.
        compar = table and 'id' or 'article_number'
        stmt = """
            SELECT
                message_id
            FROM
                %s
            WHERE
                newsgroup='%s' AND %s=%s
        """ % (table_name, group, compar, int(msg_num))
        if table:
            # Row ids are only unique per source table, so qualify by tbl.
            stmt += " AND tbl='%s'" % self.get_table_name(table_name=table)
        self.cursor.execute(stmt)
        return self.cursor.fetchone()[0]
    def get_article_sql(self):
        """Build the shared base SELECT used by article-reading methods.

        Returns a UNION over published posts and approved comments joined to
        newsgroup_meta.  Result column layout relied on by callers:
        0=article_number, 1=ID, 2=display_name, 3=user_email, 4=post_title,
        5=datestamp (unix), 6=post_content, 7=post_parent, 8=comment_parent,
        9=message_id.  Callers append further "AND ..." predicates.
        """
        meta_table = self.get_table_name(table_name='newsgroup_meta')
        posts_table = self.get_table_name(table_name='posts')
        comments_table = self.get_table_name(table_name='comments')
        # Table names are written literally and swapped for their prefixed
        # forms afterwards with .replace().
        stmt = """
            SELECT M.article_number,S.*,M.message_id FROM (
            (SELECT
                A.ID as ID,
                display_name,
                user_email,
                post_title,
                UNIX_TIMESTAMP(post_date_gmt) AS datestamp,
                post_content,
                post_parent,
                0 AS comment_parent
            FROM
                wp_posts A,
                wp_users
            WHERE
                A.post_type='post' AND A.post_status='publish' AND
                A.post_author=wp_users.ID
            ) UNION (
            SELECT
                comment_ID AS ID,
                IF(user_id = 0, comment_author, display_name) as display_name,
                IF(user_id = 0, comment_author_email, user_email) as user_email,
                CONCAT('Re: ', post_title) as post_title,
                UNIX_TIMESTAMP(comment_date_gmt) AS datestamp,
                comment_content AS post_content,
                comment_post_ID AS post_parent,
                comment_parent
            FROM
                wp_comments A LEFT OUTER JOIN
                wp_users ON user_id=wp_users.ID,
                wp_posts
            WHERE
                comment_post_ID=wp_posts.ID AND
                comment_approved='1' AND
                wp_posts.post_type='post' AND wp_posts.post_status='publish'
            ) ) S, wp_newsgroup_meta M
            WHERE
                M.id=S.ID
        """.replace('wp_posts', posts_table).replace('wp_comments', comments_table).replace('wp_newsgroup_meta', meta_table)
        return stmt
def get_NEWGROUPS(self, ts, group='%'):
return None # TODO
    def get_NEWNEWS(self, ts, group='*'):
        """Handle NEWNEWS: article numbers newer than time-struct *ts*.

        Returns a CRLF-joined string of article numbers (one per row tuple).
        The *group* argument is currently ignored in favour of the single
        hard-coded group.
        """
        self.update_newsgroup_meta()
        group = 'blog.singpolyma'  # TODO
        meta_table = self.get_table_name(table_name='newsgroup_meta')
        posts_table = self.get_table_name(table_name='posts')
        comments_table = self.get_table_name(table_name='comments')
        # Convert the time struct to a unix timestamp for SQL comparison.
        ts = int(time.mktime(ts))
        stmt = """
                (SELECT
                    article_number
                FROM
                    wp_posts, wp_newsgroup_meta
                WHERE
                    wp_posts.ID=wp_newsgroup_meta.id AND wp_newsgroup_meta.tbl='wp_posts' AND
                    post_type='post' AND post_status='publish' AND
                    UNIX_TIMESTAMP(post_date_gmt) >= %s
                ) UNION (
                SELECT
                    article_number
                FROM
                    wp_comments,
                    wp_posts,
                    wp_newsgroup_meta
                WHERE
                    comment_ID=wp_newsgroup_meta.id AND wp_newsgroup_meta.tbl='wp_comments' AND
                    comment_post_ID=wp_posts.ID AND
                    post_type='post' AND post_status='publish' AND
                    comment_approved = '1' AND
                    UNIX_TIMESTAMP(comment_date_gmt) >= %s
                )
                ORDER BY
                    article_number ASC""" % (ts, ts)
        stmt = stmt.replace('wp_posts', posts_table).replace('wp_comments', comments_table).replace('wp_newsgroup_meta', meta_table)
        self.cursor.execute(stmt)
        result = list(self.cursor.fetchall())
        # NOTE(review): each k is a 1-tuple, so lines render as "(n,)" —
        # confirm whether "%s" % k[0] was intended.
        return "\r\n".join(["%s" % k for k in result])
def get_GROUP(self, group_name):
stats = self.get_group_stats(group_name)
return (stats[0], stats[1], stats[2])
def get_LIST(self, username=""):
lists = []
stats = self.get_group_stats('blog.singpolyma')
lists.append("%s %s %s y" % ('blog.singpolyma', stats[2], stats[1])) # TODO
return "\r\n".join(lists)
    def get_STAT(self, group_name, id):
        """Handle STAT: check whether article *id* exists in *group_name*.

        Returns cursor.execute()'s row count (truthy when the article
        exists) rather than the row itself.
        """
        meta_table = self.get_table_name(table_name='newsgroup_meta')
        stmt = """
                SELECT
                    article_number
                FROM
                    %s
                WHERE
                    newsgroup='%s' AND
                    article_number=%s""" % (meta_table, group_name, id)
        # NOTE(review): relies on the DB-API driver (MySQLdb) returning the
        # matched row count from execute() — not portable across drivers.
        return self.cursor.execute(stmt)
    def get_ARTICLE(self, group_name, id, headers_only=False, body_only=False):
        """Fetch one article by number or message-id.

        *id* containing '<' or '@' is treated as a message-id, anything
        else as a numeric article number.  Returns None when not found,
        the header string when headers_only, the body when body_only,
        otherwise the (headers, body) tuple.
        """
        stmt = self.get_article_sql()
        if str(id).count('<') > 0 or str(id).count('@') > 0:
            id = self.quote_string(id)
            stmt += " AND message_id='%s'" % (id,)
        else:
            id = int(id)
            stmt += " AND article_number=%s" % (id,)
        num_rows = self.cursor.execute(stmt)
        if num_rows == 0:
            return None
        # Row layout (see get_article_sql): 0=article_number, 1=ID,
        # 2=display_name, 3=user_email, 4=post_title, 5=datestamp,
        # 6=post_content, 7=post_parent, 8=comment_parent, 9=message_id.
        result = list(self.cursor.fetchone())
        if not body_only:
            if len(result[3]) == 0:
                author = result[2]
            else:
                author = "%s <%s>" % (result[2], result[3])
            formatted_time = strutil.get_formatted_time(time.localtime(result[5]))
            headers = []
            headers.append("Path: %s" % (settings.nntp_hostname))
            headers.append("From: %s" % (author))
            headers.append("Newsgroups: %s" % (group_name))
            headers.append("Date: %s" % (formatted_time))
            headers.append("Subject: %s" % (result[4]))
            headers.append("Message-ID: %s" % (result[9]))
            headers.append("Xref: %s %s:%s" % (settings.nntp_hostname, group_name, result[0]))
            parent = []
            if result[7] != 0:
                parent.append(self.get_message_id(result[7], group_name, 'posts'))
            if result[8] != 0:
                parent.append(self.get_message_id(result[8], group_name, 'comments'))
            if len(parent) > 0:
                # NOTE(review): RFC 5536 separates References with spaces,
                # not ', ' — verify downstream consumers before changing.
                headers.append("References: " + ', '.join(parent))
                headers.append("In-Reply-To: " + parent.pop())
            headers.append('Content-Type: text/plain; charset=utf-8')
            if headers_only:
                return "\r\n".join(headers)
        if html2text:
            # NOTE(review): Python 2 idiom — .encode('utf-8') then str
            # .replace() on the result; under Python 3 this would raise
            # (bytes.replace with str arguments).
            body = html2text.html2text(result[6].encode('utf-8').replace("\r\n", "\n").replace("\r", "\n").replace("\n\n", "</p><p>")).encode('utf-8')
        else:
            body = strutil.format_body(result[6].encode('utf-8'))
        if body_only:
            return body
        return ("\r\n".join(headers).encode('utf-8'), body)
def get_LAST(self, group_name, current_id):
meta_table = self.get_table_name(table_name='newsgroup_meta')
stmt = """
SELECT
article_number
FROM
%s
WHERE
newsgroup='%s' AND article_number < %s
ORDER BY
ID DESC
LIMIT 0, 1
""" % (meta_table, group_name, current_id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
return self.cursor.fetchone()[0]
def get_NEXT(self, group_name, current_id):
meta_table = self.get_table_name(table_name='newsgroup_meta')
stmt = """
SELECT
article_number
FROM
%s
WHERE
newsgroup='%s' AND article_number > %s
ORDER BY
ID ASC
LIMIT 0, 1
""" % (meta_table, group_name, current_id)
num_rows = self.cursor.execute(stmt)
if num_rows == 0:
return None
return self.cursor.fetchone()[0]
def get_HEAD(self, group_name, id):
return self.get_ARTICLE(group_name, id, headers_only=True)
def get_BODY(self, group_name, id):
return self.get_ARTICLE(group_name, id, body_only=True)
    def get_XOVER(self, group_name, start_id, end_id='ggg'):
        """Handle XOVER: tab-separated overview lines for an article range.

        The string 'ggg' is the caller's sentinel for "no upper bound".
        """
        self.update_newsgroup_meta()
        stmt = self.get_article_sql()
        stmt += " AND article_number >= %s" % (start_id,)
        if end_id != 'ggg':
            stmt += " AND article_number <= %s" % (end_id,)
        self.cursor.execute(stmt)
        result = list(self.cursor.fetchall())
        overviews = []
        for row in result:
            # Body is rendered only to compute byte/line counts below.
            if html2text:
                body = html2text.html2text(row[6].encode('utf-8')).encode('utf-8')
            else:
                body = strutil.format_body(row[6].encode('utf-8'))
            if row[3] == '':
                author = row[2]
            else:
                author = "%s <%s>" % (row[2], row[3])
            formatted_time = strutil.get_formatted_time(time.localtime(row[5]))
            message_id = row[9]
            line_count = body.count("\n")
            xref = 'Xref: %s %s:%s' % (settings.nntp_hostname, group_name, row[0])
            # Parent post/comment references for threading.
            parent = []
            if row[7] != 0:
                parent.append(self.get_message_id(row[7], group_name, 'posts'))
            if row[8] != 0:
                parent.append(self.get_message_id(row[8], group_name, 'comments'))
            reference = ', '.join(parent)
            # message_number <tab> subject <tab> author <tab> date <tab> message_id <tab> reference <tab> bytes <tab> lines <tab> xref
            overviews.append("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (row[0], row[4], author, formatted_time, message_id, reference, len(body), line_count, xref))
        return "\r\n".join(overviews)
    def get_XPAT(self, group_name, header, pattern, start_id, end_id='ggg'):
        """Handle XPAT: match a header against a wildcard pattern.

        Currently disabled — everything after the first return is dead
        reference code kept for a future fix.
        """
        return None  # TODO: really broken
        # XXX: need to actually check for the header values being passed as
        # XXX: not all header names map to column names on the tables
        table_name = self.get_table_name(group_name)
        stmt = """
                SELECT
                    A.ID,
                    post_parent,
                    display_name,
                    user_email,
                    post_title,
                    UNIX_TIMESTAMP(post_date_gmt) AS datestamp,
                    post_content
                FROM
                    %s A,
                    wp_users
                WHERE
                    A.post_type='post' AND A.post_status='publish' AND
                    %s REGEXP '%s' AND
                    post_author = wp_users.ID AND
                    A.ID >= %s""" % (table_name, header, strutil.format_wildcards(pattern), start_id)
        if end_id != 'ggg':
            stmt = "%s AND A.id <= %s" % (stmt, end_id)
        num_rows = self.cursor.execute(stmt)
        if num_rows == 0:
            return None
        result = list(self.cursor.fetchall())
        hdrs = []
        for row in result:
            if header.upper() == 'SUBJECT':
                hdrs.append('%s %s' % (row[0], row[4]))
            elif header.upper() == 'FROM':
                # XXX: totally broken with empty values for the email address
                hdrs.append('%s %s <%s>' % (row[0], row[2], row[3]))
            elif header.upper() == 'DATE':
                # NOTE(review): result[5] indexes the row *list*, not this
                # row's timestamp — looks like row[5] was intended.
                hdrs.append('%s %s' % (row[0], strutil.get_formatted_time(time.localtime(result[5]))))
            elif header.upper() == 'MESSAGE-ID':
                hdrs.append(row[0] + ' ' + self.get_message_id(row[0], group_name))
            elif (header.upper() == 'REFERENCES') and (row[1] != 0):
                # NOTE(review): self.message_id does not exist elsewhere in
                # this class — presumably self.get_message_id was meant.
                hdrs.append(row[0] + ' ' + self.message_id(row[1], group_name))
            elif header.upper() == 'BYTES':
                hdrs.append('%s %s' % (row[0], len(row[6])))
            elif header.upper() == 'LINES':
                hdrs.append('%s %s' % (row[0], len(row[6].split('\n'))))
            elif header.upper() == 'XREF':
                hdrs.append('%s %s %s:%s' % (row[0], settings.nntp_hostname, group_name, row[0]))
        if len(hdrs) == 0:
            return ""
        else:
            return "\r\n".join(hdrs)
    def get_LISTGROUP(self, group_name):
        """Handle LISTGROUP: CRLF-joined article numbers in *group_name*."""
        self.update_newsgroup_meta()
        meta_table = self.get_table_name(table_name='newsgroup_meta')
        stmt = """
                SELECT
                    article_number
                FROM
                    %s
                WHERE
                    newsgroup='%s'
                """ % (meta_table, group_name)
        self.cursor.execute(stmt)
        result = list(self.cursor.fetchall())
        # NOTE(review): each k is a 1-tuple, so lines render as "(n,)" —
        # confirm whether "%s" % k[0] was intended.
        return "\r\n".join(["%s" % k for k in result])
def get_XGTITLE(self, pattern=None):
return "blog.singpolyma Singpolyma" # TODO
def get_XHDR(self, group_name, header, style, range):
self.update_newsgroup_meta()
stmt = self.get_article_sql()
if style == 'range':
stmt += ' AND article_number >= %s' % (range[0],)
if len(range) == 2:
stmt += ' AND article_number <= %s' % (range[1])
else:
stmt += ' AND article_number = %s' % (range[0],)
if self.cursor.execute(stmt) == 0:
return None
result = self.cursor.fetchall()
hdrs = []
for row in result:
parent = []
if row[7] != 0:
parent.append(self.get_message_id(row[7], group_name, 'posts'))
if row[8] != 0:
parent.append(self.get_message_id(row[8], group_name, 'comments'))
if header.upper() == 'SUBJECT':
hdrs.append('%s %s' % (row[0], row[4]))
elif header.upper() == 'FROM':
hdrs.append('%s %s <%s>' % (row[0], row[2], row[3]))
elif header.upper() == 'DATE':
hdrs.append('%s %s' % (row[0], strutil.get_formatted_time(time.localtime(result[5]))))
elif header.upper() == 'MESSAGE-ID':
hdrs.append(row[0] + ' ' + row[9])
elif (header.upper() == 'REFERENCES') and len(parent) > 0:
hdrs.append('%s %s' % (row[0], ', '.join(parent)))
elif header.upper() == 'BYTES':
hdrs.append('%s %s' % (row[0], len(row[6])))
elif header.upper() == 'LINES':
hdrs.append('%s %s' % (row[0], len(row[6].split('\n'))))
elif header.upper() == 'XREF':
hdrs.append('%s %s %s:%s' % (row[0], settings.nntp_hostname, group_name, row[0]))
if len(hdrs) == 0:
return ""
else:
return "\r\n".join(hdrs)
    def do_POST(self, group_name, lines, ip_address, username=''):
        """Handle POST: store a submitted article.

        Posting is currently disabled — everything after the first return is
        unreachable reference code copied from another storage engine.
        """
        return None  # TODO, below code from other engine, just for reference
        table_name = self.get_table_name(group_name)
        body = self.get_message_body(lines)
        author, email = from_regexp.search(lines, 0).groups()
        subject = subject_regexp.search(lines, 0).groups()[0].strip()
        # patch by Andreas Wegmann <Andreas.Wegmann@VSA.de> to fix the handling of unusual encodings of messages
        lines = mime_decode_header(re.sub(q_quote_multiline, "=?\\1?Q?\\2\\3?=", lines))
        if lines.find('References') != -1:
            # get the 'modifystamp' value from the parent (if any)
            references = references_regexp.search(lines, 0).groups()
            parent_id, void = references[-1].strip().split('@')
            stmt = """
                    SELECT
                        IF(MAX(id) IS NULL, 1, MAX(id)+1) AS next_id
                    FROM
                        %s""" % (table_name)
            num_rows = self.cursor.execute(stmt)
            if num_rows == 0:
                new_id = 1
            else:
                new_id = self.cursor.fetchone()[0]
            # Inherit thread and modifystamp from the approved parent.
            stmt = """
                    SELECT
                        id,
                        thread,
                        modifystamp
                    FROM
                        %s
                    WHERE
                        approved='Y' AND
                        id=%s
                    GROUP BY
                        id""" % (table_name, parent_id)
            num_rows = self.cursor.execute(stmt)
            if num_rows == 0:
                return None
            parent_id, thread_id, modifystamp = self.cursor.fetchone()
        else:
            # Top-level post: new thread rooted at the new id.
            stmt = """
                    SELECT
                        IF(MAX(id) IS NULL, 1, MAX(id)+1) AS next_id,
                        UNIX_TIMESTAMP()
                    FROM
                        %s""" % (table_name)
            self.cursor.execute(stmt)
            new_id, modifystamp = self.cursor.fetchone()
            parent_id = 0
            thread_id = new_id
        stmt = """
                INSERT INTO
                    %s
                (
                    id,
                    datestamp,
                    thread,
                    parent,
                    author,
                    subject,
                    email,
                    host,
                    email_reply,
                    approved,
                    msgid,
                    modifystamp,
                    userid
                ) VALUES (
                    %s,
                    NOW(),
                    %s,
                    %s,
                    '%s',
                    '%s',
                    '%s',
                    '%s',
                    'N',
                    'Y',
                    '',
                    %s,
                    0
                )
                """ % (table_name, new_id, thread_id, parent_id, self.quote_string(author.strip()), self.quote_string(subject), self.quote_string(email), ip_address, modifystamp)
        if not self.cursor.execute(stmt):
            return None
        else:
            # insert into the '*_bodies' table
            stmt = """
                    INSERT INTO
                        %s_bodies
                    (
                        id,
                        body,
                        thread
                    ) VALUES (
                        %s,
                        '%s',
                        %s
                    )""" % (table_name, new_id, self.quote_string(body), thread_id)
            if not self.cursor.execute(stmt):
                # delete from 'table_name' before returning..
                stmt = """
                        DELETE FROM
                            %s
                        WHERE
                            id=%s""" % (table_name, new_id)
                self.cursor.execute(stmt)
                return None
            else:
                # alert forum moderators
                self.send_notifications(group_name, new_id, thread_id, parent_id, author.strip(), email, subject, body)
                return 1
|
|
"""Support for Z-Wave climate devices."""
# Because we do not compile openzwave on CI
import logging
from typing import Optional, Tuple
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import ZWaveDeviceEntity, const
_LOGGER = logging.getLogger(__name__)
CONF_NAME = "name"
DEFAULT_NAME = "Z-Wave Climate"
# Remotec ZXT-120 manufacturer/product ids (hex) used for the workaround.
REMOTEC = 0x5254
REMOTEC_ZXT_120 = 0x8377
REMOTEC_ZXT_120_THERMOSTAT = (REMOTEC, REMOTEC_ZXT_120)
ATTR_OPERATING_STATE = "operating_state"
ATTR_FAN_STATE = "fan_state"
ATTR_FAN_ACTION = "fan_action"
AUX_HEAT_ZWAVE_MODE = "Aux Heat"
# Device is in manufacturer specific mode (e.g. setting the valve manually)
PRESET_MANUFACTURER_SPECIFIC = "Manufacturer Specific"
WORKAROUND_ZXT_120 = "zxt_120"
DEVICE_MAPPINGS = {REMOTEC_ZXT_120_THERMOSTAT: WORKAROUND_ZXT_120}
# Z-Wave mode string (lowercased) -> Home Assistant HVAC mode.
HVAC_STATE_MAPPINGS = {
    "off": HVAC_MODE_OFF,
    "heat": HVAC_MODE_HEAT,
    "heat mode": HVAC_MODE_HEAT,
    "heat (default)": HVAC_MODE_HEAT,
    "furnace": HVAC_MODE_HEAT,
    "fan only": HVAC_MODE_FAN_ONLY,
    "dry air": HVAC_MODE_DRY,
    "moist air": HVAC_MODE_DRY,
    "cool": HVAC_MODE_COOL,
    "heat_cool": HVAC_MODE_HEAT_COOL,
    "auto": HVAC_MODE_HEAT_COOL,
    "auto changeover": HVAC_MODE_HEAT_COOL,
}
# Z-Wave mode string (lowercased) -> names of the setpoint value(s) used
# while that mode is active; resolved via getattr on the values object.
MODE_SETPOINT_MAPPINGS = {
    "off": (),
    "heat": ("setpoint_heating",),
    "cool": ("setpoint_cooling",),
    "auto": ("setpoint_heating", "setpoint_cooling"),
    "aux heat": ("setpoint_heating",),
    "furnace": ("setpoint_furnace",),
    "dry air": ("setpoint_dry_air",),
    "moist air": ("setpoint_moist_air",),
    "auto changeover": ("setpoint_auto_changeover",),
    "heat econ": ("setpoint_eco_heating",),
    "cool econ": ("setpoint_eco_cooling",),
    "away": ("setpoint_away_heating", "setpoint_away_cooling"),
    "full power": ("setpoint_full_power",),
    # aliases found in xml configs
    "comfort": ("setpoint_heating",),
    "heat mode": ("setpoint_heating",),
    "heat (default)": ("setpoint_heating",),
    "dry floor": ("setpoint_dry_air",),
    "heat eco": ("setpoint_eco_heating",),
    "energy saving": ("setpoint_eco_heating",),
    "energy heat": ("setpoint_eco_heating",),
    "vacation": ("setpoint_away_heating", "setpoint_away_cooling"),
    # for tests
    "heat_cool": ("setpoint_heating", "setpoint_cooling"),
}
# Z-Wave operating-state string (lowercased) -> HA current-HVAC action.
HVAC_CURRENT_MAPPINGS = {
    "idle": CURRENT_HVAC_IDLE,
    "heat": CURRENT_HVAC_HEAT,
    "pending heat": CURRENT_HVAC_IDLE,
    "heating": CURRENT_HVAC_HEAT,
    "cool": CURRENT_HVAC_COOL,
    "pending cool": CURRENT_HVAC_IDLE,
    "cooling": CURRENT_HVAC_COOL,
    "fan only": CURRENT_HVAC_FAN,
    "vent / economiser": CURRENT_HVAC_FAN,
    "off": CURRENT_HVAC_OFF,
}
# Z-Wave mode string (lowercased) -> HA preset mode.
PRESET_MAPPINGS = {
    "away": PRESET_AWAY,
    "full power": PRESET_BOOST,
    "manufacturer specific": PRESET_MANUFACTURER_SPECIFIC,
}
# Preference order when picking a fallback (default) HVAC mode.
DEFAULT_HVAC_MODES = [
    HVAC_MODE_HEAT_COOL,
    HVAC_MODE_HEAT,
    HVAC_MODE_COOL,
    HVAC_MODE_FAN_ONLY,
    HVAC_MODE_DRY,
    HVAC_MODE_OFF,
    HVAC_MODE_AUTO,
]
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Z-Wave Climate device from Config Entry."""

    @callback
    def _async_add_climate(climate):
        """Register a newly discovered Z-Wave climate entity."""
        async_add_entities([climate])

    async_dispatcher_connect(hass, "zwave_new_climate", _async_add_climate)
def get_device(hass, values, **kwargs):
    """Create Z-Wave entity device.

    Chooses the single- or multiple-setpoint flavour based on the primary
    value's command class; returns None for anything else.
    """
    temp_unit = hass.config.units.temperature_unit
    command_class = values.primary.command_class
    if command_class == const.COMMAND_CLASS_THERMOSTAT_SETPOINT:
        return ZWaveClimateSingleSetpoint(values, temp_unit)
    if command_class == const.COMMAND_CLASS_THERMOSTAT_MODE:
        return ZWaveClimateMultipleSetpoint(values, temp_unit)
    return None
class ZWaveClimateBase(ZWaveDeviceEntity, ClimateDevice):
    """Representation of a Z-Wave Climate device."""
    # Subclasses must implement _mode() (the thermostat-mode Z-Wave value)
    # and _current_mode_setpoints() (the setpoint value(s) for that mode).
    def __init__(self, values, temp_unit):
        """Initialize the Z-Wave climate device."""
        ZWaveDeviceEntity.__init__(self, values, DOMAIN)
        self._target_temperature = None
        self._target_temperature_range = (None, None)
        self._current_temperature = None
        self._hvac_action = None
        self._hvac_list = None  # [zwave_mode]
        self._hvac_mapping = None  # {ha_mode:zwave_mode}
        self._hvac_mode = None  # ha_mode
        self._aux_heat = None
        self._default_hvac_mode = None  # ha_mode
        self._preset_mapping = None  # {ha_mode:zwave_mode}
        self._preset_list = None  # [zwave_mode]
        self._preset_mode = None  # ha_mode if exists, else zwave_mode
        self._current_fan_mode = None
        self._fan_modes = None
        self._fan_action = None
        self._current_swing_mode = None
        self._swing_modes = None
        self._unit = temp_unit
        _LOGGER.debug("temp_unit is %s", self._unit)
        self._zxt_120 = None
        # Make sure that we have values for the key before converting to int
        if self.node.manufacturer_id.strip() and self.node.product_id.strip():
            specific_sensor_key = (
                int(self.node.manufacturer_id, 16),
                int(self.node.product_id, 16),
            )
            if specific_sensor_key in DEVICE_MAPPINGS:
                if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZXT_120:
                    _LOGGER.debug("Remotec ZXT-120 Zwave Thermostat workaround")
                    self._zxt_120 = 1
        self.update_properties()
    def _mode(self) -> None:
        """Return thermostat mode Z-Wave value."""
        # NOTE(review): subclasses return a Z-Wave value object, so the
        # "-> None" annotation is inaccurate.
        raise NotImplementedError()
    def _current_mode_setpoints(self) -> Tuple:
        """Return a tuple of current setpoint Z-Wave value(s)."""
        raise NotImplementedError()
    @property
    def supported_features(self):
        """Return the list of supported features."""
        support = SUPPORT_TARGET_TEMPERATURE
        if self._hvac_list and HVAC_MODE_HEAT_COOL in self._hvac_list:
            support |= SUPPORT_TARGET_TEMPERATURE_RANGE
        # NOTE(review): PRESET_AWAY also enabling TARGET_TEMPERATURE_RANGE
        # mirrors the away heat+cool setpoint pair — confirm intended.
        if self._preset_list and PRESET_AWAY in self._preset_list:
            support |= SUPPORT_TARGET_TEMPERATURE_RANGE
        if self.values.fan_mode:
            support |= SUPPORT_FAN_MODE
        if self._zxt_120 == 1 and self.values.zxt_120_swing_mode:
            support |= SUPPORT_SWING_MODE
        if self._aux_heat:
            support |= SUPPORT_AUX_HEAT
        if self._preset_list:
            support |= SUPPORT_PRESET_MODE
        return support
    def update_properties(self):
        """Handle the data changes for node values."""
        # Operation Mode
        self._update_operation_mode()
        # Current Temp
        self._update_current_temp()
        # Fan Mode
        self._update_fan_mode()
        # Swing mode
        self._update_swing_mode()
        # Set point
        self._update_target_temp()
        # Operating state
        self._update_operating_state()
        # Fan operating state
        self._update_fan_state()
    def _update_operation_mode(self):
        """Update hvac and preset modes."""
        # Each Z-Wave mode string is sorted into one of three buckets:
        # HVAC modes, preset modes, or the aux-heat flag.
        if self._mode():
            self._hvac_list = []
            self._hvac_mapping = {}
            self._preset_list = []
            self._preset_mapping = {}
            mode_list = self._mode().data_items
            if mode_list:
                for mode in mode_list:
                    ha_mode = HVAC_STATE_MAPPINGS.get(str(mode).lower())
                    ha_preset = PRESET_MAPPINGS.get(str(mode).lower())
                    if mode == AUX_HEAT_ZWAVE_MODE:
                        # Aux Heat should not be included in any mapping
                        self._aux_heat = True
                    elif ha_mode and ha_mode not in self._hvac_mapping:
                        self._hvac_mapping[ha_mode] = mode
                        self._hvac_list.append(ha_mode)
                    elif ha_preset and ha_preset not in self._preset_mapping:
                        self._preset_mapping[ha_preset] = mode
                        self._preset_list.append(ha_preset)
                    else:
                        # If nothing matches
                        self._preset_list.append(mode)
            # Default operation mode
            for mode in DEFAULT_HVAC_MODES:
                if mode in self._hvac_mapping.keys():
                    self._default_hvac_mode = mode
                    break
            if self._preset_list:
                # Presets are supported
                self._preset_list.append(PRESET_NONE)
            current_mode = self._mode().data
            _LOGGER.debug("current_mode=%s", current_mode)
            _hvac_temp = next(
                (
                    key
                    for key, value in self._hvac_mapping.items()
                    if value == current_mode
                ),
                None,
            )
            if _hvac_temp is None:
                # The current mode is not a hvac mode
                if (
                    "heat" in current_mode.lower()
                    and HVAC_MODE_HEAT in self._hvac_mapping.keys()
                ):
                    # The current preset modes maps to HVAC_MODE_HEAT
                    _LOGGER.debug("Mapped to HEAT")
                    self._hvac_mode = HVAC_MODE_HEAT
                elif (
                    "cool" in current_mode.lower()
                    and HVAC_MODE_COOL in self._hvac_mapping.keys()
                ):
                    # The current preset modes maps to HVAC_MODE_COOL
                    _LOGGER.debug("Mapped to COOL")
                    self._hvac_mode = HVAC_MODE_COOL
                else:
                    # The current preset modes maps to self._default_hvac_mode
                    _LOGGER.debug("Mapped to DEFAULT")
                    self._hvac_mode = self._default_hvac_mode
                self._preset_mode = next(
                    (
                        key
                        for key, value in self._preset_mapping.items()
                        if value == current_mode
                    ),
                    current_mode,
                )
            else:
                # The current mode is a hvac mode
                self._hvac_mode = _hvac_temp
                self._preset_mode = PRESET_NONE
        _LOGGER.debug("self._hvac_mapping=%s", self._hvac_mapping)
        _LOGGER.debug("self._hvac_list=%s", self._hvac_list)
        _LOGGER.debug("self._hvac_mode=%s", self._hvac_mode)
        _LOGGER.debug("self._default_hvac_mode=%s", self._default_hvac_mode)
        _LOGGER.debug("self._hvac_action=%s", self._hvac_action)
        _LOGGER.debug("self._aux_heat=%s", self._aux_heat)
        _LOGGER.debug("self._preset_mapping=%s", self._preset_mapping)
        _LOGGER.debug("self._preset_list=%s", self._preset_list)
        _LOGGER.debug("self._preset_mode=%s", self._preset_mode)
    def _update_current_temp(self):
        """Update current temperature."""
        if self.values.temperature:
            self._current_temperature = self.values.temperature.data
            # Prefer the unit reported by the device over the HA default.
            device_unit = self.values.temperature.units
            if device_unit is not None:
                self._unit = device_unit
    def _update_fan_mode(self):
        """Update fan mode."""
        if self.values.fan_mode:
            self._current_fan_mode = self.values.fan_mode.data
            fan_modes = self.values.fan_mode.data_items
            if fan_modes:
                self._fan_modes = list(fan_modes)
        _LOGGER.debug("self._fan_modes=%s", self._fan_modes)
        _LOGGER.debug("self._current_fan_mode=%s", self._current_fan_mode)
    def _update_swing_mode(self):
        """Update swing mode."""
        # Swing mode only exists via the ZXT-120 workaround value.
        if self._zxt_120 == 1:
            if self.values.zxt_120_swing_mode:
                self._current_swing_mode = self.values.zxt_120_swing_mode.data
                swing_modes = self.values.zxt_120_swing_mode.data_items
                if swing_modes:
                    self._swing_modes = list(swing_modes)
            _LOGGER.debug("self._swing_modes=%s", self._swing_modes)
            _LOGGER.debug("self._current_swing_mode=%s", self._current_swing_mode)
    def _update_target_temp(self):
        """Update target temperature."""
        # One setpoint -> single target; two -> (low, high) range.
        current_setpoints = self._current_mode_setpoints()
        self._target_temperature = None
        self._target_temperature_range = (None, None)
        if len(current_setpoints) == 1:
            (setpoint,) = current_setpoints
            if setpoint is not None:
                self._target_temperature = round((float(setpoint.data)), 1)
        elif len(current_setpoints) == 2:
            (setpoint_low, setpoint_high) = current_setpoints
            target_low, target_high = None, None
            if setpoint_low is not None:
                target_low = round((float(setpoint_low.data)), 1)
            if setpoint_high is not None:
                target_high = round((float(setpoint_high.data)), 1)
            self._target_temperature_range = (target_low, target_high)
    def _update_operating_state(self):
        """Update operating state."""
        if self.values.operating_state:
            mode = self.values.operating_state.data
            # Unknown states fall through as the raw Z-Wave string.
            self._hvac_action = HVAC_CURRENT_MAPPINGS.get(str(mode).lower(), mode)
    def _update_fan_state(self):
        """Update fan state."""
        if self.values.fan_action:
            self._fan_action = self.values.fan_action.data
    @property
    def fan_mode(self):
        """Return the fan speed set."""
        return self._current_fan_mode
    @property
    def fan_modes(self):
        """Return a list of available fan modes."""
        return self._fan_modes
    @property
    def swing_mode(self):
        """Return the swing mode set."""
        return self._current_swing_mode
    @property
    def swing_modes(self):
        """Return a list of available swing modes."""
        return self._swing_modes
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        if self._unit == "C":
            return TEMP_CELSIUS
        if self._unit == "F":
            return TEMP_FAHRENHEIT
        return self._unit
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature
    @property
    def hvac_mode(self):
        """Return hvac operation ie. heat, cool mode.
        Need to be one of HVAC_MODE_*.
        """
        if self._mode():
            return self._hvac_mode
        return self._default_hvac_mode
    @property
    def hvac_modes(self):
        """Return the list of available hvac operation modes.
        Need to be a subset of HVAC_MODES.
        """
        if self._mode():
            return self._hvac_list
        return []
    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported.
        Need to be one of CURRENT_HVAC_*.
        """
        return self._hvac_action
    @property
    def is_aux_heat(self):
        """Return true if aux heater."""
        if not self._aux_heat:
            return None
        if self._mode().data == AUX_HEAT_ZWAVE_MODE:
            return True
        return False
    @property
    def preset_mode(self):
        """Return preset operation ie. eco, away.
        Need to be one of PRESET_*.
        """
        if self._mode():
            return self._preset_mode
        return PRESET_NONE
    @property
    def preset_modes(self):
        """Return the list of available preset operation modes.
        Need to be a subset of PRESET_MODES.
        """
        if self._mode():
            return self._preset_list
        return []
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature
    @property
    def target_temperature_low(self) -> Optional[float]:
        """Return the lowbound target temperature we try to reach."""
        return self._target_temperature_range[0]
    @property
    def target_temperature_high(self) -> Optional[float]:
        """Return the highbound target temperature we try to reach."""
        return self._target_temperature_range[1]
    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        # Writes to one setpoint (ATTR_TEMPERATURE) or to the low/high pair,
        # mirroring the read path in _update_target_temp().
        current_setpoints = self._current_mode_setpoints()
        if len(current_setpoints) == 1:
            (setpoint,) = current_setpoints
            target_temp = kwargs.get(ATTR_TEMPERATURE)
            if setpoint is not None and target_temp is not None:
                _LOGGER.debug("Set temperature to %s", target_temp)
                setpoint.data = target_temp
        elif len(current_setpoints) == 2:
            (setpoint_low, setpoint_high) = current_setpoints
            target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
            target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
            if setpoint_low is not None and target_temp_low is not None:
                _LOGGER.debug("Set low temperature to %s", target_temp_low)
                setpoint_low.data = target_temp_low
            if setpoint_high is not None and target_temp_high is not None:
                _LOGGER.debug("Set high temperature to %s", target_temp_high)
                setpoint_high.data = target_temp_high
    def set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        _LOGGER.debug("Set fan mode to %s", fan_mode)
        if not self.values.fan_mode:
            return
        self.values.fan_mode.data = fan_mode
    def set_hvac_mode(self, hvac_mode):
        """Set new target hvac mode."""
        _LOGGER.debug("Set hvac_mode to %s", hvac_mode)
        if not self._mode():
            return
        operation_mode = self._hvac_mapping.get(hvac_mode)
        _LOGGER.debug("Set operation_mode to %s", operation_mode)
        self._mode().data = operation_mode
    def turn_aux_heat_on(self):
        """Turn auxiliary heater on."""
        if not self._aux_heat:
            return
        operation_mode = AUX_HEAT_ZWAVE_MODE
        _LOGGER.debug("Aux heat on. Set operation mode to %s", operation_mode)
        self._mode().data = operation_mode
    def turn_aux_heat_off(self):
        """Turn auxiliary heater off."""
        if not self._aux_heat:
            return
        # Fall back to plain heat if available, otherwise off.
        if HVAC_MODE_HEAT in self._hvac_mapping:
            operation_mode = self._hvac_mapping.get(HVAC_MODE_HEAT)
        else:
            operation_mode = self._hvac_mapping.get(HVAC_MODE_OFF)
        _LOGGER.debug("Aux heat off. Set operation mode to %s", operation_mode)
        self._mode().data = operation_mode
    def set_preset_mode(self, preset_mode):
        """Set new target preset mode."""
        _LOGGER.debug("Set preset_mode to %s", preset_mode)
        if not self._mode():
            return
        if preset_mode == PRESET_NONE:
            # Activate the current hvac mode
            self._update_operation_mode()
            operation_mode = self._hvac_mapping.get(self.hvac_mode)
            _LOGGER.debug("Set operation_mode to %s", operation_mode)
            self._mode().data = operation_mode
        else:
            # Unmapped presets pass the raw Z-Wave mode string through.
            operation_mode = self._preset_mapping.get(preset_mode, preset_mode)
            _LOGGER.debug("Set operation_mode to %s", operation_mode)
            self._mode().data = operation_mode
    def set_swing_mode(self, swing_mode):
        """Set new target swing mode."""
        _LOGGER.debug("Set swing_mode to %s", swing_mode)
        if self._zxt_120 == 1:
            if self.values.zxt_120_swing_mode:
                self.values.zxt_120_swing_mode.data = swing_mode
    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        data = super().device_state_attributes
        if self._fan_action:
            data[ATTR_FAN_ACTION] = self._fan_action
        return data
class ZWaveClimateSingleSetpoint(ZWaveClimateBase):
    """Representation of a single setpoint Z-Wave thermostat device."""

    def __init__(self, values, temp_unit):
        """Initialize the Z-Wave climate device."""
        ZWaveClimateBase.__init__(self, values, temp_unit)

    def _mode(self):
        """Return thermostat mode Z-Wave value.

        Fixed: was annotated ``-> None`` although it returns the mode
        Z-Wave value object (or a falsy placeholder when absent).
        """
        return self.values.mode

    def _current_mode_setpoints(self) -> Tuple:
        """Return a tuple of current setpoint Z-Wave value(s)."""
        # Single-setpoint devices expose the setpoint as the primary value.
        return (self.values.primary,)
class ZWaveClimateMultipleSetpoint(ZWaveClimateBase):
    """Representation of a multiple setpoint Z-Wave thermostat device."""

    def __init__(self, values, temp_unit):
        """Initialize the Z-Wave climate device."""
        ZWaveClimateBase.__init__(self, values, temp_unit)

    def _mode(self):
        """Return thermostat mode Z-Wave value.

        Fixed: was annotated ``-> None`` although it returns the primary
        Z-Wave value object.
        """
        return self.values.primary

    def _current_mode_setpoints(self) -> Tuple:
        """Return a tuple of current setpoint Z-Wave value(s).

        Resolves the setpoint value names mapped to the active mode;
        missing attributes become None entries.
        """
        current_mode = str(self.values.primary.data).lower()
        setpoints_names = MODE_SETPOINT_MAPPINGS.get(current_mode, ())
        return tuple(getattr(self.values, name, None) for name in setpoints_names)
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ddt
from eventlet import timeout as etimeout
import mock
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import units
from nova.compute import vm_states
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import flavor as flavor_obj
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_virtual_interface
from nova.tests.unit.virt.hyperv import test_base
from nova.virt import hardware
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
CONF = cfg.CONF
@ddt.ddt
class VMOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V VMOps class."""
# Fake values shared by the test cases below.
_FAKE_TIMEOUT = 2
FAKE_SIZE = 10
FAKE_DIR = 'fake_dir'
FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s'  # %s gets the disk format (vhd/vhdx)
FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso'
FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd'
FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
FAKE_LOG = 'fake_log'
_WIN_VERSION_6_3 = '6.3.0'
_WIN_VERSION_10 = '10.0'
ISO9660 = 'iso9660'
_FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd'
def setUp(self):
    """Create a VMOps instance with every collaborator mocked out.

    Each test then only exercises VMOps' own orchestration logic; the
    os-win utils classes are replaced with MagicMocks.
    """
    super(VMOpsTestCase, self).setUp()
    self.context = 'fake-context'

    self._vmops = vmops.VMOps(virtapi=mock.MagicMock())
    self._vmops._vmutils = mock.MagicMock()
    self._vmops._metricsutils = mock.MagicMock()
    self._vmops._vhdutils = mock.MagicMock()
    self._vmops._pathutils = mock.MagicMock()
    self._vmops._hostutils = mock.MagicMock()
    self._vmops._migrutils = mock.MagicMock()
    self._vmops._serial_console_ops = mock.MagicMock()
    self._vmops._block_dev_man = mock.MagicMock()
    self._vmops._vif_driver = mock.MagicMock()
def test_list_instances(self):
    """list_instances() proxies straight through to vmutils."""
    fake_vm = mock.MagicMock()
    self._vmops._vmutils.list_instances.return_value = [fake_vm]

    listed = self._vmops.list_instances()

    self._vmops._vmutils.list_instances.assert_called_once_with()
    self.assertEqual(listed, [fake_vm])
def _test_get_info(self, vm_exists):
    """Common scenario for get_info(): existing vs. missing VM."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_info = mock.MagicMock(spec_set=dict)
    fake_info = {'EnabledState': 2,
                 'MemoryUsage': mock.sentinel.FAKE_MEM_KB,
                 'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU,
                 'UpTime': mock.sentinel.FAKE_CPU_NS}

    def getitem(key):
        return fake_info[key]
    mock_info.__getitem__.side_effect = getitem

    # EnabledState 2 maps to the corresponding nova power state.
    expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2])

    self._vmops._vmutils.vm_exists.return_value = vm_exists
    self._vmops._vmutils.get_vm_summary_info.return_value = mock_info

    if not vm_exists:
        # A missing VM must surface as InstanceNotFound.
        self.assertRaises(exception.InstanceNotFound,
                          self._vmops.get_info, mock_instance)
    else:
        response = self._vmops.get_info(mock_instance)
        self._vmops._vmutils.vm_exists.assert_called_once_with(
            mock_instance.name)
        self._vmops._vmutils.get_vm_summary_info.assert_called_once_with(
            mock_instance.name)
        self.assertEqual(response, expected)

def test_get_info(self):
    self._test_get_info(vm_exists=True)

def test_get_info_exception(self):
    self._test_get_info(vm_exists=False)
@mock.patch.object(vmops.VMOps, 'check_vm_image_type')
@mock.patch.object(vmops.VMOps, '_create_root_vhd')
def test_create_root_device_type_disk(self, mock_create_root_device,
                                      mock_check_vm_image_type):
    """A 'disk' type root device creates a root VHD and validates it."""
    # NOTE: decorators are applied bottom-up, so mock_create_root_device
    # is the mock for _create_root_vhd despite its name.
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_root_disk_info = {'type': constants.DISK}

    self._vmops._create_root_device(self.context, mock_instance,
                                    mock_root_disk_info,
                                    mock.sentinel.VM_GEN_1)

    mock_create_root_device.assert_called_once_with(
        self.context, mock_instance)
    mock_check_vm_image_type.assert_called_once_with(
        mock_instance.uuid, mock.sentinel.VM_GEN_1,
        mock_create_root_device.return_value)

def _prepare_create_root_device_mocks(self, use_cow_images, vhd_format,
                                      vhd_size):
    """Wire up vhdutils/pathutils mocks for the root VHD creation tests.

    Returns a fake instance whose flavor root disk is FAKE_SIZE GB; the
    cached image reports vhd_size GB.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_instance.flavor.root_gb = self.FAKE_SIZE
    self.flags(use_cow_images=use_cow_images)
    self._vmops._vhdutils.get_vhd_info.return_value = {'VirtualSize':
                                                       vhd_size * units.Gi}
    self._vmops._vhdutils.get_vhd_format.return_value = vhd_format
    root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi
    get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
    get_size.return_value = root_vhd_internal_size
    self._vmops._pathutils.exists.return_value = True

    return mock_instance
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_exception(self, mock_get_cached_image,
                                    vhd_format):
    """Image bigger than the flavor root disk: creation fails and the
    partially created root disk is removed."""
    mock_instance = self._prepare_create_root_device_mocks(
        use_cow_images=False, vhd_format=vhd_format,
        vhd_size=(self.FAKE_SIZE + 1))
    fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
    mock_get_cached_image.return_value = fake_vhd_path
    fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value

    self.assertRaises(exception.FlavorDiskSmallerThanImage,
                      self._vmops._create_root_vhd, self.context,
                      mock_instance)

    self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
    self._vmops._pathutils.exists.assert_called_once_with(
        fake_root_path)
    self._vmops._pathutils.remove.assert_called_once_with(
        fake_root_path)

@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format):
    """use_cow_images=True: a differencing disk is created on top of the
    cached image; only VHDX children are resized."""
    mock_instance = self._prepare_create_root_device_mocks(
        use_cow_images=True, vhd_format=vhd_format,
        vhd_size=(self.FAKE_SIZE - 1))
    fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
    mock_get_cached_image.return_value = fake_vhd_path

    fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
    root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi
    get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size

    response = self._vmops._create_root_vhd(context=self.context,
                                            instance=mock_instance)

    self.assertEqual(fake_root_path, response)
    self._vmops._pathutils.get_root_vhd_path.assert_called_with(
        mock_instance.name, vhd_format, False)
    differencing_vhd = self._vmops._vhdutils.create_differencing_vhd
    differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path)
    self._vmops._vhdutils.get_vhd_info.assert_called_once_with(
        fake_vhd_path)

    if vhd_format is constants.DISK_FORMAT_VHD:
        # Plain VHD differencing disks are not resized.
        self.assertFalse(get_size.called)
        self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
    else:
        get_size.assert_called_once_with(fake_vhd_path,
                                         root_vhd_internal_size)
        self._vmops._vhdutils.resize_vhd.assert_called_once_with(
            fake_root_path, root_vhd_internal_size, is_file_max_size=False)

@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd(self, mock_get_cached_image, vhd_format,
                          is_rescue_vhd=False):
    """use_cow_images=False: the cached image is copied and, unless it
    is a rescue disk, resized up to the flavor root size."""
    mock_instance = self._prepare_create_root_device_mocks(
        use_cow_images=False, vhd_format=vhd_format,
        vhd_size=(self.FAKE_SIZE - 1))
    fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
    mock_get_cached_image.return_value = fake_vhd_path
    rescue_image_id = (
        mock.sentinel.rescue_image_id if is_rescue_vhd else None)

    fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
    root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi
    get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size

    response = self._vmops._create_root_vhd(
        context=self.context,
        instance=mock_instance,
        rescue_image_id=rescue_image_id)

    self.assertEqual(fake_root_path, response)
    mock_get_cached_image.assert_called_once_with(self.context,
                                                  mock_instance,
                                                  rescue_image_id)
    self._vmops._pathutils.get_root_vhd_path.assert_called_with(
        mock_instance.name, vhd_format, is_rescue_vhd)
    self._vmops._pathutils.copyfile.assert_called_once_with(
        fake_vhd_path, fake_root_path)
    get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size)
    if is_rescue_vhd:
        # Rescue disks keep the image size.
        self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
    else:
        self._vmops._vhdutils.resize_vhd.assert_called_once_with(
            fake_root_path, root_vhd_internal_size,
            is_file_max_size=False)
def test_create_root_vhd(self):
    self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD)

def test_create_root_vhdx(self):
    self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX)

def test_create_root_vhd_use_cow_images_true(self):
    self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD)

def test_create_root_vhdx_use_cow_images_true(self):
    self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX)

def test_create_rescue_vhd(self):
    self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD,
                               is_rescue_vhd=True)

def test_create_root_vhdx_size_less_than_internal(self):
    # NOTE(review): the name says "vhdx" but DISK_FORMAT_VHD is passed;
    # looks like a copy/paste slip — confirm which format was intended.
    self._test_create_root_vhd_exception(
        vhd_format=constants.DISK_FORMAT_VHD)
def test_is_resize_needed_exception(self):
    """Requesting a size below the current one must raise."""
    fake_inst = mock.MagicMock()
    self.assertRaises(
        exception.FlavorDiskSmallerThanImage,
        self._vmops._is_resize_needed,
        mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1,
        fake_inst)

def test_is_resize_needed_true(self):
    """Growing the disk reports that a resize is needed."""
    fake_inst = mock.MagicMock()
    needed = self._vmops._is_resize_needed(
        mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1,
        fake_inst)
    self.assertTrue(needed)

def test_is_resize_needed_false(self):
    """Equal sizes mean no resize is needed."""
    fake_inst = mock.MagicMock()
    needed = self._vmops._is_resize_needed(
        mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE,
        fake_inst)
    self.assertFalse(needed)
@mock.patch.object(vmops.VMOps, 'create_ephemeral_disk')
def test_create_ephemerals(self, mock_create_ephemeral_disk):
    """Each ephemeral gets an 'ephN' path and a disk creation call."""
    mock_instance = fake_instance.fake_instance_obj(self.context)

    fake_ephemerals = [dict(), dict()]
    self._vmops._vhdutils.get_best_supported_vhd_format.return_value = (
        mock.sentinel.format)
    self._vmops._pathutils.get_ephemeral_vhd_path.side_effect = [
        mock.sentinel.FAKE_PATH0, mock.sentinel.FAKE_PATH1]

    self._vmops._create_ephemerals(mock_instance, fake_ephemerals)

    self._vmops._pathutils.get_ephemeral_vhd_path.assert_has_calls(
        [mock.call(mock_instance.name, mock.sentinel.format, 'eph0'),
         mock.call(mock_instance.name, mock.sentinel.format, 'eph1')])
    mock_create_ephemeral_disk.assert_has_calls(
        [mock.call(mock_instance.name, fake_ephemerals[0]),
         mock.call(mock_instance.name, fake_ephemerals[1])])

def test_create_ephemeral_disk(self):
    """The ephemeral disk is created as a dynamic VHD sized in GiB."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_ephemeral_info = {'path': 'fake_eph_path',
                           'size': 10}

    self._vmops.create_ephemeral_disk(mock_instance.name,
                                      mock_ephemeral_info)

    mock_create_dynamic_vhd = self._vmops._vhdutils.create_dynamic_vhd
    mock_create_dynamic_vhd.assert_called_once_with('fake_eph_path',
                                                    10 * units.Gi)
@mock.patch.object(vmops.objects, 'PCIDeviceBus')
@mock.patch.object(vmops.objects, 'NetworkInterfaceMetadata')
@mock.patch.object(vmops.objects.VirtualInterfaceList,
                   'get_by_instance_uuid')
def test_get_vif_metadata(self, mock_get_by_inst_uuid,
                          mock_NetworkInterfaceMetadata, mock_PCIDevBus):
    """Only VIFs carrying a tag produce interface metadata entries."""
    mock_vif = mock.MagicMock(tag='taggy')
    # Make `'tag' in vif` behave like an attribute presence check.
    mock_vif.__contains__.side_effect = (
        lambda attr: getattr(mock_vif, attr, None) is not None)
    mock_get_by_inst_uuid.return_value = [mock_vif,
                                          mock.MagicMock(tag=None)]

    vif_metadata = self._vmops._get_vif_metadata(self.context,
                                                 mock.sentinel.instance_id)

    mock_get_by_inst_uuid.assert_called_once_with(
        self.context, mock.sentinel.instance_id)
    mock_NetworkInterfaceMetadata.assert_called_once_with(
        mac=mock_vif.address,
        bus=mock_PCIDevBus.return_value,
        tags=[mock_vif.tag])
    self.assertEqual([mock_NetworkInterfaceMetadata.return_value],
                     vif_metadata)

@mock.patch.object(vmops.objects, 'InstanceDeviceMetadata')
@mock.patch.object(vmops.VMOps, '_get_vif_metadata')
def test_save_device_metadata(self, mock_get_vif_metadata,
                              mock_InstanceDeviceMetadata):
    """VIF and BDM metadata are merged onto instance.device_metadata."""
    mock_instance = mock.MagicMock()
    mock_get_vif_metadata.return_value = [mock.sentinel.vif_metadata]
    self._vmops._block_dev_man.get_bdm_metadata.return_value = [
        mock.sentinel.bdm_metadata]

    self._vmops._save_device_metadata(self.context, mock_instance,
                                      mock.sentinel.block_device_info)

    mock_get_vif_metadata.assert_called_once_with(self.context,
                                                  mock_instance.uuid)
    self._vmops._block_dev_man.get_bdm_metadata.assert_called_once_with(
        self.context, mock_instance, mock.sentinel.block_device_info)

    expected_metadata = [mock.sentinel.vif_metadata,
                         mock.sentinel.bdm_metadata]
    mock_InstanceDeviceMetadata.assert_called_once_with(
        devices=expected_metadata)
    self.assertEqual(mock_InstanceDeviceMetadata.return_value,
                     mock_instance.device_metadata)
def test_set_boot_order(self):
    """The boot order comes from the block device manager and is then
    pushed down to vmutils."""
    self._vmops.set_boot_order(mock.sentinel.instance_name,
                               mock.sentinel.vm_gen,
                               mock.sentinel.bdi)

    mock_get_order = self._vmops._block_dev_man.get_boot_order
    mock_get_order.assert_called_once_with(mock.sentinel.vm_gen,
                                           mock.sentinel.bdi)
    self._vmops._vmutils.set_boot_order.assert_called_once_with(
        mock.sentinel.instance_name, mock_get_order.return_value)
@mock.patch.object(vmops.VMOps, 'plug_vifs')
@mock.patch('nova.virt.hyperv.vmops.VMOps.destroy')
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_on')
@mock.patch('nova.virt.hyperv.vmops.VMOps.set_boot_order')
@mock.patch('nova.virt.hyperv.vmops.VMOps.attach_config_drive')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_config_drive')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('nova.virt.hyperv.vmops.VMOps._save_device_metadata')
@mock.patch('nova.virt.hyperv.vmops.VMOps.create_instance')
@mock.patch('nova.virt.hyperv.vmops.VMOps.get_image_vm_generation')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_ephemerals')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_root_device')
@mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
@mock.patch('nova.virt.hyperv.vmops.VMOps._get_neutron_events',
            return_value=[])
def _test_spawn(self, mock_get_neutron_events,
                mock_delete_disk_files, mock_create_root_device,
                mock_create_ephemerals, mock_get_image_vm_gen,
                mock_create_instance, mock_save_device_metadata,
                mock_configdrive_required,
                mock_create_config_drive, mock_attach_config_drive,
                mock_set_boot_order,
                mock_power_on, mock_destroy, mock_plug_vifs,
                exists, configdrive_required, fail,
                fake_vm_gen=constants.VM_GEN_2):
    """Common scenario for spawn().

    Covers three paths: the VM already exists (InstanceExists), VM
    creation fails (exception is re-raised after destroy()), and the
    happy path where the full creation pipeline is verified.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_image_meta = mock.MagicMock()
    root_device_info = mock.sentinel.ROOT_DEV_INFO
    mock_get_image_vm_gen.return_value = fake_vm_gen
    fake_config_drive_path = mock_create_config_drive.return_value
    block_device_info = {'ephemerals': [], 'root_disk': root_device_info}

    self._vmops._vmutils.vm_exists.return_value = exists
    mock_configdrive_required.return_value = configdrive_required
    mock_create_instance.side_effect = fail
    if exists:
        self.assertRaises(exception.InstanceExists, self._vmops.spawn,
                          self.context, mock_instance, mock_image_meta,
                          [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                          mock.sentinel.network_info, block_device_info)
    elif fail is os_win_exc.HyperVException:
        self.assertRaises(os_win_exc.HyperVException, self._vmops.spawn,
                          self.context, mock_instance, mock_image_meta,
                          [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                          mock.sentinel.network_info, block_device_info)
        # A failed create rolls back via destroy().
        mock_destroy.assert_called_once_with(mock_instance,
                                             mock.sentinel.network_info,
                                             block_device_info)
    else:
        self._vmops.spawn(self.context, mock_instance, mock_image_meta,
                          [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                          mock.sentinel.network_info, block_device_info)
        self._vmops._vmutils.vm_exists.assert_called_once_with(
            mock_instance.name)
        mock_delete_disk_files.assert_called_once_with(
            mock_instance.name)
        mock_validate_and_update_bdi = (
            self._vmops._block_dev_man.validate_and_update_bdi)
        mock_validate_and_update_bdi.assert_called_once_with(
            mock_instance, mock_image_meta, fake_vm_gen, block_device_info)
        mock_create_root_device.assert_called_once_with(self.context,
                                                        mock_instance,
                                                        root_device_info,
                                                        fake_vm_gen)
        mock_create_ephemerals.assert_called_once_with(
            mock_instance, block_device_info['ephemerals'])
        mock_get_neutron_events.assert_called_once_with(
            mock.sentinel.network_info)
        mock_get_image_vm_gen.assert_called_once_with(mock_instance.uuid,
                                                      mock_image_meta)
        mock_create_instance.assert_called_once_with(
            mock_instance, mock.sentinel.network_info, root_device_info,
            block_device_info, fake_vm_gen, mock_image_meta)
        mock_plug_vifs.assert_called_once_with(mock_instance,
                                               mock.sentinel.network_info)
        mock_save_device_metadata.assert_called_once_with(
            self.context, mock_instance, block_device_info)
        mock_configdrive_required.assert_called_once_with(mock_instance)
        if configdrive_required:
            mock_create_config_drive.assert_called_once_with(
                self.context, mock_instance, [mock.sentinel.FILE],
                mock.sentinel.PASSWORD,
                mock.sentinel.network_info)
            mock_attach_config_drive.assert_called_once_with(
                mock_instance, fake_config_drive_path, fake_vm_gen)
        mock_set_boot_order.assert_called_once_with(
            mock_instance.name, fake_vm_gen, block_device_info)
        # VIFs were already plugged above, hence should_plug_vifs=False.
        mock_power_on.assert_called_once_with(
            mock_instance,
            network_info=mock.sentinel.network_info,
            should_plug_vifs=False)
def test_spawn(self):
    self._test_spawn(exists=False, configdrive_required=True, fail=None)

def test_spawn_instance_exists(self):
    self._test_spawn(exists=True, configdrive_required=True, fail=None)

def test_spawn_create_instance_exception(self):
    self._test_spawn(exists=False, configdrive_required=True,
                     fail=os_win_exc.HyperVException)

def test_spawn_not_required(self):
    self._test_spawn(exists=False, configdrive_required=False, fail=None)

def test_spawn_no_admin_permissions(self):
    """Missing Hyper-V admin rights abort spawn() immediately."""
    self._vmops._vmutils.check_admin_permissions.side_effect = (
        os_win_exc.HyperVException)
    self.assertRaises(os_win_exc.HyperVException,
                      self._vmops.spawn,
                      self.context, mock.DEFAULT, mock.DEFAULT,
                      [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                      mock.sentinel.INFO, mock.sentinel.DEV_INFO)
@mock.patch.object(vmops.VMOps, '_get_neutron_events')
def test_wait_vif_plug_events(self, mock_get_events):
    """A plug-event timeout raises when vif_plugging_is_fatal is set."""
    self._vmops._virtapi.wait_for_instance_event.side_effect = (
        etimeout.Timeout)
    self.flags(vif_plugging_timeout=1)
    self.flags(vif_plugging_is_fatal=True)

    def _context_user():
        with self._vmops.wait_vif_plug_events(mock.sentinel.instance,
                                              mock.sentinel.network_info):
            pass

    self.assertRaises(exception.VirtualInterfaceCreateException,
                      _context_user)

    mock_get_events.assert_called_once_with(mock.sentinel.network_info)
    self._vmops._virtapi.wait_for_instance_event.assert_called_once_with(
        mock.sentinel.instance, mock_get_events.return_value,
        deadline=CONF.vif_plugging_timeout,
        error_callback=self._vmops._neutron_failed_callback)

@mock.patch.object(vmops.VMOps, '_get_neutron_events')
def test_wait_vif_plug_events_port_binding_failed(self, mock_get_events):
    """PortBindingFailed while gathering events propagates unchanged."""
    mock_get_events.side_effect = exception.PortBindingFailed(
        port_id='fake_id')

    def _context_user():
        with self._vmops.wait_vif_plug_events(mock.sentinel.instance,
                                              mock.sentinel.network_info):
            pass

    self.assertRaises(exception.PortBindingFailed, _context_user)

def test_neutron_failed_callback(self):
    """The error callback raises when vif plugging failures are fatal."""
    self.flags(vif_plugging_is_fatal=True)
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      self._vmops._neutron_failed_callback,
                      mock.sentinel.event_name, mock.sentinel.instance)
def test_get_neutron_events(self):
    """Only VIFs explicitly marked inactive produce plug events."""
    network_info = [{'id': mock.sentinel.vif_id1, 'active': True},
                    {'id': mock.sentinel.vif_id2, 'active': False},
                    {'id': mock.sentinel.vif_id3}]

    events = self._vmops._get_neutron_events(network_info)

    expected = [('network-vif-plugged', mock.sentinel.vif_id2)]
    self.assertEqual(expected, events)

def test_get_neutron_events_no_timeout(self):
    """With vif_plugging_timeout=0 no events are awaited at all."""
    self.flags(vif_plugging_timeout=0)
    network_info = [{'id': mock.sentinel.vif_id1, 'active': True}]

    self.assertEqual([], self._vmops._get_neutron_events(network_info))
@mock.patch.object(vmops.VMOps, '_attach_pci_devices')
@mock.patch.object(vmops.VMOps, '_requires_secure_boot')
@mock.patch.object(vmops.VMOps, '_requires_certificate')
@mock.patch.object(vmops.VMOps, '_get_instance_vnuma_config')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
            '.attach_volumes')
@mock.patch.object(vmops.VMOps, '_set_instance_disk_qos_specs')
@mock.patch.object(vmops.VMOps, '_create_vm_com_port_pipes')
@mock.patch.object(vmops.VMOps, '_attach_ephemerals')
@mock.patch.object(vmops.VMOps, '_attach_root_device')
@mock.patch.object(vmops.VMOps, '_configure_remotefx')
def _test_create_instance(self, mock_configure_remotefx,
                          mock_attach_root_device,
                          mock_attach_ephemerals,
                          mock_create_pipes,
                          mock_set_qos_specs,
                          mock_attach_volumes,
                          mock_get_vnuma_config,
                          mock_requires_certificate,
                          mock_requires_secure_boot,
                          mock_attach_pci_devices,
                          enable_instance_metrics,
                          vm_gen=constants.VM_GEN_1,
                          vnuma_enabled=False,
                          pci_requests=None):
    """Common scenario for create_instance().

    Parameterized over metrics collection, VM generation, vNUMA and PCI
    passthrough; verifies the full VM assembly call sequence.
    """
    self.flags(dynamic_memory_ratio=2.0, group='hyperv')
    self.flags(enable_instance_metrics_collection=enable_instance_metrics,
               group='hyperv')
    root_device_info = mock.sentinel.ROOT_DEV_INFO
    block_device_info = {'ephemerals': [], 'block_device_mapping': []}
    fake_network_info = {'id': mock.sentinel.ID,
                         'address': mock.sentinel.ADDRESS}
    mock_instance = fake_instance.fake_instance_obj(self.context)
    instance_path = os.path.join(CONF.instances_path, mock_instance.name)
    mock_requires_secure_boot.return_value = True

    flavor = flavor_obj.Flavor(**test_flavor.fake_flavor)
    mock_instance.flavor = flavor
    instance_pci_requests = objects.InstancePCIRequests(
        requests=pci_requests or [], instance_uuid=mock_instance.uuid)
    mock_instance.pci_requests = instance_pci_requests
    # PCI passthrough forces the VM to shut down on host shutdown.
    host_shutdown_action = (os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN
                            if pci_requests else None)

    if vnuma_enabled:
        mock_get_vnuma_config.return_value = (
            mock.sentinel.mem_per_numa, mock.sentinel.cpus_per_numa)
        cpus_per_numa = mock.sentinel.cpus_per_numa
        mem_per_numa = mock.sentinel.mem_per_numa
        # vNUMA is incompatible with dynamic memory.
        dynamic_memory_ratio = 1.0
    else:
        mock_get_vnuma_config.return_value = (None, None)
        mem_per_numa, cpus_per_numa = (None, None)
        dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio

    self._vmops.create_instance(instance=mock_instance,
                                network_info=[fake_network_info],
                                root_device=root_device_info,
                                block_device_info=block_device_info,
                                vm_gen=vm_gen,
                                image_meta=mock.sentinel.image_meta)

    mock_get_vnuma_config.assert_called_once_with(mock_instance,
                                                  mock.sentinel.image_meta)
    self._vmops._vmutils.create_vm.assert_called_once_with(
        mock_instance.name, vnuma_enabled, vm_gen,
        instance_path, [mock_instance.uuid])
    self._vmops._vmutils.update_vm.assert_called_once_with(
        mock_instance.name, mock_instance.flavor.memory_mb, mem_per_numa,
        mock_instance.flavor.vcpus, cpus_per_numa,
        CONF.hyperv.limit_cpu_features, dynamic_memory_ratio,
        host_shutdown_action=host_shutdown_action)

    mock_configure_remotefx.assert_called_once_with(mock_instance, vm_gen)

    mock_create_scsi_ctrl = self._vmops._vmutils.create_scsi_controller
    mock_create_scsi_ctrl.assert_called_once_with(mock_instance.name)

    mock_attach_root_device.assert_called_once_with(mock_instance.name,
                                                    root_device_info)
    mock_attach_ephemerals.assert_called_once_with(mock_instance.name,
                                                   block_device_info['ephemerals'])
    mock_attach_volumes.assert_called_once_with(
        block_device_info['block_device_mapping'], mock_instance.name)

    self._vmops._vmutils.create_nic.assert_called_once_with(
        mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS)
    mock_enable = self._vmops._metricsutils.enable_vm_metrics_collection
    if enable_instance_metrics:
        mock_enable.assert_called_once_with(mock_instance.name)
    mock_set_qos_specs.assert_called_once_with(mock_instance)
    mock_requires_secure_boot.assert_called_once_with(
        mock_instance, mock.sentinel.image_meta, vm_gen)
    mock_requires_certificate.assert_called_once_with(
        mock.sentinel.image_meta)
    enable_secure_boot = self._vmops._vmutils.enable_secure_boot
    enable_secure_boot.assert_called_once_with(
        mock_instance.name,
        msft_ca_required=mock_requires_certificate.return_value)
    mock_attach_pci_devices.assert_called_once_with(mock_instance)
def test_create_instance(self):
    self._test_create_instance(enable_instance_metrics=True)

def test_create_instance_enable_instance_metrics_false(self):
    self._test_create_instance(enable_instance_metrics=False)

def test_create_instance_gen2(self):
    self._test_create_instance(enable_instance_metrics=False,
                               vm_gen=constants.VM_GEN_2)

def test_create_instance_vnuma_enabled(self):
    self._test_create_instance(enable_instance_metrics=False,
                               vnuma_enabled=True)

def test_create_instance_pci_requested(self):
    vendor_id = 'fake_vendor_id'
    product_id = 'fake_product_id'
    spec = {'vendor_id': vendor_id, 'product_id': product_id}
    request = objects.InstancePCIRequest(count=1, spec=[spec])
    self._test_create_instance(enable_instance_metrics=False,
                               pci_requests=[request])

def test_attach_pci_devices(self):
    """A PCI request with count=2 yields two add_pci_device calls."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    vendor_id = 'fake_vendor_id'
    product_id = 'fake_product_id'
    spec = {'vendor_id': vendor_id, 'product_id': product_id}
    request = objects.InstancePCIRequest(count=2, spec=[spec])
    instance_pci_requests = objects.InstancePCIRequests(
        requests=[request], instance_uuid=mock_instance.uuid)
    mock_instance.pci_requests = instance_pci_requests

    self._vmops._attach_pci_devices(mock_instance)

    self._vmops._vmutils.add_pci_device.assert_has_calls(
        [mock.call(mock_instance.name, vendor_id, product_id)] * 2)
@mock.patch.object(vmops.hardware, 'numa_get_constraints')
def _check_get_instance_vnuma_config_exception(self, mock_get_numa,
                                               numa_cells):
    """Topologies Hyper-V cannot express raise InstanceUnacceptable."""
    flavor = {'extra_specs': {}}
    mock_instance = mock.MagicMock(flavor=flavor)
    image_meta = mock.MagicMock(properties={})
    numa_topology = objects.InstanceNUMATopology(cells=numa_cells)
    mock_get_numa.return_value = numa_topology

    self.assertRaises(exception.InstanceUnacceptable,
                      self._vmops._get_instance_vnuma_config,
                      mock_instance, image_meta)

def test_get_instance_vnuma_config_bad_cpuset(self):
    # Cells with different vCPU counts are rejected.
    cell1 = objects.InstanceNUMACell(
        cpuset=set([0]), pcpuset=set(), memory=1024)
    cell2 = objects.InstanceNUMACell(
        cpuset=set([1, 2]), pcpuset=set(), memory=1024)
    self._check_get_instance_vnuma_config_exception(
        numa_cells=[cell1, cell2])

def test_get_instance_vnuma_config_bad_memory(self):
    # Cells with different memory amounts are rejected.
    cell1 = objects.InstanceNUMACell(
        cpuset=set([0]), pcpuset=set(), memory=1024)
    cell2 = objects.InstanceNUMACell(
        cpuset=set([1]), pcpuset=set(), memory=2048)
    self._check_get_instance_vnuma_config_exception(
        numa_cells=[cell1, cell2])

def test_get_instance_vnuma_config_cpu_pinning(self):
    # Dedicated CPU pinning is not supported on Hyper-V.
    cell1 = objects.InstanceNUMACell(
        cpuset=set([0]), pcpuset=set(), memory=1024,
        cpu_policy=fields.CPUAllocationPolicy.DEDICATED)
    cell2 = objects.InstanceNUMACell(
        cpuset=set([1]), pcpuset=set(), memory=1024,
        cpu_policy=fields.CPUAllocationPolicy.DEDICATED)
    self._check_get_instance_vnuma_config_exception(
        numa_cells=[cell1, cell2])

@mock.patch.object(vmops.hardware, 'numa_get_constraints')
def _check_get_instance_vnuma_config(
        self, mock_get_numa, numa_topology=None,
        expected_mem_per_numa=None, expected_cpus_per_numa=None):
    """Valid (or absent) topologies yield the per-cell (mem, cpus)."""
    mock_instance = mock.MagicMock()
    image_meta = mock.MagicMock()
    mock_get_numa.return_value = numa_topology

    result_memory_per_numa, result_cpus_per_numa = (
        self._vmops._get_instance_vnuma_config(mock_instance, image_meta))

    self.assertEqual(expected_cpus_per_numa, result_cpus_per_numa)
    self.assertEqual(expected_mem_per_numa, result_memory_per_numa)

def test_get_instance_vnuma_config(self):
    cell1 = objects.InstanceNUMACell(
        cpuset=set([0]), pcpuset=set(), memory=2048)
    cell2 = objects.InstanceNUMACell(
        cpuset=set([1]), pcpuset=set(), memory=2048)
    numa_topology = objects.InstanceNUMATopology(cells=[cell1, cell2])
    self._check_get_instance_vnuma_config(numa_topology=numa_topology,
                                          expected_cpus_per_numa=1,
                                          expected_mem_per_numa=2048)

def test_get_instance_vnuma_config_no_topology(self):
    self._check_get_instance_vnuma_config()
@mock.patch.object(vmops.volumeops.VolumeOps, 'attach_volume')
def test_attach_root_device_volume(self, mock_attach_volume):
    """A volume-backed root device is attached through volumeops."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    root_device_info = {'type': constants.VOLUME,
                        'connection_info': mock.sentinel.CONN_INFO,
                        'disk_bus': constants.CTRL_TYPE_IDE}

    self._vmops._attach_root_device(mock_instance.name, root_device_info)

    mock_attach_volume.assert_called_once_with(
        root_device_info['connection_info'], mock_instance.name,
        disk_bus=root_device_info['disk_bus'])

@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_root_device_disk(self, mock_attach_drive):
    """A local-disk root device is attached as a drive."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    root_device_info = {'type': constants.DISK,
                        'boot_index': 0,
                        'disk_bus': constants.CTRL_TYPE_IDE,
                        'path': 'fake_path',
                        'drive_addr': 0,
                        'ctrl_disk_addr': 1}

    self._vmops._attach_root_device(mock_instance.name, root_device_info)

    mock_attach_drive.assert_called_once_with(
        mock_instance.name, root_device_info['path'],
        root_device_info['drive_addr'], root_device_info['ctrl_disk_addr'],
        root_device_info['disk_bus'], root_device_info['type'])

@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_ephemerals(self, mock_attach_drive):
    """Ephemerals with a path are attached; path=None entries skipped."""
    mock_instance = fake_instance.fake_instance_obj(self.context)
    ephemerals = [{'path': mock.sentinel.PATH1,
                   'boot_index': 1,
                   'disk_bus': constants.CTRL_TYPE_IDE,
                   'device_type': 'disk',
                   'drive_addr': 0,
                   'ctrl_disk_addr': 1},
                  {'path': mock.sentinel.PATH2,
                   'boot_index': 2,
                   'disk_bus': constants.CTRL_TYPE_SCSI,
                   'device_type': 'disk',
                   'drive_addr': 0,
                   'ctrl_disk_addr': 0},
                  {'path': None}]

    self._vmops._attach_ephemerals(mock_instance.name, ephemerals)

    mock_attach_drive.assert_has_calls(
        [mock.call(mock_instance.name, mock.sentinel.PATH1, 0,
                   1, constants.CTRL_TYPE_IDE, constants.DISK),
         mock.call(mock_instance.name, mock.sentinel.PATH2, 0,
                   0, constants.CTRL_TYPE_SCSI, constants.DISK)
         ])
def test_attach_drive_vm_to_scsi(self):
    """SCSI controller type routes to attach_scsi_drive."""
    self._vmops._attach_drive(
        mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
        mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
        constants.CTRL_TYPE_SCSI)

    self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
        mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
        constants.DISK)

def test_attach_drive_vm_to_ide(self):
    """IDE controller type routes to attach_ide_drive with addresses."""
    self._vmops._attach_drive(
        mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
        mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
        constants.CTRL_TYPE_IDE)

    self._vmops._vmutils.attach_ide_drive.assert_called_once_with(
        mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
        mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
        constants.DISK)
def test_get_image_vm_generation_default(self):
    """No image hint: the host's default generation is used."""
    image_meta = objects.ImageMeta.from_dict({"properties": {}})
    self._vmops._hostutils.get_default_vm_generation.return_value = (
        constants.IMAGE_PROP_VM_GEN_1)
    self._vmops._hostutils.get_supported_vm_types.return_value = [
        constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]

    response = self._vmops.get_image_vm_generation(
        mock.sentinel.instance_id, image_meta)

    self.assertEqual(constants.VM_GEN_1, response)

def test_get_image_vm_generation_gen2(self):
    """hw_machine_type in the image selects generation 2."""
    image_meta = objects.ImageMeta.from_dict(
        {"properties":
         {"hw_machine_type": constants.IMAGE_PROP_VM_GEN_2}})
    self._vmops._hostutils.get_supported_vm_types.return_value = [
        constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]

    response = self._vmops.get_image_vm_generation(
        mock.sentinel.instance_id, image_meta)

    self.assertEqual(constants.VM_GEN_2, response)

def test_check_vm_image_type_exception(self):
    """A plain VHD image cannot boot a generation 2 VM."""
    self._vmops._vhdutils.get_vhd_format.return_value = (
        constants.DISK_FORMAT_VHD)
    self.assertRaises(exception.InstanceUnacceptable,
                      self._vmops.check_vm_image_type,
                      mock.sentinel.instance_id, constants.VM_GEN_2,
                      mock.sentinel.FAKE_PATH)
def _check_requires_certificate(self, os_type):
    """The UEFI certificate is only required for Linux guests."""
    mock_image_meta = mock.MagicMock()
    mock_image_meta.properties = {'os_type': os_type}

    expected_result = os_type == fields.OSType.LINUX
    result = self._vmops._requires_certificate(mock_image_meta)
    self.assertEqual(expected_result, result)

def test_requires_certificate_windows(self):
    self._check_requires_certificate(os_type=fields.OSType.WINDOWS)

def test_requires_certificate_linux(self):
    self._check_requires_certificate(os_type=fields.OSType.LINUX)
def _check_requires_secure_boot(
self, image_prop_os_type=fields.OSType.LINUX,
image_prop_secure_boot=fields.SecureBoot.REQUIRED,
flavor_secure_boot=fields.SecureBoot.REQUIRED,
vm_gen=constants.VM_GEN_2, expected_exception=True):
mock_instance = fake_instance.fake_instance_obj(self.context)
if flavor_secure_boot:
mock_instance.flavor.extra_specs = {
constants.FLAVOR_SPEC_SECURE_BOOT: flavor_secure_boot}
mock_image_meta = mock.MagicMock()
mock_image_meta.properties = {'os_type': image_prop_os_type}
if image_prop_secure_boot:
mock_image_meta.properties['os_secure_boot'] = (
image_prop_secure_boot)
if expected_exception:
self.assertRaises(exception.InstanceUnacceptable,
self._vmops._requires_secure_boot,
mock_instance, mock_image_meta, vm_gen)
else:
result = self._vmops._requires_secure_boot(mock_instance,
mock_image_meta,
vm_gen)
requires_sb = fields.SecureBoot.REQUIRED in [
flavor_secure_boot, image_prop_secure_boot]
self.assertEqual(requires_sb, result)
    def test_requires_secure_boot_ok(self):
        """Image and flavor both require secure boot: no exception."""
        self._check_requires_secure_boot(
            expected_exception=False)
    def test_requires_secure_boot_image_img_prop_none(self):
        """Flavor-only secure boot requirement is accepted."""
        self._check_requires_secure_boot(
            image_prop_secure_boot=None,
            expected_exception=False)
    def test_requires_secure_boot_image_extra_spec_none(self):
        """Image-only secure boot requirement is accepted."""
        self._check_requires_secure_boot(
            flavor_secure_boot=None,
            expected_exception=False)
    def test_requires_secure_boot_flavor_no_os_type(self):
        """Secure boot requested without an image os_type must fail."""
        self._check_requires_secure_boot(
            image_prop_os_type=None)
    def test_requires_secure_boot_flavor_no_os_type_no_exc(self):
        """Missing os_type is fine when secure boot is disabled everywhere."""
        self._check_requires_secure_boot(
            image_prop_os_type=None,
            image_prop_secure_boot=fields.SecureBoot.DISABLED,
            flavor_secure_boot=fields.SecureBoot.DISABLED,
            expected_exception=False)
    def test_requires_secure_boot_flavor_disabled(self):
        """Image requires secure boot but flavor disables it: conflict."""
        self._check_requires_secure_boot(
            flavor_secure_boot=fields.SecureBoot.DISABLED)
    def test_requires_secure_boot_image_disabled(self):
        """Flavor requires secure boot but image disables it: conflict."""
        self._check_requires_secure_boot(
            image_prop_secure_boot=fields.SecureBoot.DISABLED)
    def test_requires_secure_boot_generation_1(self):
        """Secure boot on a Gen 1 VM is unacceptable."""
        self._check_requires_secure_boot(vm_gen=constants.VM_GEN_1)
    @mock.patch('nova.api.metadata.base.InstanceMetadata')
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder')
    @mock.patch('oslo_concurrency.processutils.execute')
    def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder,
                                  mock_InstanceMetadata, config_drive_format,
                                  config_drive_cdrom, side_effect,
                                  rescue=False):
        """Helper exercising _create_config_drive end to end.

        Covers three outcomes, driven by the arguments:
          * unsupported config drive format -> ConfigDriveUnsupportedFormat;
          * qemu-img conversion failure -> ProcessExecutionError propagates;
          * success -> the ISO (cdrom) or converted VHD path is returned, and
            the intermediate ISO is removed in the VHD case.
        """
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self.flags(config_drive_format=config_drive_format)
        self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv')
        self.flags(config_drive_inject_password=True, group='hyperv')
        mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [
            side_effect]
        path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)
        path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD)
        def fake_get_configdrive_path(instance_name, disk_format,
                                      rescue=False):
            # Return the ISO path for DVD format, the VHD path otherwise.
            return (path_iso
                    if disk_format == constants.DVD_FORMAT else path_vhd)
        mock_get_configdrive_path = self._vmops._pathutils.get_configdrive_path
        mock_get_configdrive_path.side_effect = fake_get_configdrive_path
        expected_get_configdrive_path_calls = [mock.call(mock_instance.name,
                                                         constants.DVD_FORMAT,
                                                         rescue=rescue)]
        if not config_drive_cdrom:
            # A second lookup is expected for the VHD conversion target.
            expected_call = mock.call(mock_instance.name,
                                      constants.DISK_FORMAT_VHD,
                                      rescue=rescue)
            expected_get_configdrive_path_calls.append(expected_call)
        if config_drive_format != self.ISO9660:
            self.assertRaises(exception.ConfigDriveUnsupportedFormat,
                              self._vmops._create_config_drive,
                              self.context,
                              mock_instance,
                              [mock.sentinel.FILE],
                              mock.sentinel.PASSWORD,
                              mock.sentinel.NET_INFO,
                              rescue)
        elif side_effect is processutils.ProcessExecutionError:
            self.assertRaises(processutils.ProcessExecutionError,
                              self._vmops._create_config_drive,
                              self.context,
                              mock_instance,
                              [mock.sentinel.FILE],
                              mock.sentinel.PASSWORD,
                              mock.sentinel.NET_INFO,
                              rescue)
        else:
            path = self._vmops._create_config_drive(self.context,
                                                    mock_instance,
                                                    [mock.sentinel.FILE],
                                                    mock.sentinel.PASSWORD,
                                                    mock.sentinel.NET_INFO,
                                                    rescue)
            mock_InstanceMetadata.assert_called_once_with(
                mock_instance, content=[mock.sentinel.FILE],
                extra_md={'admin_pass': mock.sentinel.PASSWORD},
                network_info=mock.sentinel.NET_INFO)
            mock_get_configdrive_path.assert_has_calls(
                expected_get_configdrive_path_calls)
            mock_ConfigDriveBuilder.assert_called_with(
                instance_md=mock_InstanceMetadata.return_value)
            mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive
            mock_make_drive.assert_called_once_with(path_iso)
            if not CONF.hyperv.config_drive_cdrom:
                expected = path_vhd
                # The ISO must be converted to VHD and then cleaned up.
                mock_execute.assert_called_once_with(
                    CONF.hyperv.qemu_img_cmd,
                    'convert', '-f', 'raw', '-O', 'vpc',
                    path_iso, path_vhd, attempts=1)
                self._vmops._pathutils.remove.assert_called_once_with(
                    os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO))
            else:
                expected = path_iso
            self.assertEqual(expected, path)
    def test_create_config_drive_cdrom(self):
        """Config drive created as an ISO attached via virtual DVD."""
        self._test_create_config_drive(config_drive_format=self.ISO9660,
                                       config_drive_cdrom=True,
                                       side_effect=None)
    def test_create_config_drive_vhd(self):
        """Config drive ISO converted to a VHD disk."""
        self._test_create_config_drive(config_drive_format=self.ISO9660,
                                       config_drive_cdrom=False,
                                       side_effect=None)
    def test_create_rescue_config_drive_vhd(self):
        """Rescue config drive as VHD: rescue flag flows to path lookups."""
        self._test_create_config_drive(config_drive_format=self.ISO9660,
                                       config_drive_cdrom=False,
                                       side_effect=None,
                                       rescue=True)
    def test_create_config_drive_execution_error(self):
        """make_drive failure surfaces as ProcessExecutionError."""
        self._test_create_config_drive(
            config_drive_format=self.ISO9660,
            config_drive_cdrom=False,
            side_effect=processutils.ProcessExecutionError)
    def test_attach_config_drive_exception(self):
        """An unrecognized config drive extension raises InvalidDiskFormat."""
        instance = fake_instance.fake_instance_obj(self.context)
        self.assertRaises(exception.InvalidDiskFormat,
                          self._vmops.attach_config_drive,
                          instance, 'C:/fake_instance_dir/configdrive.xxx',
                          constants.VM_GEN_1)
    @mock.patch.object(vmops.VMOps, '_attach_drive')
    def test_attach_config_drive(self, mock_attach_drive):
        """Gen 1 VMs get the config drive on the IDE controller."""
        instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.attach_config_drive(instance,
                                        self._FAKE_CONFIGDRIVE_PATH,
                                        constants.VM_GEN_1)
        mock_attach_drive.assert_called_once_with(
            instance.name, self._FAKE_CONFIGDRIVE_PATH,
            1, 0, constants.CTRL_TYPE_IDE, constants.DISK)
    @mock.patch.object(vmops.VMOps, '_attach_drive')
    def test_attach_config_drive_gen2(self, mock_attach_drive):
        """Gen 2 VMs get the config drive on the SCSI controller."""
        instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.attach_config_drive(instance,
                                        self._FAKE_CONFIGDRIVE_PATH,
                                        constants.VM_GEN_2)
        mock_attach_drive.assert_called_once_with(
            instance.name, self._FAKE_CONFIGDRIVE_PATH,
            1, 0, constants.CTRL_TYPE_SCSI, constants.DISK)
    def test_detach_config_drive(self):
        """_detach_config_drive looks up, detaches and deletes the drive.

        With delete=True the config drive file must also be removed after
        being detached from the VM.
        """
        is_rescue_configdrive = True
        mock_lookup_configdrive = (
            self._vmops._pathutils.lookup_configdrive_path)
        mock_lookup_configdrive.return_value = mock.sentinel.configdrive_path
        self._vmops._detach_config_drive(mock.sentinel.instance_name,
                                         rescue=is_rescue_configdrive,
                                         delete=True)
        mock_lookup_configdrive.assert_called_once_with(
            mock.sentinel.instance_name,
            rescue=is_rescue_configdrive)
        self._vmops._vmutils.detach_vm_disk.assert_called_once_with(
            mock.sentinel.instance_name, mock.sentinel.configdrive_path,
            is_physical=False)
        self._vmops._pathutils.remove.assert_called_once_with(
            mock.sentinel.configdrive_path)
    def test_delete_disk_files(self):
        """_delete_disk_files stops the serial console and removes the dir."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._delete_disk_files(mock_instance.name)
        stop_console_handler = (
            self._vmops._serial_console_ops.stop_console_handler_unsync)
        stop_console_handler.assert_called_once_with(mock_instance.name)
        self._vmops._pathutils.get_instance_dir.assert_called_once_with(
            mock_instance.name, create_dir=False, remove_dir=True)
    @ddt.data({},
              {'vm_exists': True},
              {'planned_vm_exists': True})
    @ddt.unpack
    @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
    @mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
    @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
    @mock.patch('nova.virt.hyperv.vmops.VMOps.unplug_vifs')
    def test_destroy(self, mock_unplug_vifs, mock_power_off,
                     mock_delete_disk_files, mock_disconnect_volumes,
                     vm_exists=False, planned_vm_exists=False):
        """destroy() handles existing, planned, and already-gone VMs.

        Three ddt scenarios: a real VM is powered off and destroyed; a
        planned (migration) VM is destroyed via migrutils; a missing VM is
        skipped. In every case vifs are unplugged, volumes disconnected and
        disk files deleted.
        """
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._vmutils.vm_exists.return_value = vm_exists
        self._vmops._migrutils.planned_vm_exists.return_value = (
            planned_vm_exists)
        self._vmops.destroy(instance=mock_instance,
                            block_device_info=mock.sentinel.FAKE_BD_INFO,
                            network_info=mock.sentinel.fake_network_info)
        mock_destroy_planned_vms = (
            self._vmops._migrutils.destroy_existing_planned_vm)
        if vm_exists:
            self._vmops._vmutils.stop_vm_jobs.assert_called_once_with(
                mock_instance.name)
            mock_power_off.assert_called_once_with(mock_instance)
            self._vmops._vmutils.destroy_vm.assert_called_once_with(
                mock_instance.name)
        elif planned_vm_exists:
            self._vmops._migrutils.planned_vm_exists.assert_called_once_with(
                mock_instance.name)
            mock_destroy_planned_vms.assert_called_once_with(
                mock_instance.name)
        else:
            self.assertFalse(self._vmops._vmutils.destroy_vm.called)
            self.assertFalse(mock_destroy_planned_vms.called)
        self._vmops._vmutils.vm_exists.assert_called_with(
            mock_instance.name)
        mock_unplug_vifs.assert_called_once_with(
            mock_instance, mock.sentinel.fake_network_info)
        mock_disconnect_volumes.assert_called_once_with(
            mock.sentinel.FAKE_BD_INFO)
        mock_delete_disk_files.assert_called_once_with(
            mock_instance.name)
    @mock.patch('nova.virt.hyperv.vmops.VMOps.power_off')
    def test_destroy_exception(self, mock_power_off):
        """A destroy_vm failure propagates out of destroy()."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._vmutils.destroy_vm.side_effect = (
            os_win_exc.HyperVException)
        self._vmops._vmutils.vm_exists.return_value = True
        self.assertRaises(os_win_exc.HyperVException,
                          self._vmops.destroy, mock_instance,
                          mock.sentinel.network_info,
                          mock.sentinel.block_device_info)
    def test_reboot_hard(self):
        """Hard reboot sets the VM to the REBOOT state directly."""
        self._test_reboot(vmops.REBOOT_TYPE_HARD,
                          os_win_const.HYPERV_VM_STATE_REBOOT)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
    def test_reboot_soft(self, mock_soft_shutdown):
        """Successful soft shutdown is followed by powering the VM back on."""
        mock_soft_shutdown.return_value = True
        self._test_reboot(vmops.REBOOT_TYPE_SOFT,
                          os_win_const.HYPERV_VM_STATE_ENABLED)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
    def test_reboot_soft_failed(self, mock_soft_shutdown):
        """A failed soft shutdown falls back to a hard reboot."""
        mock_soft_shutdown.return_value = False
        self._test_reboot(vmops.REBOOT_TYPE_SOFT,
                          os_win_const.HYPERV_VM_STATE_REBOOT)
    @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
    @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
    def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
        """A power_on failure after soft shutdown propagates to the caller."""
        mock_soft_shutdown.return_value = True
        mock_power_on.side_effect = os_win_exc.HyperVException(
            "Expected failure")
        instance = fake_instance.fake_instance_obj(self.context)
        self.assertRaises(os_win_exc.HyperVException, self._vmops.reboot,
                          instance, {}, vmops.REBOOT_TYPE_SOFT)
        mock_soft_shutdown.assert_called_once_with(instance)
        mock_power_on.assert_called_once_with(instance, network_info={})
    def _test_reboot(self, reboot_type, vm_state):
        """Helper: reboot() with the given type must set the given state."""
        instance = fake_instance.fake_instance_obj(self.context)
        with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
            self._vmops.reboot(instance, {}, reboot_type)
            mock_set_state.assert_called_once_with(instance, vm_state)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
    def test_soft_shutdown(self, mock_wait_for_power_off):
        """Soft shutdown succeeds when the VM powers off within the timeout."""
        instance = fake_instance.fake_instance_obj(self.context)
        mock_wait_for_power_off.return_value = True
        result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
        mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
        mock_shutdown_vm.assert_called_once_with(instance.name)
        mock_wait_for_power_off.assert_called_once_with(
            instance.name, self._FAKE_TIMEOUT)
        self.assertTrue(result)
    @mock.patch("time.sleep")
    def test_soft_shutdown_failed(self, mock_sleep):
        """A HyperVException from soft_shutdown_vm yields a False result."""
        instance = fake_instance.fake_instance_obj(self.context)
        mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
        mock_shutdown_vm.side_effect = os_win_exc.HyperVException(
            "Expected failure.")
        result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
        mock_shutdown_vm.assert_called_once_with(instance.name)
        self.assertFalse(result)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
    def test_soft_shutdown_wait(self, mock_wait_for_power_off):
        """Shutdown is retried, waiting in increments until power-off."""
        instance = fake_instance.fake_instance_obj(self.context)
        # First wait times out, the retry succeeds.
        mock_wait_for_power_off.side_effect = [False, True]
        result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
        calls = [mock.call(instance.name, 1),
                 mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
        mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
        mock_shutdown_vm.assert_called_with(instance.name)
        mock_wait_for_power_off.assert_has_calls(calls)
        self.assertTrue(result)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
    def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off):
        """Shutdown fails when every wait interval exhausts the timeout."""
        instance = fake_instance.fake_instance_obj(self.context)
        mock_wait_for_power_off.return_value = False
        result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
        calls = [mock.call(instance.name, 1.5),
                 mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
        mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
        mock_shutdown_vm.assert_called_with(instance.name)
        mock_wait_for_power_off.assert_has_calls(calls)
        self.assertFalse(result)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_pause(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.pause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, os_win_const.HYPERV_VM_STATE_PAUSED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_unpause(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.unpause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_suspend(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.suspend(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, os_win_const.HYPERV_VM_STATE_SUSPENDED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_resume(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.resume(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    def _test_power_off(self, timeout, set_state_expected=True):
        """Helper: power_off stops the console and (usually) disables the VM.

        set_state_expected=False covers the case where the VM no longer
        exists and no state change should be attempted.
        """
        instance = fake_instance.fake_instance_obj(self.context)
        with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
            self._vmops.power_off(instance, timeout)
            serialops = self._vmops._serial_console_ops
            serialops.stop_console_handler.assert_called_once_with(
                instance.name)
            if set_state_expected:
                mock_set_state.assert_called_once_with(
                    instance, os_win_const.HYPERV_VM_STATE_DISABLED)
    def test_power_off_hard(self):
        """A zero timeout forces an immediate hard power-off."""
        self._test_power_off(timeout=0)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
    def test_power_off_exception(self, mock_soft_shutdown):
        """A failed soft shutdown falls back to a hard power-off."""
        mock_soft_shutdown.return_value = False
        self._test_power_off(timeout=1)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
    @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
    def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
        """Successful soft shutdown means no explicit state change is made."""
        instance = fake_instance.fake_instance_obj(self.context)
        mock_soft_shutdown.return_value = True
        self._vmops.power_off(instance, 1, 0)
        serialops = self._vmops._serial_console_ops
        serialops.stop_console_handler.assert_called_once_with(
            instance.name)
        mock_soft_shutdown.assert_called_once_with(
            instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
        self.assertFalse(mock_set_state.called)
    @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
    def test_power_off_unexisting_instance(self, mock_soft_shutdown):
        """Powering off a missing VM is a no-op, not an error."""
        mock_soft_shutdown.side_effect = os_win_exc.HyperVVMNotFoundException(
            vm_name=mock.sentinel.vm_name)
        self._test_power_off(timeout=1, set_state_expected=False)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_power_on(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.power_on(mock_instance)
mock_set_vm_state.assert_called_once_with(
mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    @mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
                '.fix_instance_volume_disk_paths')
    @mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
    def test_power_on_having_block_devices(self, mock_set_vm_state,
                                           mock_fix_instance_vol_paths):
        """power_on with block devices fixes volume paths before starting."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.power_on(mock_instance, mock.sentinel.block_device_info)
        mock_fix_instance_vol_paths.assert_called_once_with(
            mock_instance.name, mock.sentinel.block_device_info)
        mock_set_vm_state.assert_called_once_with(
            mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    @mock.patch.object(vmops.VMOps, 'plug_vifs')
    def test_power_on_with_network_info(self, mock_plug_vifs):
        """power_on with network info plugs the vifs."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.power_on(mock_instance,
                             network_info=mock.sentinel.fake_network_info)
        mock_plug_vifs.assert_called_once_with(
            mock_instance, mock.sentinel.fake_network_info)
    @mock.patch.object(vmops.VMOps, 'plug_vifs')
    def test_power_on_vifs_already_plugged(self, mock_plug_vifs):
        """should_plug_vifs=False skips the vif plugging step."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.power_on(mock_instance,
                             should_plug_vifs=False)
        self.assertFalse(mock_plug_vifs.called)
    def _test_set_vm_state(self, state):
        """Helper: _set_vm_state forwards (name, state) to vmutils."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._set_vm_state(mock_instance, state)
        self._vmops._vmutils.set_vm_state.assert_called_once_with(
            mock_instance.name, state)
    def test_set_vm_state_disabled(self):
        """_set_vm_state passes DISABLED through unchanged."""
        self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_DISABLED)
    def test_set_vm_state_enabled(self):
        """_set_vm_state passes ENABLED through unchanged."""
        self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_ENABLED)
    def test_set_vm_state_reboot(self):
        """_set_vm_state passes REBOOT through unchanged."""
        self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_REBOOT)
    def test_set_vm_state_exception(self):
        """A vmutils failure propagates out of _set_vm_state."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._vmutils.set_vm_state.side_effect = (
            os_win_exc.HyperVException)
        self.assertRaises(os_win_exc.HyperVException,
                          self._vmops._set_vm_state,
                          mock_instance, mock.sentinel.STATE)
    def test_get_vm_state(self):
        """_get_vm_state returns the EnabledState from the summary info."""
        summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED}
        with mock.patch.object(self._vmops._vmutils,
                               'get_vm_summary_info') as mock_get_summary_info:
            mock_get_summary_info.return_value = summary_info
            response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
            self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
    @mock.patch.object(vmops.VMOps, '_get_vm_state')
    def test_wait_for_power_off_true(self, mock_get_state):
        """_wait_for_power_off returns True once the VM reports DISABLED."""
        mock_get_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED
        result = self._vmops._wait_for_power_off(
            mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
        mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
        self.assertTrue(result)
    @mock.patch.object(vmops.etimeout, "with_timeout")
    def test_wait_for_power_off_false(self, mock_with_timeout):
        """_wait_for_power_off returns False when the wait times out."""
        mock_with_timeout.side_effect = etimeout.Timeout()
        result = self._vmops._wait_for_power_off(
            mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
        self.assertFalse(result)
    def test_create_vm_com_port_pipes(self):
        """Each serial port maps to a named pipe '\\\\.\\pipe\\<uuid>_<type>'."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_serial_ports = {
            1: constants.SERIAL_PORT_TYPE_RO,
            2: constants.SERIAL_PORT_TYPE_RW
        }
        self._vmops._create_vm_com_port_pipes(mock_instance,
                                              mock_serial_ports)
        expected_calls = []
        for port_number, port_type in mock_serial_ports.items():
            expected_pipe = r'\\.\pipe\%s_%s' % (mock_instance.uuid,
                                                 port_type)
            expected_calls.append(mock.call(mock_instance.name,
                                            port_number,
                                            expected_pipe))
        mock_set_conn = self._vmops._vmutils.set_vm_serial_port_connection
        mock_set_conn.assert_has_calls(expected_calls)
    def test_list_instance_uuids(self):
        """list_instance_uuids flattens the uuids from the VM notes."""
        fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
        with mock.patch.object(self._vmops._vmutils,
                               'list_instance_notes') as mock_list_notes:
            mock_list_notes.return_value = [('fake_name', [fake_uuid])]
            response = self._vmops.list_instance_uuids()
            mock_list_notes.assert_called_once_with()
        self.assertEqual(response, [fake_uuid])
    def test_copy_vm_dvd_disks(self):
        """Every attached DVD image is copied to the remote instance dir."""
        fake_paths = [mock.sentinel.FAKE_DVD_PATH1,
                      mock.sentinel.FAKE_DVD_PATH2]
        mock_copy = self._vmops._pathutils.copyfile
        mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths
        mock_get_dvd_disk_paths.return_value = fake_paths
        self._vmops._pathutils.get_instance_dir.return_value = (
            mock.sentinel.FAKE_DEST_PATH)
        self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME,
                                      mock.sentinel.FAKE_DEST_HOST)
        mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME)
        self._vmops._pathutils.get_instance_dir.assert_called_once_with(
            mock.sentinel.FAKE_VM_NAME,
            remote_server=mock.sentinel.FAKE_DEST_HOST)
        self.assertEqual(2, mock_copy.call_count)
        mock_copy.assert_has_calls([mock.call(mock.sentinel.FAKE_DVD_PATH1,
                                              mock.sentinel.FAKE_DEST_PATH),
                                    mock.call(mock.sentinel.FAKE_DVD_PATH2,
                                              mock.sentinel.FAKE_DEST_PATH)])
    def test_plug_vifs(self):
        """plug_vifs delegates each vif to the vif driver, in order."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        fake_vif1 = {'id': mock.sentinel.ID1,
                     'type': mock.sentinel.vif_type1}
        fake_vif2 = {'id': mock.sentinel.ID2,
                     'type': mock.sentinel.vif_type2}
        mock_network_info = [fake_vif1, fake_vif2]
        calls = [mock.call(mock_instance, fake_vif1),
                 mock.call(mock_instance, fake_vif2)]
        self._vmops.plug_vifs(mock_instance,
                              network_info=mock_network_info)
        self._vmops._vif_driver.plug.assert_has_calls(calls)
    def test_unplug_vifs(self):
        """unplug_vifs delegates each vif to the vif driver, in order."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        fake_vif1 = {'id': mock.sentinel.ID1,
                     'type': mock.sentinel.vif_type1}
        fake_vif2 = {'id': mock.sentinel.ID2,
                     'type': mock.sentinel.vif_type2}
        mock_network_info = [fake_vif1, fake_vif2]
        calls = [mock.call(mock_instance, fake_vif1),
                 mock.call(mock_instance, fake_vif2)]
        self._vmops.unplug_vifs(mock_instance,
                                network_info=mock_network_info)
        self._vmops._vif_driver.unplug.assert_has_calls(calls)
    def _setup_remotefx_mocks(self):
        """Return a fake instance with RemoteFX flavor extra specs set."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_instance.flavor.extra_specs = {
            'os:resolution': os_win_const.REMOTEFX_MAX_RES_1920x1200,
            'os:monitors': '2',
            'os:vram': '256'}
        return mock_instance
    def test_configure_remotefx_not_required(self):
        """No RemoteFX extra specs and feature disabled: nothing happens."""
        self.flags(enable_remotefx=False, group='hyperv')
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._configure_remotefx(mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx_exception_enable_config(self):
        """RemoteFX requested by the flavor but disabled in config: error."""
        self.flags(enable_remotefx=False, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self.assertRaises(exception.InstanceUnacceptable,
                          self._vmops._configure_remotefx,
                          mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx_exception_server_feature(self):
        """Host lacks the RemoteFX server feature: error."""
        self.flags(enable_remotefx=True, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self._vmops._hostutils.check_server_feature.return_value = False
        self.assertRaises(exception.InstanceUnacceptable,
                          self._vmops._configure_remotefx,
                          mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx_exception_vm_gen(self):
        """VM generation does not support RemoteFX: error."""
        self.flags(enable_remotefx=True, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self._vmops._hostutils.check_server_feature.return_value = True
        self._vmops._vmutils.vm_gen_supports_remotefx.return_value = False
        self.assertRaises(exception.InstanceUnacceptable,
                          self._vmops._configure_remotefx,
                          mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx(self):
        """The RemoteFX adapter is enabled with the flavor's specs.

        Monitors and vram come from the extra specs; vram is converted from
        MB to bytes.
        """
        self.flags(enable_remotefx=True, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self._vmops._hostutils.check_server_feature.return_value = True
        self._vmops._vmutils.vm_gen_supports_remotefx.return_value = True
        extra_specs = mock_instance.flavor.extra_specs
        self._vmops._configure_remotefx(mock_instance,
                                        constants.VM_GEN_1)
        mock_enable_remotefx = (
            self._vmops._vmutils.enable_remotefx_video_adapter)
        mock_enable_remotefx.assert_called_once_with(
            mock_instance.name, int(extra_specs['os:monitors']),
            extra_specs['os:resolution'],
            int(extra_specs['os:vram']) * units.Mi)
    @mock.patch.object(vmops.VMOps, '_get_vm_state')
    def test_check_hotplug_available_vm_disabled(self, mock_get_vm_state):
        """A powered-off VM always allows (cold) attach.

        Neither the host version nor the VM generation should be checked.
        """
        fake_vm = fake_instance.fake_instance_obj(self.context)
        mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED
        result = self._vmops._check_hotplug_available(fake_vm)
        self.assertTrue(result)
        mock_get_vm_state.assert_called_once_with(fake_vm.name)
        self.assertFalse(
            self._vmops._hostutils.check_min_windows_version.called)
        self.assertFalse(self._vmops._vmutils.get_vm_generation.called)
    @mock.patch.object(vmops.VMOps, '_get_vm_state')
    def _test_check_hotplug_available(
            self, mock_get_vm_state, expected_result=False,
            vm_gen=constants.VM_GEN_2, windows_version=_WIN_VERSION_10):
        """Helper: hotplug on a running VM needs Gen 2 and Windows >= 10."""
        fake_vm = fake_instance.fake_instance_obj(self.context)
        mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_ENABLED
        self._vmops._vmutils.get_vm_generation.return_value = vm_gen
        fake_check_win_vers = self._vmops._hostutils.check_min_windows_version
        fake_check_win_vers.return_value = (
            windows_version == self._WIN_VERSION_10)
        result = self._vmops._check_hotplug_available(fake_vm)
        self.assertEqual(expected_result, result)
        mock_get_vm_state.assert_called_once_with(fake_vm.name)
        fake_check_win_vers.assert_called_once_with(10, 0)
    def test_check_if_hotplug_available(self):
        """Gen 2 VM on Windows 10: hotplug available."""
        self._test_check_hotplug_available(expected_result=True)
    def test_check_if_hotplug_available_gen1(self):
        """Gen 1 VM: hotplug not available."""
        self._test_check_hotplug_available(
            expected_result=False, vm_gen=constants.VM_GEN_1)
    def test_check_if_hotplug_available_win_6_3(self):
        """Windows 6.3 host: hotplug not available."""
        self._test_check_hotplug_available(
            expected_result=False, windows_version=self._WIN_VERSION_6_3)
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_attach_interface(self, mock_check_hotplug_available):
        """attach_interface plugs the vif and creates the NIC."""
        mock_check_hotplug_available.return_value = True
        fake_vm = fake_instance.fake_instance_obj(self.context)
        fake_vif = test_virtual_interface.fake_vif
        self._vmops.attach_interface(fake_vm, fake_vif)
        mock_check_hotplug_available.assert_called_once_with(fake_vm)
        self._vmops._vif_driver.plug.assert_called_once_with(
            fake_vm, fake_vif)
        self._vmops._vmutils.create_nic.assert_called_once_with(
            fake_vm.name, fake_vif['id'], fake_vif['address'])
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_attach_interface_failed(self, mock_check_hotplug_available):
        """Hotplug unavailable: attach_interface raises InterfaceAttachFailed."""
        mock_check_hotplug_available.return_value = False
        self.assertRaises(exception.InterfaceAttachFailed,
                          self._vmops.attach_interface,
                          mock.MagicMock(), mock.sentinel.fake_vif)
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_detach_interface(self, mock_check_hotplug_available):
        """detach_interface unplugs the vif and destroys the NIC."""
        mock_check_hotplug_available.return_value = True
        fake_vm = fake_instance.fake_instance_obj(self.context)
        fake_vif = test_virtual_interface.fake_vif
        self._vmops.detach_interface(fake_vm, fake_vif)
        mock_check_hotplug_available.assert_called_once_with(fake_vm)
        self._vmops._vif_driver.unplug.assert_called_once_with(
            fake_vm, fake_vif)
        self._vmops._vmutils.destroy_nic.assert_called_once_with(
            fake_vm.name, fake_vif['id'])
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_detach_interface_failed(self, mock_check_hotplug_available):
        """Hotplug unavailable: detach_interface raises InterfaceDetachFailed."""
        mock_check_hotplug_available.return_value = False
        self.assertRaises(exception.InterfaceDetachFailed,
                          self._vmops.detach_interface,
                          mock.MagicMock(), mock.sentinel.fake_vif)
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_detach_interface_missing_instance(self, mock_check_hotplug):
        """A vanished VM is reported as InterfaceDetachFailed."""
        mock_check_hotplug.side_effect = os_win_exc.HyperVVMNotFoundException(
            vm_name='fake_vm')
        self.assertRaises(exception.InterfaceDetachFailed,
                          self._vmops.detach_interface,
                          mock.MagicMock(), mock.sentinel.fake_vif)
    @mock.patch('nova.virt.configdrive.required_by')
    @mock.patch.object(vmops.VMOps, '_create_root_vhd')
    @mock.patch.object(vmops.VMOps, 'get_image_vm_generation')
    @mock.patch.object(vmops.VMOps, '_attach_drive')
    @mock.patch.object(vmops.VMOps, '_create_config_drive')
    @mock.patch.object(vmops.VMOps, 'attach_config_drive')
    @mock.patch.object(vmops.VMOps, '_detach_config_drive')
    @mock.patch.object(vmops.VMOps, 'power_on')
    def test_rescue_instance(self, mock_power_on,
                             mock_detach_config_drive,
                             mock_attach_config_drive,
                             mock_create_config_drive,
                             mock_attach_drive,
                             mock_get_image_vm_gen,
                             mock_create_root_vhd,
                             mock_configdrive_required):
        """Happy-path rescue: swap root disks and rebuild the config drive.

        The original root disk is detached and re-attached as a secondary
        SCSI disk, the rescue image becomes the boot disk, and a rescue
        config drive is created and attached.
        """
        mock_image_meta = mock.MagicMock()
        mock_vm_gen = constants.VM_GEN_2
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_configdrive_required.return_value = True
        mock_create_root_vhd.return_value = mock.sentinel.rescue_vhd_path
        mock_get_image_vm_gen.return_value = mock_vm_gen
        self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen
        self._vmops._pathutils.lookup_root_vhd_path.return_value = (
            mock.sentinel.root_vhd_path)
        mock_create_config_drive.return_value = (
            mock.sentinel.rescue_configdrive_path)
        self._vmops.rescue_instance(self.context,
                                    mock_instance,
                                    mock.sentinel.network_info,
                                    mock_image_meta,
                                    mock.sentinel.rescue_password)
        mock_get_image_vm_gen.assert_called_once_with(
            mock_instance.uuid, mock_image_meta)
        self._vmops._vmutils.detach_vm_disk.assert_called_once_with(
            mock_instance.name, mock.sentinel.root_vhd_path,
            is_physical=False)
        mock_attach_drive.assert_called_once_with(
            mock_instance.name, mock.sentinel.rescue_vhd_path, 0,
            self._vmops._ROOT_DISK_CTRL_ADDR,
            vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen])
        self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
            mock_instance.name, mock.sentinel.root_vhd_path,
            drive_type=constants.DISK)
        mock_detach_config_drive.assert_called_once_with(mock_instance.name)
        mock_create_config_drive.assert_called_once_with(
            self.context, mock_instance,
            injected_files=None,
            admin_password=mock.sentinel.rescue_password,
            network_info=mock.sentinel.network_info,
            rescue=True)
        mock_attach_config_drive.assert_called_once_with(
            mock_instance, mock.sentinel.rescue_configdrive_path,
            mock_vm_gen)
    @mock.patch.object(vmops.VMOps, '_create_root_vhd')
    @mock.patch.object(vmops.VMOps, 'get_image_vm_generation')
    @mock.patch.object(vmops.VMOps, 'unrescue_instance')
    def _test_rescue_instance_exception(self, mock_unrescue,
                                        mock_get_image_vm_gen,
                                        mock_create_root_vhd,
                                        wrong_vm_gen=False,
                                        boot_from_volume=False,
                                        expected_exc=None):
        """Helper: rescue failures raise and trigger an unrescue rollback."""
        mock_vm_gen = constants.VM_GEN_1
        image_vm_gen = (mock_vm_gen
                        if not wrong_vm_gen else constants.VM_GEN_2)
        mock_image_meta = mock.MagicMock()
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_get_image_vm_gen.return_value = image_vm_gen
        self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen
        # No root VHD path simulates a boot-from-volume instance.
        self._vmops._pathutils.lookup_root_vhd_path.return_value = (
            mock.sentinel.root_vhd_path if not boot_from_volume else None)
        self.assertRaises(expected_exc,
                          self._vmops.rescue_instance,
                          self.context, mock_instance,
                          mock.sentinel.network_info,
                          mock_image_meta,
                          mock.sentinel.rescue_password)
        mock_unrescue.assert_called_once_with(mock_instance)
    def test_rescue_instance_wrong_vm_gen(self):
        # Test the case when the rescue image requires a different
        # vm generation than the actual rescued instance.
        self._test_rescue_instance_exception(
            wrong_vm_gen=True,
            expected_exc=exception.ImageUnacceptable)
    def test_rescue_instance_boot_from_volume(self):
        # Rescuing instances booted from volume is not supported.
        self._test_rescue_instance_exception(
            boot_from_volume=True,
            expected_exc=exception.InstanceNotRescuable)
    @mock.patch.object(fileutils, 'delete_if_exists')
    @mock.patch.object(vmops.VMOps, '_attach_drive')
    @mock.patch.object(vmops.VMOps, 'attach_config_drive')
    @mock.patch.object(vmops.VMOps, '_detach_config_drive')
    @mock.patch.object(vmops.VMOps, 'power_on')
    @mock.patch.object(vmops.VMOps, 'power_off')
    def test_unrescue_instance(self, mock_power_on, mock_power_off,
                               mock_detach_config_drive,
                               mock_attach_configdrive,
                               mock_attach_drive,
                               mock_delete_if_exists):
        """Unrescue restores the original root disk and config drive.

        Both the rescue and the original root disk are detached, the
        original root disk is re-attached as the boot disk, the rescue VHD
        and rescue config drive are deleted, the original config drive is
        re-attached, and the VM is powered back on.
        """
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_vm_gen = constants.VM_GEN_2
        self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen
        self._vmops._vmutils.is_disk_attached.return_value = False
        # First lookup returns the original root disk, second the rescue one.
        self._vmops._pathutils.lookup_root_vhd_path.side_effect = (
            mock.sentinel.root_vhd_path, mock.sentinel.rescue_vhd_path)
        self._vmops._pathutils.lookup_configdrive_path.return_value = (
            mock.sentinel.configdrive_path)
        self._vmops.unrescue_instance(mock_instance)
        self._vmops._pathutils.lookup_root_vhd_path.assert_has_calls(
            [mock.call(mock_instance.name),
             mock.call(mock_instance.name, rescue=True)])
        self._vmops._vmutils.detach_vm_disk.assert_has_calls(
            [mock.call(mock_instance.name,
                       mock.sentinel.root_vhd_path,
                       is_physical=False),
             mock.call(mock_instance.name,
                       mock.sentinel.rescue_vhd_path,
                       is_physical=False)])
        mock_attach_drive.assert_called_once_with(
            mock_instance.name, mock.sentinel.root_vhd_path, 0,
            self._vmops._ROOT_DISK_CTRL_ADDR,
            vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen])
        mock_detach_config_drive.assert_called_once_with(mock_instance.name,
                                                         rescue=True,
                                                         delete=True)
        mock_delete_if_exists.assert_called_once_with(
            mock.sentinel.rescue_vhd_path)
        self._vmops._vmutils.is_disk_attached.assert_called_once_with(
            mock.sentinel.configdrive_path,
            is_physical=False)
        mock_attach_configdrive.assert_called_once_with(
            mock_instance, mock.sentinel.configdrive_path, mock_vm_gen)
        mock_power_on.assert_called_once_with(mock_instance)
@mock.patch.object(vmops.VMOps, 'power_off')
def test_unrescue_instance_missing_root_image(self, mock_power_off):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.vm_state = vm_states.RESCUED
self._vmops._pathutils.lookup_root_vhd_path.return_value = None
self.assertRaises(exception.InstanceNotRescuable,
self._vmops.unrescue_instance,
mock_instance)
@mock.patch.object(volumeops.VolumeOps, 'bytes_per_sec_to_iops')
@mock.patch.object(vmops.VMOps, '_get_scoped_flavor_extra_specs')
@mock.patch.object(vmops.VMOps, '_get_instance_local_disks')
def test_set_instance_disk_qos_specs(self, mock_get_local_disks,
mock_get_scoped_specs,
mock_bytes_per_sec_to_iops):
fake_total_bytes_sec = 8
fake_total_iops_sec = 1
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_local_disks = [mock.sentinel.root_vhd_path,
mock.sentinel.eph_vhd_path]
mock_get_local_disks.return_value = mock_local_disks
mock_set_qos_specs = self._vmops._vmutils.set_disk_qos_specs
mock_get_scoped_specs.return_value = dict(
disk_total_bytes_sec=fake_total_bytes_sec)
mock_bytes_per_sec_to_iops.return_value = fake_total_iops_sec
self._vmops._set_instance_disk_qos_specs(mock_instance)
mock_bytes_per_sec_to_iops.assert_called_once_with(
fake_total_bytes_sec)
mock_get_local_disks.assert_called_once_with(mock_instance.name)
expected_calls = [mock.call(disk_path, fake_total_iops_sec)
for disk_path in mock_local_disks]
mock_set_qos_specs.assert_has_calls(expected_calls)
def test_get_instance_local_disks(self):
fake_instance_dir = 'fake_instance_dir'
fake_local_disks = [os.path.join(fake_instance_dir, disk_name)
for disk_name in ['root.vhd', 'configdrive.iso']]
fake_instance_disks = ['fake_remote_disk'] + fake_local_disks
mock_get_storage_paths = self._vmops._vmutils.get_vm_storage_paths
mock_get_storage_paths.return_value = [fake_instance_disks, []]
mock_get_instance_dir = self._vmops._pathutils.get_instance_dir
mock_get_instance_dir.return_value = fake_instance_dir
ret_val = self._vmops._get_instance_local_disks(
mock.sentinel.instance_name)
self.assertEqual(fake_local_disks, ret_val)
def test_get_scoped_flavor_extra_specs(self):
# The flavor extra spect dict contains only string values.
fake_total_bytes_sec = '8'
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.flavor.extra_specs = {
'spec_key': 'spec_value',
'quota:total_bytes_sec': fake_total_bytes_sec}
ret_val = self._vmops._get_scoped_flavor_extra_specs(
mock_instance, scope='quota')
expected_specs = {
'total_bytes_sec': fake_total_bytes_sec
}
self.assertEqual(expected_specs, ret_val)
|
|
# -*- coding: utf-8 -*-
"""
Model tests for artifact
"""
from cStringIO import StringIO
import time
from datetime import datetime, timedelta
from cgi import FieldStorage
from pylons import c, g, request, response
from nose.tools import assert_raises, assert_equals, with_setup
import mock
from mock import patch
from ming.orm import session, ThreadLocalORMSession
from webob import Request, Response, exc
from allura import model as M
from allura.lib.app_globals import Globals
from allura.lib import helpers as h
from allura.tests import TestController
from alluratest.controller import setup_global_objects
def setUp():
    """Build the test app and switch the thread-local context to the
    'test' project's wiki tool, which the discussion tests post against."""
    controller = TestController()
    controller.setUp()
    # Ensure the wiki Home page (and its discussion) exists.
    controller.app.get('/wiki/Home/')
    setup_global_objects()
    ThreadLocalORMSession.close_all()
    h.set_context('test', 'wiki', neighborhood='Projects')
    ThreadLocalORMSession.flush_all()
    ThreadLocalORMSession.close_all()
def tearDown():
    """Discard any ORM session state left over by a test."""
    ThreadLocalORMSession.close_all()
@with_setup(setUp, tearDown)
def test_discussion_methods():
    """Exercise basic Discussion accessors, stats, indexing, json and
    deletion on an empty discussion."""
    d = M.Discussion(shortname='test', name='test')
    assert d.thread_class() == M.Thread
    assert d.post_class() == M.Post
    assert d.attachment_class() == M.DiscussionAttachment
    ThreadLocalORMSession.flush_all()
    d.update_stats()
    ThreadLocalORMSession.flush_all()
    # Compare against None with 'is' (PEP 8), not '=='.
    assert d.last_post is None
    assert d.url().endswith('wiki/_discuss/')
    assert d.index()['name_s'] == 'test'
    assert d.subscription() is None
    assert d.find_posts().count() == 0
    jsn = d.__json__()
    assert jsn['name'] == d.name
    d.delete()
    ThreadLocalORMSession.flush_all()
    ThreadLocalORMSession.close_all()
@with_setup(setUp, tearDown)
def test_thread_methods():
    """Exercise Thread posting, threaded/timestamp paging, subscription
    toggling, json rendering, spam handling and deletion."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    assert t.discussion_class() == M.Discussion
    assert t.post_class() == M.Post
    assert t.attachment_class() == M.DiscussionAttachment
    p0 = t.post('This is a post')
    p1 = t.post('This is another post')
    time.sleep(0.25)  # guarantee a distinct timestamp for the reply
    p2 = t.post('This is a reply', parent_id=p0._id)
    ThreadLocalORMSession.flush_all()
    ThreadLocalORMSession.close_all()
    d = M.Discussion.query.get(shortname='test')
    t = d.threads[0]
    assert d.last_post is not None
    assert t.last_post is not None
    t.create_post_threads(t.posts)
    posts0 = t.find_posts(page=0, limit=10, style='threaded')
    posts1 = t.find_posts(page=0, limit=10, style='timestamp')
    assert posts0 != posts1
    # Mongo stores datetimes with millisecond precision; truncate the
    # microseconds before querying by timestamp.
    ts = p0.timestamp.replace(
        microsecond=int(p0.timestamp.microsecond // 1000) * 1000)
    posts2 = t.find_posts(page=0, limit=10, style='threaded', timestamp=ts)
    assert len(posts2) > 0
    assert 'wiki/_discuss/' in t.url()
    assert t.index()['views_i'] == 0
    assert not t.subscription
    t.subscription = True
    assert t.subscription
    t.subscription = False
    assert not t.subscription
    assert t.top_level_posts().count() == 2
    assert t.post_count == 3
    jsn = t.__json__()
    assert '_id' in jsn
    assert_equals(len(jsn['posts']), 3)
    # BUG FIX: this was '(p.approve() for p in (p0, p1))' -- a generator
    # expression that was never iterated, so approve() never ran.
    for post in (p0, p1):
        post.approve()
    assert t.num_replies == 2
    t.spam()
    assert t.num_replies == 0
    ThreadLocalORMSession.flush_all()
    assert len(t.find_posts()) == 0
    t.delete()
@with_setup(setUp, tearDown)
def test_thread_new():
    """Thread.new retries with a fresh nonce when the generated _id
    collides with an existing thread."""
    with mock.patch('allura.model.discuss.h.nonce') as nonce:
        # The first two calls collide on 'deadbeef'; the retry for the
        # second thread then yields 'beefdead'.
        nonce.side_effect = ['deadbeef', 'deadbeef', 'beefdead']
        d = M.Discussion(shortname='test', name='test')
        t1 = M.Thread.new(discussion_id=d._id, subject='Test Thread One')
        t2 = M.Thread.new(discussion_id=d._id, subject='Test Thread Two')
        ThreadLocalORMSession.flush_all()
        for thread in (t1, t2):
            session(thread).expunge(thread)
        refreshed1 = M.Thread.query.get(_id=t1._id)
        refreshed2 = M.Thread.query.get(_id=t2._id)
        assert_equals(t1._id, 'deadbeef')
        assert_equals(t2._id, 'beefdead')
        assert_equals(refreshed1.subject, 'Test Thread One')
        assert_equals(refreshed2.subject, 'Test Thread Two')
@with_setup(setUp, tearDown)
def test_post_methods():
    """Exercise Post accessors, history, json, approval, spam and delete,
    tracking the thread's num_replies as posts change state."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    p = t.post('This is a post')
    p2 = t.post('This is another post')
    assert p.discussion_class() == M.Discussion
    assert p.thread_class() == M.Thread
    assert p.attachment_class() == M.DiscussionAttachment
    p.commit()
    assert p.parent is None
    assert p.subject == 'Test Thread'
    assert p.attachments.count() == 0
    assert 'Test Admin' in p.summary()
    assert 'wiki/_discuss' in p.url()
    assert p.reply_subject() == 'Re: Test Thread'
    assert p.link_text() == p.subject
    ss = p.history().first()
    assert 'Version' in ss.index()['title_s']
    assert '#' in ss.shorthand_id()
    jsn = p.__json__()
    assert jsn["thread_id"] == t._id
    # BUG FIX: this was '(p.approve() for p in (p, p2))' -- a generator
    # expression that was never iterated (and shadowed p), so approve()
    # never ran.
    for post in (p, p2):
        post.approve()
    assert t.num_replies == 1
    p2.spam()
    assert t.num_replies == 0
    p.spam()
    assert t.num_replies == 0
    p.delete()
    assert t.num_replies == 0
@with_setup(setUp, tearDown)
def test_attachment_methods():
    """Attachments can be scoped to a post, a thread, or a whole
    discussion, and attached files are mentioned in the notification."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    p = t.post('This is a post')
    # Post-level attachment: carries discussion, thread and post ids.
    p_att = p.attach('foo.text', StringIO('Hello, world!'),
                     discussion_id=d._id,
                     thread_id=t._id,
                     post_id=p._id)
    # Thread-level attachment: no post id.
    t_att = p.attach('foo2.text', StringIO('Hello, thread!'),
                     discussion_id=d._id,
                     thread_id=t._id)
    # Discussion-level attachment: discussion id only.
    d_att = p.attach('foo3.text', StringIO('Hello, discussion!'),
                     discussion_id=d._id)
    ThreadLocalORMSession.flush_all()
    assert p_att.post == p
    assert p_att.thread == t
    assert p_att.discussion == d
    for att in (p_att, t_att, d_att):
        assert 'wiki/_discuss' in att.url()
        assert 'attachment/' in att.url()
    # Test notification in mail
    t = M.Thread.new(discussion_id=d._id, subject='Test comment notification')
    # Simulate a browser file upload via a cgi.FieldStorage.
    fs = FieldStorage()
    fs.name='file_info'
    fs.filename='fake.txt'
    fs.type = 'text/plain'
    fs.file=StringIO('this is the content of the fake file\n')
    p = t.post(text=u'test message', forum= None, subject= '', file_info=fs)
    ThreadLocalORMSession.flush_all()
    n = M.Notification.query.get(subject=u'[test:wiki] Test comment notification')
    # The notification body must mention the attachment name, size and type.
    assert_equals(u'test message\n\n\nAttachment: fake.txt (37 Bytes; text/plain)', n.text)
@with_setup(setUp, tearDown)
def test_discussion_delete():
    """Deleting a discussion also removes its ArtifactReference."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    p = t.post('This is a post')
    p.attach(
        'foo.text', StringIO(''),
        discussion_id=d._id, thread_id=t._id, post_id=p._id)
    # Create the reference document (side effect) and remember the
    # discussion's index id so we can verify the reference is gone.
    M.ArtifactReference.from_artifact(d)
    rid = d.index_id()
    ThreadLocalORMSession.flush_all()
    d.delete()
    ThreadLocalORMSession.flush_all()
    assert_equals(M.ArtifactReference.query.find(dict(_id=rid)).count(), 0)
@with_setup(setUp, tearDown)
def test_thread_delete():
    """A thread holding a post with an attachment deletes cleanly."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    p = t.post('This is a post')
    p.attach(
        'foo.text', StringIO(''),
        discussion_id=d._id, thread_id=t._id, post_id=p._id)
    ThreadLocalORMSession.flush_all()
    t.delete()
@with_setup(setUp, tearDown)
def test_post_delete():
    """A post with an attachment deletes cleanly."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    p = t.post('This is a post')
    p.attach(
        'foo.text', StringIO(''),
        discussion_id=d._id, thread_id=t._id, post_id=p._id)
    ThreadLocalORMSession.flush_all()
    p.delete()
@with_setup(setUp, tearDown)
def test_post_permission_check():
    """Anonymous users may not post; ignore_security bypasses the check."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    c.user = M.User.anonymous()
    # Use nose's assert_raises (imported at module top) instead of the
    # manual try/except/assert-False dance -- same check, clearer intent
    # and a better failure message.
    assert_raises(exc.HTTPUnauthorized, t.post,
                  'This post will fail the check.')
    t.post('This post will pass the check.', ignore_security=True)
@with_setup(setUp, tearDown)
def test_post_url_paginated():
    """url_paginated() must point at the page (given the effective page
    size) that contains the post, anchored by the post's slug."""
    d = M.Discussion(shortname='test', name='test')
    t = M.Thread(discussion_id=d._id, subject='Test Thread')
    p = []  # posts in display order
    # Give every post a strictly increasing, explicit timestamp so the
    # threaded display order below is deterministic.
    ts = datetime.utcnow() - timedelta(days=1)
    for i in range(5):
        ts += timedelta(minutes=1)
        p.append(t.post('This is a post #%s' % i, timestamp=ts))
    # Insert replies at the exact positions they occupy in threaded order.
    ts += timedelta(minutes=1)
    p.insert(1, t.post(
        'This is reply #0 to post #0', parent_id=p[0]._id, timestamp=ts))
    ts += timedelta(minutes=1)
    p.insert(2, t.post(
        'This is reply #1 to post #0', parent_id=p[0]._id, timestamp=ts))
    ts += timedelta(minutes=1)
    p.insert(4, t.post(
        'This is reply #0 to post #1', parent_id=p[3]._id, timestamp=ts))
    ts += timedelta(minutes=1)
    p.insert(6, t.post(
        'This is reply #0 to post #2', parent_id=p[5]._id, timestamp=ts))
    ts += timedelta(minutes=1)
    p.insert(7, t.post(
        'This is reply #1 to post #2', parent_id=p[5]._id, timestamp=ts))
    ts += timedelta(minutes=1)
    p.insert(8, t.post(
        'This is reply #0 to reply #1 to post #2',
        parent_id=p[7]._id, timestamp=ts))
    # with default paging limit
    for _p in p:
        url = t.url() + '?limit=50#' + _p.slug
        assert _p.url_paginated() == url, _p.url_paginated()
    # with user paging limit
    limit = 3
    c.user.set_pref('results_per_page', limit)
    for i, _p in enumerate(p):
        # NOTE: Python 2 integer division -- floor of position / page size.
        page = i / limit
        url = t.url() + '?limit=%s' % limit
        if page > 0:
            url += '&page=%s' % page
        url += '#' + _p.slug
        assert _p.url_paginated() == url, _p.url_paginated()
@with_setup(setUp, tearDown)
def test_post_notify():
    """Posting notifies the discussion's monitoring email, unless the
    project has notifications disabled."""
    d = M.Discussion(shortname='test', name='test')
    d.monitoring_email = 'darthvader@deathstar.org'
    t = M.Thread.new(discussion_id=d._id, subject='Test Thread')
    with patch('allura.model.notification.Notification.send_simple') as send:
        t.post('This is a post')
        send.assert_called_with(d.monitoring_email)
    c.app.config.project.notifications_disabled = True
    with patch('allura.model.notification.Notification.send_simple') as send:
        t.post('Another post')
        # Simpler and stricter than the old try/except-AssertionError/else
        # dance: with notifications disabled, send_simple must not be
        # called at all.
        assert not send.called, 'send_simple must not be called'
|
|
import os
import sys
import time
import math
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.shared_randomstreams
from util import datapy, color, paramgraphics
from optimization import optimizer
from layer import FullyConnected, nonlinearity
from layer import GaussianHidden, NoParamsBernoulliVisiable,Pegasos
from layer import ConvMaxPool, UnpoolConvNon
def cmmva_6layer_dropout_mnist_60000(seed=0, start_layer=0, end_layer=1, dropout_flag=1, drop_inverses_flag=0, learning_rate=3e-5, predir=None, n_batch=144,
            dataset='mnist.pkl.gz', batch_size=500, nkerns=[20, 50], n_hidden=[500, 50]):
    """
    Implementation of convolutional MMVA (max-margin variational
    autoencoder): a conv recognition network, a Gaussian latent layer, a
    deconv generation network, and a Pegasos max-margin classifier on the
    recognition features, trained jointly on the 60000-image MNIST set.

    NOTE(review): Python 2 code (print statements, has_key, xrange).
    NOTE(review): the mutable defaults nkerns=[20, 50] and
    n_hidden=[500, 50] are shared across calls; both are reassigned just
    below, so this is harmless here, but beware if that changes.
    Results (logs, model snapshots, samples) are written under logdir.
    """
    #cp->cd->cpd->cd->c
    nkerns=[32, 32, 64, 64, 64]
    drops=[1, 0, 1, 0, 0, 1]
    #skerns=[5, 3, 3, 3, 3]
    #pools=[2, 1, 1, 2, 1]
    #modes=['same']*5
    n_hidden=[500, 50]
    drop_inverses=[1,]
    # 28->12->12->5->5/5*5*64->500->50->500->5*5*64/5->5->12->12->28
    if dataset=='mnist.pkl.gz':
        dim_input=(28, 28)
        colorImg=False
    # D weighs the generative lower bound, C the hinge loss; both may be
    # overridden via environment variables.
    D = 1.0
    C = 1.0
    if os.environ.has_key('C'):
        C = np.cast['float32'](float((os.environ['C'])))
    if os.environ.has_key('D'):
        D = np.cast['float32'](float((os.environ['D'])))
    color.printRed('D '+str(D)+' C '+str(C))
    logdir = 'results/supervised/cmmva/mnist/cmmva_6layer_60000_'+str(nkerns)+str(n_hidden)+'_D_'+str(D)+'_C_'+str(C)+'_'+str(learning_rate)+'_'
    if predir is not None:
        logdir +='pre_'
    if dropout_flag == 1:
        logdir += ('dropout_'+str(drops)+'_')
    if drop_inverses_flag==1:
        logdir += ('inversedropout_'+str(drop_inverses)+'_')
    logdir += str(int(time.time()))+'/'
    if not os.path.exists(logdir): os.makedirs(logdir)
    print 'logdir:', logdir, 'predir', predir
    print 'cmmva_6layer_mnist_60000', nkerns, n_hidden, seed, drops, drop_inverses, dropout_flag, drop_inverses_flag
    with open(logdir+'hook.txt', 'a') as f:
        print >>f, 'logdir:', logdir, 'predir', predir
        print >>f, 'cmmva_6layer_mnist_60000', nkerns, n_hidden, seed, drops, drop_inverses, dropout_flag, drop_inverses_flag
    datasets = datapy.load_data_gpu_60000(dataset, have_matrix=True)
    train_set_x, train_set_y, train_y_matrix = datasets[0]
    valid_set_x, valid_set_y, valid_y_matrix = datasets[1]
    test_set_x, test_set_y, test_y_matrix = datasets[2]
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'
    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels
    y_matrix = T.imatrix('y_matrix')
    random_z = T.matrix('random_z')
    # Integer switches that turn (inverse) dropout on/off at run time.
    drop = T.iscalar('drop')
    drop_inverse = T.iscalar('drop_inverse')
    activation = nonlinearity.relu
    rng = np.random.RandomState(seed)
    rng_share = theano.tensor.shared_randomstreams.RandomStreams(0)
    input_x = x.reshape((batch_size, 1, 28, 28))
    # ---- Recognition (encoder) network: 5 conv layers + 1 FC layer ----
    recg_layer = []
    cnn_output = []
    #1
    recg_layer.append(ConvMaxPool.ConvMaxPool(
        rng,
        image_shape=(batch_size, 1, 28, 28),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2),
        border_mode='valid',
        activation=activation
    ))
    if drops[0]==1:
        cnn_output.append(recg_layer[-1].drop_output(input=input_x, drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(input=input_x))
    #2
    recg_layer.append(ConvMaxPool.ConvMaxPool(
        rng,
        image_shape=(batch_size, nkerns[0], 12, 12),
        filter_shape=(nkerns[1], nkerns[0], 3, 3),
        poolsize=(1, 1),
        border_mode='same',
        activation=activation
    ))
    if drops[1]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    #3
    recg_layer.append(ConvMaxPool.ConvMaxPool(
        rng,
        image_shape=(batch_size, nkerns[1], 12, 12),
        filter_shape=(nkerns[2], nkerns[1], 3, 3),
        poolsize=(2, 2),
        border_mode='valid',
        activation=activation
    ))
    if drops[2]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    #4
    recg_layer.append(ConvMaxPool.ConvMaxPool(
        rng,
        image_shape=(batch_size, nkerns[2], 5, 5),
        filter_shape=(nkerns[3], nkerns[2], 3, 3),
        poolsize=(1, 1),
        border_mode='same',
        activation=activation
    ))
    if drops[3]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    #5
    recg_layer.append(ConvMaxPool.ConvMaxPool(
        rng,
        image_shape=(batch_size, nkerns[3], 5, 5),
        filter_shape=(nkerns[4], nkerns[3], 3, 3),
        poolsize=(1, 1),
        border_mode='same',
        activation=activation
    ))
    if drops[4]==1:
        cnn_output.append(recg_layer[-1].drop_output(cnn_output[-1], drop=drop, rng=rng_share))
    else:
        cnn_output.append(recg_layer[-1].output(cnn_output[-1]))
    mlp_input_x = cnn_output[-1].flatten(2)
    activations = []
    #1
    recg_layer.append(FullyConnected.FullyConnected(
        rng=rng,
        n_in= 5 * 5 * nkerns[-1],
        n_out=n_hidden[0],
        activation=activation
    ))
    if drops[-1]==1:
        activations.append(recg_layer[-1].drop_output(input=mlp_input_x, drop=drop, rng=rng_share))
    else:
        activations.append(recg_layer[-1].output(input=mlp_input_x))
    # ---- Max-margin classifier on the selected recognition features ----
    features = T.concatenate(activations[start_layer:end_layer], axis=1)
    color.printRed('feature dimension: '+str(np.sum(n_hidden[start_layer:end_layer])))
    classifier = Pegasos.Pegasos(
            input= features,
            rng=rng,
            n_in=np.sum(n_hidden[start_layer:end_layer]),
            n_out=10,
            weight_decay=0,
            loss=1,
            std=1e-2
        )
    # ---- Gaussian latent layer (samples z via the reparameterization) ----
    recg_layer.append(GaussianHidden.GaussianHidden(
            rng=rng,
            input=activations[-1],
            n_in=n_hidden[0],
            n_out = n_hidden[1],
            activation=None
        ))
    z = recg_layer[-1].sample_z(rng_share)
    # ---- Generation (decoder) network: 2 FC layers + 5 deconv layers ----
    # z_output chains the decoder on the recognition sample z;
    # random_z_output chains it on externally supplied random_z codes.
    gene_layer = []
    z_output = []
    random_z_output = []
    #1
    gene_layer.append(FullyConnected.FullyConnected(
            rng=rng,
            n_in=n_hidden[1],
            n_out = n_hidden[0],
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z))
    random_z_output.append(gene_layer[-1].output(input=random_z))
    #2
    gene_layer.append(FullyConnected.FullyConnected(
            rng=rng,
            n_in=n_hidden[0],
            n_out = 5*5*nkerns[-1],
            activation=activation
        ))
    if drop_inverses[0]==1:
        z_output.append(gene_layer[-1].drop_output(input=z_output[-1], drop=drop_inverse, rng=rng_share))
        random_z_output.append(gene_layer[-1].drop_output(input=random_z_output[-1], drop=drop_inverse, rng=rng_share))
    else:
        z_output.append(gene_layer[-1].output(input=z_output[-1]))
        random_z_output.append(gene_layer[-1].output(input=random_z_output[-1]))
    input_z = z_output[-1].reshape((batch_size, nkerns[-1], 5, 5))
    input_random_z = random_z_output[-1].reshape((n_batch, nkerns[-1], 5, 5))
    #1
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-1], 5, 5),
            filter_shape=(nkerns[-2], nkerns[-1], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=input_z))
    random_z_output.append(gene_layer[-1].output_random_generation(input=input_random_z, n_batch=n_batch))
    #2
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-2], 5, 5),
            filter_shape=(nkerns[-3], nkerns[-2], 3, 3),
            poolsize=(2, 2),
            border_mode='full',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    #3
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-3], 12, 12),
            filter_shape=(nkerns[-4], nkerns[-3], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    #4
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-4], 12, 12),
            filter_shape=(nkerns[-5], nkerns[-4], 3, 3),
            poolsize=(1, 1),
            border_mode='same',
            activation=activation
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    #5 stochastic layer
    # for the last layer, the nonliearity should be sigmoid to achieve mean of Bernoulli
    gene_layer.append(UnpoolConvNon.UnpoolConvNon(
            rng,
            image_shape=(batch_size, nkerns[-5], 12, 12),
            filter_shape=(1, nkerns[-5], 5, 5),
            poolsize=(2, 2),
            border_mode='full',
            activation=nonlinearity.sigmoid
        ))
    z_output.append(gene_layer[-1].output(input=z_output[-1]))
    random_z_output.append(gene_layer[-1].output_random_generation(input=random_z_output[-1], n_batch=n_batch))
    gene_layer.append(NoParamsBernoulliVisiable.NoParamsBernoulliVisiable(
            #rng=rng,
            #mean=z_output[-1],
            #data=input_x,
        ))
    logpx = gene_layer[-1].logpx(mean=z_output[-1], data=input_x)
    # 4-D tensor of random generation
    random_x_mean = random_z_output[-1]
    random_x = gene_layer[-1].sample_x(rng_share, random_x_mean)
    # ---- Objective: weighted variational lower bound minus hinge loss ----
    #L = (logpx + logpz - logqz).sum()
    lowerbound = (
        (logpx + recg_layer[-1].logpz - recg_layer[-1].logqz).sum()
    )
    hinge_loss = classifier.hinge_loss(10, y, y_matrix) * batch_size
    #
    # D is redundent, you could just set D = 1 and tune C and weight decay parameters
    # beacuse AdaM is scale-invariant
    #
    cost = D * lowerbound - C * hinge_loss #- classifier.L2_reg
    px = (logpx.sum())
    pz = (recg_layer[-1].logpz.sum())
    qz = (- recg_layer[-1].logqz.sum())
    params=[]
    for g in gene_layer:
        params+=g.params
    for r in recg_layer:
        params+=r.params
    params+=classifier.params
    gparams = [T.grad(cost, param) for param in params]
    weight_decay=1.0/n_train_batches
    epsilon=1e-8
    #get_optimizer = optimizer.get_adam_optimizer(learning_rate=learning_rate)
    # Learning rate lives in a shared variable so it can be decayed later.
    l_r = theano.shared(np.asarray(learning_rate, dtype=np.float32))
    get_optimizer = optimizer.get_adam_optimizer_max(learning_rate=l_r,
        decay1=0.1, decay2=0.001, weight_decay=weight_decay, epsilon=epsilon)
    with open(logdir+'hook.txt', 'a') as f:
        print >>f, 'AdaM', learning_rate, weight_decay, epsilon
    updates = get_optimizer(params,gparams)
    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    test_model = theano.function(
        inputs=[index],
        outputs=[classifier.errors(y), lowerbound, hinge_loss, cost],
        #outputs=layer[-1].errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size],
            y_matrix: test_y_matrix[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0),
            drop_inverse: np.cast['int32'](0)
        }
    )
    validate_model = theano.function(
        inputs=[index],
        outputs=[classifier.errors(y), lowerbound, hinge_loss, cost],
        #outputs=layer[-1].errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size],
            y_matrix: valid_y_matrix[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0),
            drop_inverse: np.cast['int32'](0)
        }
    )
    '''
    Save parameters and activations
    '''
    parameters = theano.function(
        inputs=[],
        outputs=params,
        )
    train_activations = theano.function(
        inputs=[index],
        outputs=T.concatenate(activations, axis=1),
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0),
            #drop_inverse: np.cast['int32'](0)
            #y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    valid_activations = theano.function(
        inputs=[index],
        outputs=T.concatenate(activations, axis=1),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0),
            #drop_inverse: np.cast['int32'](0)
            #y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    test_activations = theano.function(
        inputs=[index],
        outputs=T.concatenate(activations, axis=1),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0),
            #drop_inverse: np.cast['int32'](0)
            #y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    debug_model = theano.function(
        inputs=[index],
        outputs=[classifier.errors(y), lowerbound, px, pz, qz, hinge_loss, cost],
        #updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size],
            y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](dropout_flag),
            drop_inverse: np.cast['int32'](drop_inverses_flag)
        }
    )
    random_generation = theano.function(
        inputs=[random_z],
        outputs=[random_x_mean.flatten(2), random_x.flatten(2)],
        givens={
            #drop: np.cast['int32'](0),
            drop_inverse: np.cast['int32'](0)
        }
    )
    train_bound_without_dropout = theano.function(
        inputs=[index],
        outputs=[classifier.errors(y), lowerbound, hinge_loss, cost],
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size],
            y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](0),
            drop_inverse: np.cast['int32'](0)
        }
    )
    train_model = theano.function(
        inputs=[index],
        outputs=[classifier.errors(y), lowerbound, hinge_loss, cost],
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size],
            y_matrix: train_y_matrix[index * batch_size: (index + 1) * batch_size],
            drop: np.cast['int32'](dropout_flag),
            drop_inverse: np.cast['int32'](drop_inverses_flag)
        }
    )
    # end-snippet-5
    ##################
    # Pretrain MODEL #
    ##################
    if predir is not None:
        color.printBlue('... setting parameters')
        color.printBlue(predir)
        pre_train = np.load(predir+'model.npz')
        pre_train = pre_train['model']
        # params include w and b, exclude it
        for (para, pre) in zip(params[:-2], pre_train):
            #print pre.shape
            para.set_value(pre)
        tmp = [debug_model(i) for i in xrange(n_train_batches)]
        tmp = (np.asarray(tmp)).mean(axis=0) / float(batch_size)
        print '------------------', tmp[1:5]
    # valid_error test_error epochs
    predy_test_stats = [1, 1, 0]
    predy_valid_stats = [1, 1, 0]
    best_validation_bound = -1000000.0
    best_iter = 0
    test_score = 0.
    start_time = time.clock()
    NaN_count = 0
    epoch = 0
    threshold = 0
    validation_frequency = 1
    generatition_frequency = 10
    if predir is not None:
        threshold = 0
    color.printRed('threshold, '+str(threshold) +
         ' generatition_frequency, '+str(generatition_frequency)
         +' validation_frequency, '+str(validation_frequency))
    done_looping = False
    decay_epochs=500
    n_epochs=600
    '''
    print 'test initialization...'
    pre_model = parameters()
    for i in xrange(len(pre_model)):
        pre_model[i] = np.asarray(pre_model[i])
        print pre_model[i].shape, np.mean(pre_model[i]), np.var(pre_model[i])
    print 'end test...'
    '''
    # ---- Main training loop ----
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        train_error = 0
        train_lowerbound = 0
        train_hinge_loss = 0
        train_obj = 0
        # After decay_epochs, divide the learning rate by 3 every 10 epochs.
        test_epoch = epoch - decay_epochs
        if test_epoch > 0 and test_epoch % 10 == 0:
            print l_r.get_value()
            with open(logdir+'hook.txt', 'a') as f:
                print >>f,l_r.get_value()
            l_r.set_value(np.cast['float32'](l_r.get_value()/3.0))
        tmp_start1 = time.clock()
        for minibatch_index in xrange(n_train_batches):
            #print n_train_batches
            e, l, h, o = train_model(minibatch_index)
            train_error += e
            train_lowerbound += l
            train_hinge_loss += h
            train_obj += o
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
        # NaN recovery: dump diagnostics, then restore the last saved
        # parameters and skip the rest of this epoch.
        if math.isnan(train_lowerbound):
            NaN_count+=1
            color.printRed("NaN detected. Reverting to saved best parameters")
            print '---------------NaN_count:', NaN_count
            with open(logdir+'hook.txt', 'a') as f:
                print >>f, '---------------NaN_count:', NaN_count
            tmp = [debug_model(i) for i in xrange(n_train_batches)]
            tmp = (np.asarray(tmp)).mean(axis=0) / float(batch_size)
            tmp[0]*=batch_size
            print '------------------NaN check:', tmp
            with open(logdir+'hook.txt', 'a') as f:
                print >>f, '------------------NaN check:', tmp
            model = parameters()
            for i in xrange(len(model)):
                model[i] = np.asarray(model[i]).astype(np.float32)
                print model[i].shape, np.mean(model[i]), np.var(model[i])
                print np.max(model[i]), np.min(model[i])
                print np.all(np.isfinite(model[i])), np.any(np.isnan(model[i]))
                with open(logdir+'hook.txt', 'a') as f:
                    print >>f, model[i].shape, np.mean(model[i]), np.var(model[i])
                    print >>f, np.max(model[i]), np.min(model[i])
                    print >>f, np.all(np.isfinite(model[i])), np.any(np.isnan(model[i]))
            best_before = np.load(logdir+'model.npz')
            best_before = best_before['model']
            for (para, pre) in zip(params, best_before):
                para.set_value(pre)
            tmp = [debug_model(i) for i in xrange(n_train_batches)]
            tmp = (np.asarray(tmp)).mean(axis=0) / float(batch_size)
            tmp[0]*=batch_size
            print '------------------', tmp
            continue
        n_train=n_train_batches*batch_size
        #print 'optimization_time', time.clock() - tmp_start1
        print epoch, 'stochastic training error', train_error / float(batch_size), train_lowerbound / float(n_train), train_hinge_loss / float(n_train), train_obj / float(n_train)
        with open(logdir+'hook.txt', 'a') as f:
            print >>f, epoch, 'stochastic training error', train_error / float(batch_size), train_lowerbound / float(n_train), train_hinge_loss / float(n_train), train_obj / float(n_train)
        if epoch % validation_frequency == 0:
            tmp_start2 = time.clock()
            # compute zero-one loss on validation set
            #train_stats = [train_bound_without_dropout(i) for i
            #             in xrange(n_train_batches)]
            #this_train_stats = np.mean(train_stats, axis=0)
            #this_train_stats[1:] = this_train_stats[1:]/ float(batch_size)
            test_stats = [test_model(i) for i in xrange(n_test_batches)]
            this_test_stats = np.mean(test_stats, axis=0)
            this_test_stats[1:] = this_test_stats[1:]/ float(batch_size)
            print epoch, 'test error', this_test_stats
            with open(logdir+'hook.txt', 'a') as f:
                print >>f, epoch, 'test error', this_test_stats
        # Periodically snapshot the model parameters.
        if epoch%100==0:
            model = parameters()
            for i in xrange(len(model)):
                model[i] = np.asarray(model[i]).astype(np.float32)
                #print model[i].shape, np.mean(model[i]), np.var(model[i])
            np.savez(logdir+'model-'+str(epoch), model=model)
        tmp_start4=time.clock()
        # Periodically draw samples from the generator and save them.
        if epoch % generatition_frequency == 0:
            tail='-'+str(epoch)+'.png'
            random_z = np.random.standard_normal((n_batch, n_hidden[-1])).astype(np.float32)
            _x_mean, _x = random_generation(random_z)
            #print _x.shape
            #print _x_mean.shape
            image = paramgraphics.mat_to_img(_x.T, dim_input, colorImg=colorImg)
            image.save(logdir+'samples'+tail, 'PNG')
            image = paramgraphics.mat_to_img(_x_mean.T, dim_input, colorImg=colorImg)
            image.save(logdir+'mean_samples'+tail, 'PNG')
        #print 'generation_time', time.clock() - tmp_start4
    end_time = time.clock()
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    if NaN_count > 0:
        print '---------------NaN_count:', NaN_count
        with open(logdir+'hook.txt', 'a') as f:
            print >>f, '---------------NaN_count:', NaN_count
if __name__ == '__main__':
    # Hyper-parameters may be overridden through environment variables.
    # 'key' in os.environ replaces the deprecated dict.has_key() -- same
    # behavior, and also valid on Python 3.
    predir = None
    if 'predir' in os.environ:
        predir = os.environ['predir']
    learning_rate = 1e-3
    if 'learning_rate' in os.environ:
        learning_rate = float(os.environ['learning_rate'])
    dropout_flag = 1
    if 'dropout_flag' in os.environ:
        dropout_flag = int(os.environ['dropout_flag'])
    drop_inverses_flag = 0
    if 'drop_inverses_flag' in os.environ:
        drop_inverses_flag = int(os.environ['drop_inverses_flag'])
    cmmva_6layer_dropout_mnist_60000(drop_inverses_flag=drop_inverses_flag,
        dropout_flag=dropout_flag, predir=predir, learning_rate=learning_rate)
|
|
import os
from .. import constants, logger
from . import (
base_classes,
texture,
material,
geometry,
object as object_,
utilities,
io,
api
)
class Scene(base_classes.BaseScene):
    """Class that handles the construction of a Three scene."""

    # Per-instance defaults; METADATA is copied so instances do not share
    # the mutable metadata dict.
    _defaults = {
        constants.METADATA: constants.DEFAULT_METADATA.copy(),
        constants.GEOMETRIES: [],
        constants.MATERIALS: [],
        constants.IMAGES: [],
        constants.TEXTURES: []
    }

    def __init__(self, filepath, options=None):
        """Initialize the scene.

        :param filepath: destination path for the exported scene
        :param options: optional mapping of export options
        """
        logger.debug("Scene().__init__(%s, %s)", filepath, options)
        base_classes.BaseScene.__init__(self, filepath, options or {})

        source_file = api.scene_name()
        if source_file:
            self[constants.METADATA][constants.SOURCE_FILE] = source_file

    @property
    def valid_types(self):
        """
        :return: list of valid node types
        """
        valid_types = [api.constants.MESH]

        if self.options.get(constants.CAMERAS):
            logger.info("Adding cameras to valid object types")
            valid_types.append(api.constants.CAMERA)

        if self.options.get(constants.LIGHTS):
            logger.info("Adding lights to valid object types")
            valid_types.append(api.constants.LAMP)

        return valid_types

    def geometry(self, value):
        """Find a geometry node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().geometry(%s)", value)
        return _find_node(value, self[constants.GEOMETRIES])

    def image(self, value):
        """Find a image node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        # BUG FIX: the log format string was missing its opening parenthesis
        # ("Scene().image%s)").
        logger.debug("Scene().image(%s)", value)
        return _find_node(value, self[constants.IMAGES])

    def material(self, value):
        """Find a material node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().material(%s)", value)
        return _find_node(value, self[constants.MATERIALS])

    def parse(self):
        """Execute the parsing of the scene."""
        logger.debug("Scene().parse()")
        if self.options.get(constants.MAPS):
            self._parse_textures()

        if self.options.get(constants.MATERIALS):
            self._parse_materials()

        self._parse_geometries()
        self._parse_objects()

    def texture(self, value):
        """Find a texture node that matches either a name
        or uuid value.

        :param value: name or uuid
        :type value: str
        """
        logger.debug("Scene().texture(%s)", value)
        return _find_node(value, self[constants.TEXTURES])

    def write(self):
        """Write the parsed scene to disk.

        Geometry data may either be embedded in the scene file or written
        to separate per-geometry files, depending on the export options.
        """
        logger.debug("Scene().write()")
        data = {}

        embed_anim = self.options.get(constants.EMBED_ANIMATION, True)
        embed = self.options.get(constants.EMBED_GEOMETRY, True)

        compression = self.options.get(constants.COMPRESSION)
        extension = constants.EXTENSIONS.get(
            compression,
            constants.EXTENSIONS[constants.JSON])

        export_dir = os.path.dirname(self.filepath)
        for key, value in self.items():
            if key == constants.GEOMETRIES:
                geometries = []
                for geom in value:
                    if not embed_anim:
                        geom.write_animation(export_dir)

                    geom_data = geom.copy()
                    if embed:
                        geometries.append(geom_data)
                        continue

                    # External geometry: strip the bulk data from the scene
                    # entry and write it to its own file, referenced by URL.
                    geo_type = geom_data[constants.TYPE].lower()
                    if geo_type == constants.GEOMETRY.lower():
                        geom_data.pop(constants.DATA)
                    elif geo_type == constants.BUFFER_GEOMETRY.lower():
                        geom_data.pop(constants.ATTRIBUTES)
                        geom_data.pop(constants.METADATA)

                    url = 'geometry.%s%s' % (geom.node, extension)
                    geometry_file = os.path.join(export_dir, url)

                    geom.write(filepath=geometry_file)
                    geom_data[constants.URL] = os.path.basename(url)

                    geometries.append(geom_data)

                data[key] = geometries
            elif isinstance(value, list):
                data[key] = []
                for each in value:
                    data[key].append(each.copy())
            elif isinstance(value, dict):
                data[key] = value.copy()

        io.dump(self.filepath, data, options=self.options)

        if self.options.get(constants.COPY_TEXTURES):
            texture_folder = self.options.get(constants.TEXTURE_FOLDER)
            for geo in self[constants.GEOMETRIES]:
                logger.info("Copying textures from %s", geo.node)
                geo.copy_textures(texture_folder)

    def _parse_geometries(self):
        """Locate all geometry nodes and parse them."""
        logger.debug("Scene()._parse_geometries()")

        # this is an important step. please refer to the doc string
        # on the function for more information
        api.object.prep_meshes(self.options)
        geometries = []

        # now iterate over all the extracted mesh nodes and parse each one
        for mesh in api.object.extracted_meshes():
            logger.info("Parsing geometry %s", mesh)
            geo = geometry.Geometry(mesh, self)
            geo.parse()
            geometries.append(geo)

        logger.info("Added %d geometry nodes", len(geometries))
        self[constants.GEOMETRIES] = geometries

    def _parse_materials(self):
        """Locate all non-orphaned materials and parse them."""
        logger.debug("Scene()._parse_materials()")
        materials = []

        for material_name in api.material.used_materials():
            logger.info("Parsing material %s", material_name)
            materials.append(material.Material(material_name, parent=self))

        logger.info("Added %d material nodes", len(materials))
        self[constants.MATERIALS] = materials

    def _parse_objects(self):
        """Locate all valid objects in the scene and parse them."""
        logger.debug("Scene()._parse_objects()")
        try:
            scene_name = self[constants.METADATA][constants.SOURCE_FILE]
        except KeyError:
            scene_name = constants.SCENE

        # The root object represents the scene itself.
        self[constants.OBJECT] = object_.Object(None, parent=self)
        self[constants.OBJECT][constants.TYPE] = constants.SCENE.title()
        self[constants.UUID] = utilities.id_from_name(scene_name)

        objects = []
        for node in api.object.nodes(self.valid_types, self.options):
            logger.info("Parsing object %s", node)
            obj = object_.Object(node, parent=self[constants.OBJECT])
            objects.append(obj)

        logger.info("Added %d object nodes", len(objects))
        self[constants.OBJECT][constants.CHILDREN] = objects

    def _parse_textures(self):
        """Locate all non-orphaned textures and parse them."""
        logger.debug("Scene()._parse_textures()")
        textures = []

        for texture_name in api.texture.textures():
            logger.info("Parsing texture %s", texture_name)
            tex_inst = texture.Texture(texture_name, self)
            textures.append(tex_inst)

        logger.info("Added %d texture nodes", len(textures))
        self[constants.TEXTURES] = textures
def _find_node(value, manifest):
    """Return the node in ``manifest`` whose uuid or name equals ``value``.

    :param value: name or uuid
    :param manifest: manifest of nodes to search
    :type value: str
    :type manifest: list

    Returns None (after logging) when no node matches.
    """
    for node in manifest:
        if node.get(constants.UUID) == value or node.node == value:
            return node
    logger.debug("No matching node for %s", value)
|
|
from copy import copy
import pytest
from diofant import (Dict, ImmutableDenseNDimArray, ImmutableSparseNDimArray,
Indexed, IndexedBase, Matrix, Rational, SparseMatrix,
Symbol)
from diofant.abc import i, j, w, x, y, z
__all__ = ()
def test_ndim_array_initiation():
    """Construction of dense and sparse immutable arrays of various shapes."""
    arr_with_one_element = ImmutableDenseNDimArray([23])
    assert len(arr_with_one_element) == 1
    assert arr_with_one_element[0] == 23
    assert arr_with_one_element[:] == [23]
    assert arr_with_one_element.rank() == 1

    arr_with_symbol_element = ImmutableDenseNDimArray([Symbol('x')])
    assert len(arr_with_symbol_element) == 1
    assert arr_with_symbol_element[0] == Symbol('x')
    assert arr_with_symbol_element[:] == [Symbol('x')]
    assert arr_with_symbol_element.rank() == 1

    number5 = 5
    vector = ImmutableDenseNDimArray.zeros(number5)
    assert len(vector) == number5
    assert vector.shape == (number5,)
    assert vector.rank() == 1

    vector = ImmutableSparseNDimArray.zeros(number5)
    assert len(vector) == number5
    assert vector.shape == (number5,)
    assert vector._sparse_array == Dict()
    assert vector.rank() == 1

    n_dim_array = ImmutableDenseNDimArray(range(3**4), (3, 3, 3, 3,))
    assert len(n_dim_array) == 3 * 3 * 3 * 3
    assert n_dim_array.shape == (3, 3, 3, 3)
    assert n_dim_array.rank() == 4

    array_shape = (3, 3, 3, 3)
    sparse_array = ImmutableSparseNDimArray.zeros(*array_shape)
    assert len(sparse_array._sparse_array) == 0
    assert len(sparse_array) == 3 * 3 * 3 * 3
    # BUG FIX: these two assertions previously re-checked `n_dim_array`
    # instead of the freshly created `sparse_array`.
    assert sparse_array.shape == array_shape
    assert sparse_array.rank() == 4

    one_dim_array = ImmutableDenseNDimArray([2, 3, 1])
    assert len(one_dim_array) == 3
    assert one_dim_array.shape == (3,)
    assert one_dim_array.rank() == 1
    assert one_dim_array.tolist() == [2, 3, 1]

    shape = (3, 3)
    array_with_many_args = ImmutableSparseNDimArray.zeros(*shape)
    assert len(array_with_many_args) == 3 * 3
    assert array_with_many_args.shape == shape
    assert array_with_many_args[0, 0] == 0
    assert array_with_many_args.rank() == 2
def test_reshape():
    """reshape() preserves length, updates shape/rank, and rejects bad sizes."""
    flat = ImmutableDenseNDimArray(range(50), 50)
    assert flat.shape == (50,)
    assert flat.rank() == 1

    cube = flat.reshape(5, 5, 2)
    assert cube.shape == (5, 5, 2)
    assert cube.rank() == 3
    assert len(cube) == 50

    # Total size 1*1 does not match the 50 elements.
    pytest.raises(ValueError, lambda: cube.reshape(1, 1))
def test_iterator():
    """Iteration yields elements in row-major order, before and after reshape."""
    arr = ImmutableDenseNDimArray(range(4), (2, 2))
    for expected, item in enumerate(arr):
        assert item == expected

    flattened = arr.reshape(4)
    for expected, item in enumerate(flattened):
        assert item == expected
def test_sparse():
    """Sparse arrays store only non-zero entries and reject item assignment."""
    sparse_array = ImmutableSparseNDimArray([0, 0, 0, 1], (2, 2))
    assert len(sparse_array) == 4
    # dictionary where all data is, only non-zero entries are actually stored:
    assert len(sparse_array._sparse_array) == 1

    assert list(sparse_array) == [0, 0, 0, 1]
    for got, expected in zip(sparse_array, [0, 0, 0, 1]):
        assert got == expected

    def sparse_assignment():
        sparse_array[0, 0] = 123
        assert len(sparse_array._sparse_array) == 1

    # Immutable: assignment must raise and leave the storage untouched.
    pytest.raises(TypeError, sparse_assignment)
    assert len(sparse_array._sparse_array) == 1
    assert sparse_array[0, 0] == 0
def test_calculation():
    """Element-wise addition and subtraction of dense arrays."""
    ones = ImmutableDenseNDimArray([1] * 9, (3, 3))
    nines = ImmutableDenseNDimArray([9] * 9, (3, 3))

    total = ones + nines
    assert all(entry == 10 for entry in total)
    assert total == ImmutableDenseNDimArray([10] * 9, (3, 3))
    assert total == ImmutableSparseNDimArray([10] * 9, (3, 3))

    difference = nines - ones
    assert all(entry == 8 for entry in difference)
    assert difference == ImmutableDenseNDimArray([8] * 9, (3, 3))
    assert difference == ImmutableSparseNDimArray([8] * 9, (3, 3))
def test_ndim_array_converting():
    """Round-trip conversions between NDim arrays, lists and matrices."""
    dense_array = ImmutableDenseNDimArray([1, 2, 3, 4], (2, 2))
    alist = dense_array.tolist()
    # BUG FIX: this comparison was missing its `assert` and was a no-op.
    assert alist == [[1, 2], [3, 4]]

    matrix = dense_array.tomatrix()
    assert isinstance(matrix, Matrix)

    for i in range(len(dense_array)):
        assert dense_array[i] == matrix[i]
    assert matrix.shape == dense_array.shape
    assert ImmutableDenseNDimArray(matrix) == dense_array
    assert ImmutableDenseNDimArray(matrix.as_immutable()) == dense_array
    assert ImmutableDenseNDimArray(matrix.as_mutable()) == dense_array

    sparse_array = ImmutableSparseNDimArray([1, 2, 3, 4], (2, 2))
    alist = sparse_array.tolist()
    assert alist == [[1, 2], [3, 4]]

    matrix = sparse_array.tomatrix()
    assert isinstance(matrix, SparseMatrix)

    # Only rank-2 arrays convert to matrices.
    pytest.raises(ValueError,
                  lambda: ImmutableDenseNDimArray([1]*6, (2, 2, 2)).tomatrix())

    for i in range(len(sparse_array)):
        assert sparse_array[i] == matrix[i]
    assert matrix.shape == sparse_array.shape
    assert ImmutableSparseNDimArray(matrix) == sparse_array
    assert ImmutableSparseNDimArray(matrix.as_immutable()) == sparse_array
    assert ImmutableSparseNDimArray(matrix.as_mutable()) == sparse_array
def test_converting_functions():
    """Construction from a plain list and from a Matrix agree."""
    data = [1, 2, 3, 4]
    mat = Matrix(((1, 2), (3, 4)))

    # From a flat list with an explicit shape.
    from_list = ImmutableDenseNDimArray(data, (2, 2))
    assert isinstance(from_list, ImmutableDenseNDimArray)
    assert mat.tolist() == from_list.tolist()

    # Directly from a Matrix; the shape is inherited.
    from_matrix = ImmutableDenseNDimArray(mat)
    assert isinstance(from_matrix, ImmutableDenseNDimArray)
    assert mat.tolist() == from_matrix.tolist()
    assert mat.shape == from_matrix.shape
def test_equality():
    """Structural equality, hashing, and immutability of dense arrays."""
    first_list = [1, 2, 3, 4]
    second_list = [1, 2, 3, 4]
    third_list = [4, 3, 2, 1]
    assert first_list == second_list
    assert first_list != third_list

    first = ImmutableDenseNDimArray(first_list, (2, 2))
    second = ImmutableDenseNDimArray(second_list, (2, 2))
    fourth = ImmutableDenseNDimArray(first_list, (2, 2))

    assert first == second

    def assignment_attempt(target):
        target[0, 0] = 0

    # Assignment must fail and must not disturb equality.
    pytest.raises(TypeError, lambda: assignment_attempt(second))
    assert first == second
    assert first == fourth
    assert hash(first) == hash(second)
def test_arithmetic():
    """Scalar and element-wise arithmetic on dense arrays preserves type."""
    a = ImmutableDenseNDimArray([3] * 9, (3, 3))
    b = ImmutableDenseNDimArray([7] * 9, (3, 3))

    # Addition commutes.
    c1 = a + b
    c2 = b + a
    assert c1 == c2

    # Subtraction anticommutes.
    d1 = a - b
    d2 = b - a
    assert d1 == d2 * (-1)

    # Scalar multiplication: right, left, and in-place all agree.
    e1 = a * 5
    e2 = 5 * a
    e3 = copy(a)
    e3 *= 5
    assert e1 == e2 == e3

    # Scalar division: plain and in-place agree.
    f1 = a / 5
    f2 = copy(a)
    f2 /= 5
    assert f1 == f2
    for row in range(3):
        for col in range(3):
            assert f1[row, col] == Rational(3, 5)

    assert type(a) == type(b) == type(c1) == type(c2) == type(d1) == type(d2) \
        == type(e1) == type(e2) == type(e3) == type(f1)
def test_higher_dimenions():
    """Indexing, flat-index mapping, and rebuilding of a rank-3 array."""
    m3 = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4))
    assert m3.tolist() == [[[10, 11, 12, 13],
                            [14, 15, 16, 17],
                            [18, 19, 20, 21]],
                           [[22, 23, 24, 25],
                            [26, 27, 28, 29],
                            [30, 31, 32, 33]]]
    # _get_tuple_index maps a flat (row-major) index to a multi-index.
    assert m3._get_tuple_index(0) == (0, 0, 0)
    assert m3._get_tuple_index(1) == (0, 0, 1)
    assert m3._get_tuple_index(4) == (0, 1, 0)
    assert m3._get_tuple_index(12) == (1, 0, 0)
    assert str(m3) == '[[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]]'
    # Rebuilding from the nested-list form (with or without an explicit
    # shape) yields an equal array.
    m3_rebuilt = ImmutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]])
    assert m3 == m3_rebuilt
    m3_other = ImmutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]], (2, 3, 4))
    assert m3 == m3_other
def test_rebuild_immutable_arrays():
    """An array round-trips through func(*args) reconstruction."""
    for array_cls in (ImmutableSparseNDimArray, ImmutableDenseNDimArray):
        arr = array_cls(range(10, 34), (2, 3, 4))
        assert arr == arr.func(*arr.args)
def test_slices():
    """Slicing dense and sparse arrays; 2-D slices convert to matrices."""
    md = ImmutableDenseNDimArray(range(10, 34), (2, 3, 4))

    # Full slice exposes the flat internal storage.
    assert md[:] == md._array
    assert md[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]])
    assert md[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]])
    assert md[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]])
    assert md[:, :, :] == md

    # The sparse array must behave identically to its dense counterpart.
    sd = ImmutableSparseNDimArray(range(10, 34), (2, 3, 4))
    assert sd == ImmutableSparseNDimArray(md)

    assert sd[:] == md._array
    assert sd[:] == list(sd)
    assert sd[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]])
    assert sd[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]])
    assert sd[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]])
    assert sd[:, :, :] == sd
def test_diff_and_applyfunc():
    """diff and applyfunc operate element-wise and return new arrays."""
    md = ImmutableDenseNDimArray([[x, y], [x*z, x*y*z]])
    assert md.diff(x) == ImmutableDenseNDimArray([[1, 0], [z, y*z]])

    sd = ImmutableSparseNDimArray(md)
    assert sd == ImmutableSparseNDimArray([x, y, x*z, x*y*z], (2, 2))
    assert sd.diff(x) == ImmutableSparseNDimArray([[1, 0], [z, y*z]])

    # `elem` is used for the lambda parameter so it does not shadow the
    # symbol `x` referenced in the expected results.
    mdn = md.applyfunc(lambda elem: elem*3)
    assert mdn == ImmutableDenseNDimArray([[3*x, 3*y], [3*x*z, 3*x*y*z]])
    assert md != mdn

    sdn = sd.applyfunc(lambda elem: elem/2)
    assert sdn == ImmutableSparseNDimArray([[x/2, y/2], [x*z/2, x*y*z/2]])
    assert sd != sdn
def test_op_priority():
    """Scalar-expression multiplication commutes with the array operand."""
    md = ImmutableDenseNDimArray([1, 2, 3])
    expected = ImmutableDenseNDimArray([1 + x, 2 + 2*x, 3 + 3*x])
    # Exercise both operand orders twice, as the original test did.
    for _ in range(2):
        left = (1 + x) * md
        right = md * (1 + x)
        assert left == expected
        assert left == right
def test_symbolic_indexing():
    """Indexing arrays with symbols yields Indexed objects; substitution
    of concrete indices recovers the stored entries, and out-of-range
    concrete indices raise ValueError."""
    M = ImmutableDenseNDimArray([[x, y], [z, w]])
    Mij = M[i, j]
    assert isinstance(Mij, Indexed)
    Ms = ImmutableSparseNDimArray([[2, 3*x], [4, 5]])
    msij = Ms[i, j]
    assert isinstance(msij, Indexed)
    # Substituting every concrete index pair recovers the element.
    for oi, oj in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        assert Mij.subs({i: oi, j: oj}) == M[oi, oj]
        assert msij.subs({i: oi, j: oj}) == Ms[oi, oj]
    # Arrays and IndexedBase objects substitute for one another.
    A = IndexedBase('A', (0, 2))
    assert A[0, 0].subs({A: M}) == x
    assert A[i, j].subs({A: M}) == M[i, j]
    assert M[i, j].subs({M: A}) == A[i, j]

    # Symbolic index expressions also produce Indexed objects.
    assert isinstance(M[3 * i - 2, j], Indexed)
    assert M[3 * i - 2, j].subs({i: 1, j: 0}) == M[1, 0]
    assert isinstance(M[i, 0], Indexed)
    assert M[i, 0].subs({i: 0}) == M[0, 0]
    assert M[0, i].subs({i: 1}) == M[0, 1]

    Mo = ImmutableDenseNDimArray([1, 2, 3])
    assert Mo[i].subs({i: 1}) == 2
    Mos = ImmutableSparseNDimArray([1, 2, 3])
    assert Mos[i].subs({i: 1}) == 2

    # Concrete indices outside the 2x2 shape are rejected even when the
    # other index is symbolic.
    pytest.raises(ValueError, lambda: M[i, 2])
    pytest.raises(ValueError, lambda: M[i, -1])
    pytest.raises(ValueError, lambda: M[2, i])
    pytest.raises(ValueError, lambda: M[-1, i])

    pytest.raises(ValueError, lambda: Ms[i, 2])
    pytest.raises(ValueError, lambda: Ms[i, -1])
    pytest.raises(ValueError, lambda: Ms[2, i])
    pytest.raises(ValueError, lambda: Ms[-1, i])
|
|
"""
desispec.io.brick
=================
I/O routines for working with per-brick files.
See ``doc/DESI_SPECTRO_REDUX/SPECPROD/bricks/BRICKID/*-BRICKID.rst`` in desidatamodel
for a description of the relevant data models.
See :doc:`coadd` and `DESI-doc-1056 <https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=1056>`_
for general information about the coaddition dataflow and algorithms.
"""
import os
import os.path
import re
import warnings
import numpy as np
import astropy.io.fits
from desiutil.depend import add_dependencies
import desispec.io.util
#- For backwards compatibility, derive brickname from filename
def _parse_brick_filename(filepath):
"""return (channel, brickname) from /path/to/brick-[brz]-{brickname}.fits
"""
filename = os.path.basename(filepath)
warnings.warn('Deriving channel and brickname from filename {} instead of contents'.format(filename))
m = re.match('brick-([brz])-(\w+).fits', filename)
if m is None:
raise ValueError('Unable to derive channel and brickname from '+filename)
else:
return m.groups() #- (channel, brickname)
class BrickBase(object):
    """Represents objects in a single brick and possibly also a single band b,r,z.

    The constructor will open an existing file and create a new file and parent
    directory if necessary. The :meth:`close` method must be called for any updates
    or new data to be recorded. Successful completion of the constructor does not
    guarantee that :meth:`close` will succeed.

    HDU layout: 0=FLUX, 1=IVAR, 2=WAVELENGTH, 3=RESOLUTION, 4=FIBERMAP table.

    Args:
        path(str): Path to the brick file to open.
        mode(str): File access mode to use. Should normally be 'readonly' or 'update'. Use 'update' to create a new file and its parent directory if necessary.
        header: An optional header specification used to create a new file. See :func:`desispec.io.util.fitsheader` for details on allowed values.

    Raises:
        RuntimeError: Invalid mode requested.
        IOError: Unable to open existing file in 'readonly' mode.
        OSError: Unable to create a new parent directory in 'update' mode.
    """
    def __init__(self,path,mode = 'readonly',header = None):
        if mode not in ('readonly','update'):
            raise RuntimeError('Invalid mode %r' % mode)
        self.path = path
        self.mode = mode
        # Create a new file if necessary.
        if self.mode == 'update' and not os.path.exists(self.path):
            # BRICKNAM must be in header if creating the file for the first time
            if header is None or 'BRICKNAM' not in header:
                raise ValueError('header must have BRICKNAM when creating new brick file')
            self.brickname = header['BRICKNAM']
            if 'CHANNEL' in header:
                self.channel = header['CHANNEL']
            else:
                self.channel = 'brz' #- could be any spectrograph channel
            # Create the parent directory, if necessary.
            head,tail = os.path.split(self.path)
            if not os.path.exists(head):
                os.makedirs(head)
            # Create empty HDUs. It would be good to refactor io.frame to avoid any duplication here.
            hdr = desispec.io.util.fitsheader(header)
            add_dependencies(hdr)
            hdr['EXTNAME'] = ('FLUX', 'no dimension')
            hdu0 = astropy.io.fits.PrimaryHDU(header = hdr)
            hdr['EXTNAME'] = ('IVAR', 'no dimension')
            hdu1 = astropy.io.fits.ImageHDU(header = hdr)
            hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]')
            hdu2 = astropy.io.fits.ImageHDU(header = hdr)
            hdr['EXTNAME'] = ('RESOLUTION', 'no dimension')
            hdu3 = astropy.io.fits.ImageHDU(header = hdr)
            # Create an HDU4 using the columns from fibermap with a few extras added.
            columns = desispec.io.fibermap.fibermap_columns[:]
            columns.extend([
                ('NIGHT','i4'),
                ('EXPID','i4'),
                ('INDEX','i4'),
                ])
            # Zero-length table; rows are concatenated in by add_objects().
            data = np.empty(shape = (0,),dtype = columns)
            hdr = desispec.io.util.fitsheader(header)
            #- ignore incorrect and harmless fits TDIM7 warning for
            #- FILTER column that is a 2D array of strings
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                hdu4 = astropy.io.fits.BinTableHDU(data=data, header=hdr, name='FIBERMAP')
            # Add comments for fibermap columns.
            num_fibermap_columns = len(desispec.io.fibermap.fibermap_comments)
            for i in range(1,1+num_fibermap_columns):
                key = 'TTYPE%d' % i
                name = hdu4.header[key]
                comment = desispec.io.fibermap.fibermap_comments[name]
                hdu4.header[key] = (name,comment)
            # Add comments for our additional columns.
            hdu4.header['TTYPE%d' % (1+num_fibermap_columns)] = ('NIGHT','Night of exposure YYYYMMDD')
            hdu4.header['TTYPE%d' % (2+num_fibermap_columns)] = ('EXPID','Exposure ID')
            hdu4.header['TTYPE%d' % (3+num_fibermap_columns)] = ('INDEX','Index of this object in other HDUs')
            self.hdu_list = astropy.io.fits.HDUList([hdu0,hdu1,hdu2,hdu3,hdu4])
        else:
            self.hdu_list = astropy.io.fits.open(path,mode = self.mode)
            try:
                self.brickname = self.hdu_list[0].header['BRICKNAM']
                self.channel = self.hdu_list[0].header['CHANNEL']
            except KeyError:
                # Older files lack these keywords; fall back to the filename.
                self.channel, self.brickname = _parse_brick_filename(path)

    def add_objects(self,flux,ivar,wave,resolution):
        """Add a list of objects to this brick file from the same night and exposure.

        Args:
            flux(numpy.ndarray): Array of (nobj,nwave) flux values for nobj objects tabulated at nwave wavelengths.
            ivar(numpy.ndarray): Array of (nobj,nwave) inverse-variance values.
            wave(numpy.ndarray): Array of (nwave,) wavelength values in Angstroms. All objects are assumed to use the same wavelength grid.
            resolution(numpy.ndarray): Array of (nobj,nres,nwave) resolution matrix elements.

        Raises:
            RuntimeError: Can only add objects in update mode.
        """
        if self.mode != 'update':
            raise RuntimeError('Can only add objects in update mode.')
        # Concatenate the new per-object image HDU data or use it to initialize the HDU.
        # HDU2 contains the wavelength grid shared by all objects so we only add it once.
        if self.hdu_list[0].data is not None:
            self.hdu_list[0].data = np.concatenate((self.hdu_list[0].data,flux,))
            self.hdu_list[1].data = np.concatenate((self.hdu_list[1].data,ivar,))
            assert np.array_equal(self.hdu_list[2].data,wave),'Wavelength arrays do not match.'
            self.hdu_list[3].data = np.concatenate((self.hdu_list[3].data,resolution,))
        else:
            self.hdu_list[0].data = flux
            self.hdu_list[1].data = ivar
            self.hdu_list[2].data = wave
            self.hdu_list[3].data = resolution

    def get_wavelength_grid(self):
        """Return the wavelength grid used in this brick file.
        """
        return self.hdu_list[2].data

    def get_target(self,target_id):
        """Get the spectra and info for one target ID.

        Args:
            target_id(int): Target ID number to lookup.

        Returns:
            tuple: Tuple of numpy arrays (flux,ivar,resolution,info) of data associated
                with this target ID. The flux,ivar,resolution arrays will have one entry
                for each spectrum and the info array will have one entry per exposure.
                The returned arrays are slices into the FITS file HDU data arrays, so this
                call is relatively cheap (and any changes will be saved to the file if it
                was opened in update mode.)
        """
        exposures = (self.hdu_list[4].data['TARGETID'] == target_id)
        # The INDEX column links fibermap rows to rows of the image HDUs.
        index_list = np.unique(self.hdu_list[4].data['INDEX'][exposures])
        return (self.hdu_list[0].data[index_list],self.hdu_list[1].data[index_list],
            self.hdu_list[3].data[index_list],self.hdu_list[4].data[exposures])

    def get_target_ids(self):
        """Return set of unique target IDs in this brick.
        """
        return list(set(self.hdu_list[4].data['TARGETID']))

    def get_num_spectra(self):
        """Get the number of spectra contained in this brick file.

        Returns:
            int: Number of objects contained in this brick file.
        """
        return len(self.hdu_list[0].data)

    def get_num_targets(self):
        """Get the number of distinct targets with at least one spectrum in this brick file.

        Returns:
            int: Number of unique targets represented with spectra in this brick file.
        """
        return len(np.unique(self.hdu_list[4].data['TARGETID']))

    def close(self):
        """Write any updates and close the brick file.
        """
        # Updates are only flushed to disk here; see the class docstring.
        if self.mode == 'update':
            self.hdu_list.writeto(self.path,clobber = True)
        self.hdu_list.close()
class Brick(BrickBase):
    """Represents the combined cframe exposures in a single brick and band.

    See :class:`BrickBase` for constructor info.
    """
    def __init__(self,path,mode = 'readonly',header = None):
        BrickBase.__init__(self,path,mode,header)

    def add_objects(self,flux,ivar,wave,resolution,object_data,night,expid):
        """Add a list of objects to this brick file from the same night and exposure.

        Extends :meth:`BrickBase.add_objects` by also appending the
        corresponding fibermap rows (augmented with NIGHT, EXPID and INDEX
        columns) to the FIBERMAP HDU.

        Args:
            flux(numpy.ndarray): Array of (nobj,nwave) flux values for nobj objects tabulated at nwave wavelengths.
            ivar(numpy.ndarray): Array of (nobj,nwave) inverse-variance values.
            wave(numpy.ndarray): Array of (nwave,) wavelength values in Angstroms. All objects are assumed to use the same wavelength grid.
            resolution(numpy.ndarray): Array of (nobj,nres,nwave) resolution matrix elements.
            object_data(numpy.ndarray): Record array of fibermap rows for the objects to add.
            night(str): Date string for the night these objects were observed in the format YYYYMMDD.
            expid(int): Exposure number for these objects.

        Raises:
            RuntimeError: Can only add objects in update mode.
        """
        BrickBase.add_objects(self,flux,ivar,wave,resolution)
        # Augment object_data with constant NIGHT and EXPID columns.
        augmented_data = np.empty(shape = object_data.shape,dtype = self.hdu_list[4].data.dtype)
        for column_def in desispec.io.fibermap.fibermap_columns:
            name = column_def[0]
            # Special handling for the fibermap FILTER array, which is not output correctly
            # by astropy.io.fits so we convert it to a comma-separated list.
            if name == 'FILTER' and augmented_data[name].shape != object_data[name].shape:
                for i,filters in enumerate(object_data[name]):
                    augmented_data[name][i] = ','.join(filters)
            else:
                augmented_data[name] = object_data[name]
        augmented_data['NIGHT'] = int(night)
        augmented_data['EXPID'] = expid
        # INDEX points each fibermap row at its row in the image HDUs.
        begin_index = len(self.hdu_list[4].data)
        end_index = begin_index + len(flux)
        augmented_data['INDEX'] = np.arange(begin_index,end_index,dtype=int)
        # Always concatenate to our table since a new file will be created with a zero-length table.
        self.hdu_list[4].data = np.concatenate((self.hdu_list[4].data,augmented_data,))
class CoAddedBrick(BrickBase):
    """Represents the co-added exposures in a single brick and, possibly, a single band.

    See :class:`BrickBase` for constructor info.
    """
    def __init__(self, path, mode='readonly', header=None):
        # No extra state: delegate entirely to the base class.
        super(CoAddedBrick, self).__init__(path, mode, header)
|
|
# -*- coding: utf-8 -*-
import datetime
import sys
from reportlab.graphics import renderPDF
from reportlab.graphics.barcode.qr import QrCodeWidget
from reportlab.graphics.shapes import Drawing
from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
from reportlab.platypus import (
Image,
Paragraph,
SimpleDocTemplate,
Spacer,
Table,
)
from .letter_design import STYLES
from .letter_processor import ProcessedText
from .models import Letterhead, ContentTemplate, Letter
class NumberedCanvas(canvas.Canvas):
    """Canvas that defers emitting pages until save() so every page can be
    stamped with "Page x of y" once the total page count is known."""

    def __init__(self, *args, **kwargs):
        """Constructor"""
        canvas.Canvas.__init__(self, *args, **kwargs)
        # One snapshot of the canvas state per completed page.
        self._saved_page_states = []

    def showPage(self):
        # Capture the page's state instead of emitting it now; the pages
        # are flushed in save() once the final count is known.
        self._saved_page_states.append(dict(self.__dict__))
        self._startPage()

    def save(self):
        """add page info to each page (page x of y)"""
        num_pages = len(self._saved_page_states)
        for state in self._saved_page_states:
            # Restore the saved page, stamp its number, then emit it.
            self.__dict__.update(state)
            self.draw_page_number(num_pages)
            canvas.Canvas.showPage(self)
        canvas.Canvas.save(self)

    def draw_page_number(self, page_count):
        # Change the position of this to wherever you want the page number to be
        self.drawRightString(
            195 * mm,
            10 * mm,
            "Page %d of %d" % (self._pageNumber, page_count)
        )
class LetterCanvas(object):
    """Builds a letter PDF: letterhead layout on the first page, a simpler
    header/footer on later pages, and templated body content in between.
    All positions come from the Letterhead model, in millimetres."""

    def __init__(self, letterhead, content_template, letter, response_FLO):
        """Constructor

        letterhead       -- Letterhead model with layout coordinates (mm)
        content_template -- ContentTemplate providing the body text
        letter           -- Letter model with addresses/references/content
        response_FLO     -- file-like object the PDF is written to
        """
        self.letterhead = letterhead
        self.content_template = content_template
        self.letter = letter
        self.response_FLO = response_FLO
        self.pagesize = A4
        self.width, self.height = self.pagesize

    def run(self):
        """
        Run the report
        """
        self.doc = SimpleDocTemplate(
            self.response_FLO,
            rightMargin=self.letterhead.right_margin*mm,
            leftMargin=self.letterhead.left_margin*mm,
            topMargin=self.letterhead.top_margin*mm,
            bottomMargin=self.letterhead.bottom_margin*mm,
            pagesize=self.pagesize,
        )
        # Initial spacer pushes the body below the first page's letterhead.
        self.elements = [Spacer(1, 67*mm)]
        self.insert_content()
        self.doc.build(
            self.elements,
            onFirstPage=self.first_page,
            onLaterPages=self.subsequent_pages,
            canvasmaker=NumberedCanvas
        )

    def first_page(self, canvas, doc):
        """
        Defines layout for the first page of our letter.
        """
        # Save the state of our canvas so we can draw on it
        canvas.saveState()

        # Logo block
        logo = Image(
            self.letterhead.logo.image,
            width=self.letterhead.logo_width*mm,
            height=self.letterhead.logo_height*mm
        )
        logo.wrapOn(canvas, doc.width/2.0, doc.height)
        # y is measured from the top of an A4 page (297mm), so convert to
        # reportlab's bottom-up coordinates.
        logo.drawOn(
            canvas,
            self.letterhead.logo_x*mm,
            (297-self.letterhead.logo_y-self.letterhead.logo_height)*mm
        )

        # Return address block
        ptext = "<br/>".join([line for line in self.letterhead.return_contacts.split('\n')])
        p = Paragraph(ptext, STYLES['ReturnAddress'])
        p.wrapOn(
            canvas,
            doc.width/3.0,
            doc.height
        )
        p.drawOn(
            canvas,
            self.letterhead.return_contacts_x*mm,
            (257-self.letterhead.return_contacts_y)*mm
        )

        # Reference block
        ptext = "Your reference: " + self.letter.your_reference
        p = Paragraph(ptext, STYLES['ReturnAddress'])
        p.wrapOn(
            canvas,
            doc.width/3.0,
            doc.height
        )
        p.drawOn(
            canvas,
            self.letterhead.your_reference_x*mm,
            (257-self.letterhead.your_reference_y)*mm
        )
        ptext = "Our reference: " + self.letter.our_reference
        p = Paragraph(ptext, STYLES['ReturnAddress'])
        p.wrapOn(
            canvas,
            doc.width/3.0,
            doc.height
        )
        p.drawOn(
            canvas,
            self.letterhead.our_reference_x*mm,
            (257-self.letterhead.our_reference_y)*mm
        )

        # Recipient address block
        ptext = "<font size=12>" + "<br/>".join(
            [
                (" ").join([
                    self.letter.addressee_title,
                    self.letter.addressee_first_name,
                    self.letter.addressee_second_name,
                ]),
                self.letter.address_1,
                self.letter.address_2,
                self.letter.address_3,
                self.letter.town,
                self.letter.postcode,
                "</font>",
            ]
        )
        p = Paragraph(ptext, STYLES['Normal'])
        p.wrapOn(canvas, doc.width-300, doc.height)
        p.drawOn(canvas, 15*mm, 197*mm)

        # Footer
        # See http://stackoverflow.com/a/13132282
        # QR code in the bottom-left corner encoding the letter's barcode.
        qr_code = QrCodeWidget(self.letter.barcode)
        drawing = Drawing(45, 45)
        drawing.add(qr_code)
        renderPDF.draw(drawing, canvas, 1, 1)
        footer = Paragraph(self.letter.barcode, STYLES['Normal'])
        w, h = footer.wrap(doc.width, doc.bottomMargin)
        footer.drawOn(canvas, doc.leftMargin+50, h)

        # Release the canvas
        canvas.restoreState()

    def subsequent_pages(self, canvas, doc):
        """
        Defines layout for all pages of our letter but the first.
        """
        # Save the state of our canvas so we can draw on it
        canvas.saveState()

        # Header
        header = Paragraph(self.content_template.name, STYLES['Normal'])
        w, h = header.wrap(doc.width, doc.topMargin)
        # NOTE(review): `h` is already in points, so `h*mm` scales it
        # again here (first_page uses plain `h`) — confirm intended.
        header.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin + doc.bottomMargin - h*mm)

        # Footer
        qr_code = QrCodeWidget(self.letter.barcode)
        drawing = Drawing(45, 45)
        drawing.add(qr_code)
        renderPDF.draw(drawing, canvas, 1, 1)
        footer = Paragraph(self.letter.barcode, STYLES['Normal'])
        w, h = footer.wrap(doc.width, doc.bottomMargin)
        footer.drawOn(canvas, doc.leftMargin+50, h)

        # Release the canvas
        canvas.restoreState()

    def insert_content(self):
        """
        Inserts the flowable elements into our letter.
        """
        # Draw things on the PDF. Here's where the PDF generation happens.
        # See the ReportLab documentation for full list of functionality.
        self.elements.append(Paragraph(self.letter.date_sent.strftime("%d %B %Y"), STYLES['DateLine']))
        self.elements.append(Paragraph(self.letter.letter_title, STYLES['LetterTitle']))
        # Organisation letters with no named addressee get the impersonal
        # salutation and the matching formal sign-off.
        if self.letter.addressee_organisation and not self.letter.addressee_title:
            salutation = "Dear sir or madam,"
            sign_off = "Yours faithfully,"
        else:
            salutation = (" ").join([
                self.letter.addressee_title,
                self.letter.addressee_second_name,
            ])
            salutation += ","
            sign_off = "Yours sincerely,"
        self.elements.append(Paragraph(salutation, STYLES['Salutation']))
        flowable_text = ProcessedText(
            self.content_template,
            self.letter
        ).process().decode('utf-8')
        # One Paragraph per line of the processed template text.
        # (the enumerate index `i` is unused)
        for i, par in enumerate(flowable_text.split('\n')):
            self.elements.append(Paragraph(par, STYLES['LetterBody']))
        self.elements.append(Paragraph(sign_off, STYLES['Salutation']))
        self.elements.append(Paragraph(self.letter.sender_name, STYLES['Signature']))
        self.elements.append(Paragraph(self.letter.sender_title, STYLES['SignatoryTitle']))
|
|
#!/usr/bin/python
# Copyright (c) 2016, Justin R. Klesmith
# All rights reserved.

# QuickStats: Get the statistics from a enrich run
# NOTE: this is a Python 2 script (print statements, xrange, StringIO).
from __future__ import division
from subprocess import check_output
from math import log
import StringIO
import argparse
import time
import os

__author__ = "Justin R. Klesmith"
__copyright__ = "Copyright 2016, Justin R. Klesmith"
__credits__ = ["Justin R. Klesmith", "Timothy A. Whitehead"]
__license__ = "BSD-3"
__version__ = "1.4X, Build: 201507X"
__maintainer__ = "Justin R. Klesmith"
__email__ = "klesmit3@msu.edu"

# Build Notes:
# 1.3 - 20150616 - Fixed counting bug at the end of the tile in CodonSubs
#                  such that it's just less than and not equal to

# Get commandline arguments
parser = argparse.ArgumentParser(description='Quick Enrich Stats - Note: you must pre-normalize the data using QuickNormalize.py.')
parser.add_argument('-f', dest='file', action='store', help='File of your already normalized dataset')
parser.add_argument('-p', dest='path', action='store', help='What is the path to the enrich tile directory? ie: ./tile/')
parser.add_argument('-l', dest='tilelength', action='store', help='Tile length override')
parser.add_argument('-s', dest='tilestart', action='store', help='Tile start override')
args = parser.parse_args()

# Verify that the required inputs were supplied
if args.file == None:
    print "No normalized file given"
    quit()

if args.path == None:
    print "No enrich path given"
    quit()

# Global vars shared by the functions below
AA_Table = '*ACDEFGHIKLMNPQRSTVWY'  # stop codon + the 20 amino acids
Mutations = {}   # Mutations[residue][aa] = [NormLog2, Unselected, Selected]
NumResi = 0      # Tile length
NormData = ""    # CSV rows copied from the normalized input file
StartResidue = 0 # first residue number of the tile
def Build_Matrix():
    """Seed the global Mutations matrix with empty entries.

    For every residue position in the tile and every character in
    AA_Table, stores a fresh [NormLog2, Unselected, Selected] triple of
    [None, None, None].  Returns the (global) Mutations dict.
    """
    first_position = 0 + StartResidue
    last_position = NumResi + StartResidue
    for position in xrange(first_position, last_position):
        # Create the per-residue row on first touch, then fill it.
        row = Mutations.setdefault(position, {})
        for residue in AA_Table:
            row[residue] = [None, None, None]
    return Mutations
def ImportNormData():
global NumResi
global NormData
global StartResidue
lines = 0
normdata = ""
#Import the previously normalized data
with open(args.file) as infile:
copy = False
for line in infile:
if line.strip() == "Location,Mutation,Normalized_ER,Unselected_Reads,Selected_Reads,RawLog2":
copy = True
elif line.strip() == "Normalized Heatmap":
copy = False
elif line.startswith("Tile Length: "):
if args.tilelength != None:
NumResi = int(args.tilelength)
else:
NumResi = int(line.strip()[13:])
print "Tile length: "+str(NumResi)
elif line.startswith("Start residue (-s): "):
split = line.split(" ")
if args.tilestart != None:
StartResidue = int(args.tilestart)
else:
StartResidue = int(split[3]) #Set the start residue
elif copy:
NormData = NormData + line
lines = lines + 1
#NumResi = int(lines / 21) #Set the tile length
return normdata
def PopulateMutArrays():
#Loop through the output
for line in StringIO.StringIO(NormData):
split = line.split(",")
location = int(split[0])
identity = str(split[1])
#Ignore if our location is above our number of residues
if location > (NumResi + StartResidue - 1):
print "Above Tile Length Reject: "+str(location)+"-"+str(identity)
continue
#Ignore if our location is below our number of residues
if location < StartResidue:
print "Below Tile Start Reject "+str(location)+"-"+str(identity)
continue
Mutations[location][identity][0] = split[2]
Mutations[location][identity][1] = split[3]
Mutations[location][identity][2] = split[4].rstrip('\n')
return Mutations
def DNAReads():
    """Total the selected/unselected DNA read counts from the Enrich output.

    Runs awk over the counts_{sel,unsel}_..._DNA_qc files (preferring the
    paired-end "B" variant, falling back to the read-1-only "R1" variant)
    and sums column 9, skipping each file's header row.

    Returns a dict: reads[0] = unselected total, reads[1] = selected
    total, both as strings.  Exits the script if neither counts file
    variant is found.
    """
    reads = {} #Initialize the variable for the number of reads 0=unsel, 1=sel
    SC = 0
    UC = 0
    selectedcounts = ""
    unselectedcounts = ""
    # Selected population: prefer "B" (both reads), else "R1".
    if os.path.isfile(args.path+'data/output/counts_sel_example_F_N_include_filtered_B_DNA_qc'):
        selectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_sel_example_F_N_include_filtered_B_DNA_qc'])
    elif os.path.isfile(args.path+'data/output/counts_sel_example_F_N_include_filtered_R1_DNA_qc'):
        selectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_sel_example_F_N_include_filtered_R1_DNA_qc'])
    else:
        print "Can't find selected DNA counts"
        quit()
    # Unselected population: same fallback order.
    if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'):
        unselectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'])
    elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'):
        unselectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'])
    else:
        print "Can't find unselected DNA counts"
        quit()
    #Loop through the output, summing one integer count per line
    for line in StringIO.StringIO(selectedcounts):
        split = line.split(" ")
        SC = SC + int(split[0].rstrip('\n'))
    for line in StringIO.StringIO(unselectedcounts):
        split = line.split(" ")
        UC = UC + int(split[0].rstrip('\n'))
    reads[0] = str(UC) #Set the unselected reads
    reads[1] = str(SC) #Set the selected reads
    return reads
def MutationCounts():
    """Tally per-mutation statistics across the whole tile.

    Returns a dict keyed 0-7:
      0-5: counts of mutations whose normalized ER exceeds
           0.00 / 0.10 / 0.15 / 0.30 / 0.50 / 1.00 respectively
      6:   mutations with >= 5 unselected reads
      7:   mutations with a value retained in the selected population
    """
    muts = {}
    NM00 = 0
    NM10 = 0
    NM15 = 0
    NM30 = 0
    NM50 = 0
    NM100 = 0
    FiveThreshold = 0
    Retained = 0
    for j in xrange(0+StartResidue,NumResi+StartResidue):
        for i in enumerate(AA_Table):
            # NOTE(review): indentation reconstructed - the read-count and
            # retained checks are assumed to be siblings of the "NS" check
            # (not nested inside it); confirm against the original script.
            if Mutations[j][i[1]][0] != "NS":
                # cumulative thresholds: a score > 1.00 increments all six
                if float(Mutations[j][i[1]][0]) > 0.00:
                    NM00 += 1
                if float(Mutations[j][i[1]][0]) > 0.10:
                    NM10 += 1
                if float(Mutations[j][i[1]][0]) > 0.15:
                    NM15 += 1
                if float(Mutations[j][i[1]][0]) > 0.30:
                    NM30 += 1
                if float(Mutations[j][i[1]][0]) > 0.50:
                    NM50 += 1
                if float(Mutations[j][i[1]][0]) > 1.00:
                    NM100 += 1
            if Mutations[j][i[1]][1] != "None":
                if int(Mutations[j][i[1]][1]) >= 5:
                    FiveThreshold += 1
            if Mutations[j][i[1]][2] != "None":
                Retained += 1
    muts[0] = NM00
    muts[1] = NM10
    muts[2] = NM15
    muts[3] = NM30
    muts[4] = NM50
    muts[5] = NM100
    muts[6] = FiveThreshold
    muts[7] = Retained
    return muts
def Nonsynonymous():
    """Classify unselected reads by number of nonsynonymous mutations.

    Uses the protein-level counts files (all variants, and the .m1
    single-mutant subset) to compute, over the unselected population:
      reads[0] - wild-type reads ("NA-NA" row)
      reads[1] - reads with exactly one nonsynonymous mutation (.m1)
      reads[2] - reads with multiple nonsynonymous mutations (remainder)

    Exits the script if either counts file variant is missing.
    """
    reads = {}
    Total = 0
    Single = 0
    WT = 0
    ALL = ""
    M1 = ""
    # All protein variants: prefer paired-end "B", fall back to "R1".
    if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc'):
        ALL = check_output(["awk", 'FNR>1{ print $1,$9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc'])
    elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc'):
        ALL = check_output(["awk", 'FNR>1{ print $1,$9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc'])
    else:
        print "Unsel protein counts not found"
        quit()
    # Single-mutant subset (.m1 files).
    if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'):
        M1 = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'])
    elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'):
        M1 = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'])
    else:
        print "Unsel protein counts.m1 not found"
        quit()
    #Loop through the output
    for line in StringIO.StringIO(ALL):
        split = line.split(" ")
        # "NA-NA" marks the wild-type row; every row adds to the total
        if split[0] == "NA-NA":
            WT = int(split[1])
        Total = Total + int(split[1].rstrip('\n'))
    for line in StringIO.StringIO(M1):
        split = line.split(" ")
        Single = Single + int(split[0].rstrip('\n'))
    reads[0] = WT #Wild-type
    reads[1] = Single #.m1
    reads[2] = (Total - Single - WT) #all - .m1 - WT
    return reads
def CodonSubs():
    """Count observed codon substitutions in the unselected population.

    Classifies unselected DNA variants by how many base changes fall
    within one codon of the tile:
      codons[0] - single-base substitutions inside the tile
      codons[1] - double-base substitutions hitting the same codon
      codons[2] - triple-base substitutions hitting the same codon

    The tile's base coordinates come from <translate_start> in the Enrich
    local config; the tile spans 3*NumResi bases from that start.  Exits
    the script if any required counts file is missing.
    """
    codons = {}
    One = 0
    Two = 0
    Three = 0
    #Get the start of translation
    TranslateStart = 0
    TranslateEnd = 0
    with open(args.path+'input/example_local_config') as infile:
        for line in infile:
            if line.startswith("<translate_start>"):
                # slice the integer out from between the XML-ish tags
                TSLen = len(line)
                TranslateStart = int(line[17:TSLen-20])
                # NOTE(review): indentation reconstructed - TranslateEnd is
                # assumed to be computed inside this branch.
                TranslateEnd = TranslateStart+(3*NumResi)
    ALL = ""
    M1 = ""
    M2 = ""
    # All DNA variants (mutation count + positions): "B" preferred, else "R1".
    if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'):
        ALL = check_output(["awk", 'FNR>1{ print $4,$5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'])
    elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'):
        ALL = check_output(["awk", 'FNR>1{ print $4,$5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'])
    else:
        print "Counts unsel DNA not found."
        quit()
    # Single-base-mutant subset (.m1).
    if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m1'):
        M1 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m1'])
    elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m1'):
        M1 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m1'])
    else:
        print "Counts unsel DNA.m1 not found."
        quit()
    # Double-base-mutant subset (.m2).
    if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m2'):
        M2 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m2'])
    elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m2'):
        M2 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m2'])
    else:
        print "Counts unsel DNA.m2 not found."
        quit()
    #Check for single base mutations
    for line in StringIO.StringIO(M1):
        split = line.split(" ")
        if int(split[0]) >= TranslateStart and int(split[0]) < TranslateEnd: #Check to see that the base is in our tile
            One = One + 1
    #Check for double base mutations
    for line in StringIO.StringIO(M2):
        split2 = line.split(" ")
        location = split2[0].split(",") #Get the individual mutation locations
        if int(location[0]) >= TranslateStart and int(location[0]) < TranslateEnd: #Check to see that the base is in our tile
            if int(location[1]) >= TranslateStart and int(location[1]) < TranslateEnd: #Check to see that the base is in our tile
                # same integer codon index => both hits in one codon
                codon1 = int((int(location[0]) - int(TranslateStart))/3)
                codon2 = int((int(location[1]) - int(TranslateStart))/3)
                if codon1 == codon2:
                    Two = Two + 1
    #Check for triple base mutations
    for line in StringIO.StringIO(ALL):
        split3 = line.split(" ")
        if split3[0] == "3": #Test to see that there are three mutations
            location = split3[1].split(",") #Get the individual mutation locations
            if int(location[0]) >= TranslateStart and int(location[0]) < TranslateEnd: #Check to see that the base is in our tile
                if int(location[1]) >= TranslateStart and int(location[1]) < TranslateEnd: #Check to see that the base is in our tile
                    if int(location[2]) >= TranslateStart and int(location[2]) < TranslateEnd: #Check to see that the base is in our tile
                        codon1 = int((int(location[0]) - int(TranslateStart))/3)
                        codon2 = int((int(location[1]) - int(TranslateStart))/3)
                        codon3 = int((int(location[2]) - int(TranslateStart))/3)
                        if codon1 == codon2 and codon2 == codon3:
                            Three = Three + 1
    codons[0] = One #1-base sub
    codons[1] = Two #2-base sub
    codons[2] = Three #3-base sub
    return codons
def RunStats():
    """Print the full statistics report for the run.

    Prints run parameters, then calls DNAReads(), MutationCounts(),
    CodonSubs() and Nonsynonymous() and formats their results as
    percentages/fractions.  Relies on ImportNormData() having populated
    NumResi and StartResidue first.
    """
    print "Stat run parameters:"
    print time.strftime("%H:%M:%S")
    print time.strftime("%m/%d/%Y")
    print "Nomalized file: "+args.file
    print "Data path: "+args.path
    print "Tile length: "+str(NumResi)
    print "Tile start: "+str(StartResidue)
    if args.tilelength != None:
        print "Custom tile length passed on the command line"
    if args.tilestart != None:
        print "Custom tile start passed on the command line"
    reads = DNAReads()
    print "Unselected DNA sequences (reads) from Enrich: "+reads[0]
    print "Selected DNA sequences (reads) from Enrich: "+reads[1]
    mutations = MutationCounts()
    print "Number of mutations above 0.00: "+str(mutations[0])
    print "Number of mutations above 0.10: "+str(mutations[1])
    print "Number of mutations above 0.15: "+str(mutations[2])
    print "Number of mutations above 0.30: "+str(mutations[3])
    print "Number of mutations above 0.50: "+str(mutations[4])
    print "Number of mutations above 1.00: "+str(mutations[5])
    print "Number unselected mutants above threshold of 5: "+str(mutations[6])
    print "Number of mutations retained in the selected population (not given a 1 if significant in unsel): "+str(mutations[7])
    codons = CodonSubs()
    # 9 single-base, 27 double-base and 27 triple-base substitutions are
    # possible per codon (63 total).
    print "Percent of possible codon subsititions observed in the unselected population:"
    print "1-base substitution (#codons*9): {0:.1f}".format((codons[0]/(9*NumResi)*100))+"% "+str(codons[0])+"/"+str(9*NumResi)
    print "2-base substitutions (#codons*27): {0:.1f}".format((codons[1]/(27*NumResi)*100))+"% "+str(codons[1])+"/"+str(27*NumResi)
    print "3-base substitutions (#codons*27): {0:.1f}".format((codons[2]/(27*NumResi)*100))+"% "+str(codons[2])+"/"+str(27*NumResi)
    print "Total base substitutions: "+str(codons[0]+codons[1]+codons[2])+"/"+str(63*NumResi)
    nonsynonymous = Nonsynonymous()
    print "Percent of unselected reads with: "
    print "No nonsynonymous mutations: {0:.1f}".format((nonsynonymous[0]/int(reads[0]))*100)+"% "+str(nonsynonymous[0])+"/"+reads[0]
    print "One nonsynonymous mutation: {0:.1f}".format((nonsynonymous[1]/int(reads[0]))*100)+"% "+str(nonsynonymous[1])+"/"+reads[0]
    print "Multiple nonsynonymous mutations: {0:.1f}".format((nonsynonymous[2]/int(reads[0]))*100)+"% "+str(nonsynonymous[2])+"/"+reads[0]
    print "Coverage of possible single nonsynonymous amino acid mutations: {0:.1f}".format((mutations[6]/(NumResi*20))*100)+"% "+str(mutations[6])+"/"+str(NumResi*20)
    return
def main():
    """Entry point: print the citation preamble, then run the analysis."""
    #Write out preample
    print "QuickStats"
    print "Author: "+__author__
    print "Contact: "+__email__
    print __copyright__
    print "License: "+__license__
    print "Credits: "+__credits__[0]+", "+__credits__[1]
    print ""
    print "Please cite:"
    print "Github [user: JKlesmith] (www.github.com)"
    print "Klesmith JR, Bacik J-P, Michalczyk R, Whitehead TA. 2015. Comprehensive sequence-flux mapping of metabolic pathways in living cells."
    print "Kowalsky CA, Klesmith JR, Stapleton JA, Kelly V, Reichkitzer N, Whitehead TA. 2015. High-Resolution Sequence-Function Mapping of Full-Length Proteins. PLoS ONE 10(3):e0118193. doi:10.1371/journal.pone.0118193."
    print ""
    #Print out run stats: import the data, build and fill the matrix,
    #then report the statistics.
    ImportNormData()
    Build_Matrix()
    PopulateMutArrays()
    RunStats()

if __name__ == '__main__':
    main()
|
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
from functools import partial
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
# infoblox-client is an optional third-party dependency; when it is
# missing, get_connector() raises a helpful error instead of this module
# failing to import.
try:
    from infoblox_client.connector import Connector
    from infoblox_client.exceptions import InfobloxException
    HAS_INFOBLOX_CLIENT = True
except ImportError:
    HAS_INFOBLOX_CLIENT = False
# defining nios constants: WAPI object-type identifiers used by the
# nios_* modules
NIOS_DNS_VIEW = 'view'
NIOS_NETWORK_VIEW = 'networkview'
NIOS_HOST_RECORD = 'record:host'
NIOS_IPV4_NETWORK = 'network'
NIOS_IPV6_NETWORK = 'ipv6network'
NIOS_ZONE = 'zone_auth'
NIOS_PTR_RECORD = 'record:ptr'
NIOS_A_RECORD = 'record:a'
NIOS_AAAA_RECORD = 'record:aaaa'
NIOS_CNAME_RECORD = 'record:cname'
NIOS_MX_RECORD = 'record:mx'
NIOS_SRV_RECORD = 'record:srv'
NIOS_NAPTR_RECORD = 'record:naptr'
NIOS_TXT_RECORD = 'record:txt'
NIOS_NSGROUP = 'nsgroup'
NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
NIOS_MEMBER = 'member'

# Shared `provider` argument spec for all nios_* modules; each option can
# fall back to the matching INFOBLOX_* environment variable.
NIOS_PROVIDER_SPEC = {
    'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
    'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
    'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
    'silent_ssl_warnings': dict(type='bool', default=True),
    'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
    'http_pool_connections': dict(type='int', default=10),
    'http_pool_maxsize': dict(type='int', default=10),
    'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
    # NOTE: the env var name 'INFOBLOX_WAP_VERSION' (missing the "I") is
    # kept as-is for backward compatibility with existing deployments.
    'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
    # fixed: previously fell back to INFOBLOX_MAX_RETRIES (copy/paste
    # error), so the retry env var silently capped result counts.
    'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RESULTS']))
}
def get_connector(*args, **kwargs):
    ''' Returns an instance of infoblox_client.connector.Connector

    :params args: positional arguments are silently ignored
    :params kwargs: dict that is passed to Connector init
    :returns: Connector
    :raises Exception: if infoblox-client is not installed, or if kwargs
        contains a key outside NIOS_PROVIDER_SPEC (plus 'ssl_verify')
    '''
    if not HAS_INFOBLOX_CLIENT:
        raise Exception('infoblox-client is required but does not appear '
                        'to be installed.  It can be installed using the '
                        'command `pip install infoblox-client`')
    if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
        raise Exception('invalid or unsupported keyword argument for connector')
    for key, value in iteritems(NIOS_PROVIDER_SPEC):
        if key not in kwargs:
            # apply default values from NIOS_PROVIDER_SPEC since we cannot just
            # assume the provider values are coming from AnsibleModule
            if 'default' in value:
                kwargs[key] = value['default']
            # override any values with env variables unless they were
            # explicitly set
            env = ('INFOBLOX_%s' % key).upper()
            if env in os.environ:
                kwargs[key] = os.environ.get(env)
    # Connector expects `ssl_verify`; translate the Ansible-style
    # `validate_certs` option name.
    if 'validate_certs' in kwargs.keys():
        kwargs['ssl_verify'] = kwargs['validate_certs']
        kwargs.pop('validate_certs', None)
    return Connector(kwargs)
def normalize_extattrs(value):
    ''' Normalize extattrs field to expected format

    The module accepts extattrs as key/value pairs.  This method wraps
    each value in the structure WAPI expects:

        extattrs: {
            key: {
                value: <value>
            }
        }
    '''
    return dict((key, {'value': val}) for key, val in iteritems(value))
def flatten_extattrs(value):
    ''' Flatten the key/value struct for extattrs

    WAPI returns extattrs entries wrapped as ``{key: {'value': v}}``;
    this method unwraps them back to plain ``{key: v}`` pairs.
    '''
    return dict((key, wrapped['value']) for key, wrapped in iteritems(value))
def member_normalize(member_spec):
    ''' Transforms the member module arguments into a valid WAPI struct

    This function will transform the arguments into a structure that
    is a valid WAPI structure in the format of:
        {
            key: <value>,
        }
    It will remove any arguments that are set to None since WAPI will error on
    that condition.
    The remainder of the value validation is performed by WAPI
    Some parameters in ib_spec are passed as a list in order to pass the
    validation for elements.  In this function, they are converted to
    dictionary (and normalized recursively).

    :args member_spec: dict of member arguments; mutated in place
    :returns: the same dict, normalized
    '''
    # single-element list parameters that WAPI expects as a plain struct
    member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
                       'pre_provisioning', 'network_setting', 'v6_network_setting',
                       'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
                       'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
    # iterate over a snapshot of the keys: entries may be deleted below,
    # and deleting from a dict while iterating its live view raises
    # RuntimeError on Python 3
    for key in list(member_spec.keys()):
        if key in member_elements and member_spec[key] is not None:
            member_spec[key] = member_spec[key][0]
            if isinstance(member_spec[key], dict):
                member_spec[key] = member_normalize(member_spec[key])
            elif isinstance(member_spec[key], list):
                for x in member_spec[key]:
                    if isinstance(x, dict):
                        # member_normalize mutates x in place, so the
                        # rebinding here is harmless
                        x = member_normalize(x)
        elif member_spec[key] is None:
            del member_spec[key]
    return member_spec
class WapiBase(object):
    ''' Base class for implementing Infoblox WAPI API '''

    # argument-spec fragment that modules merge in to accept `provider`
    provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}

    def __init__(self, provider):
        self.connector = get_connector(**provider)

    def __getattr__(self, name):
        # Proxy unknown (non-underscore) attribute access to the
        # underlying Connector, so subclasses can call e.g.
        # self.get_object(...) / self.create_object(...) directly.
        try:
            return self.__dict__[name]
        except KeyError:
            if name.startswith('_'):
                raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
            return partial(self._invoke_method, name)

    def _invoke_method(self, name, *args, **kwargs):
        # Invoke the named Connector method, routing InfobloxException to
        # the subclass's handle_exception() hook when one is defined.
        try:
            method = getattr(self.connector, name)
            return method(*args, **kwargs)
        except InfobloxException as exc:
            if hasattr(self, 'handle_exception'):
                self.handle_exception(name, exc)
            else:
                raise
class WapiLookup(WapiBase):
    ''' Implements WapiBase for lookup plugins '''

    def handle_exception(self, method_name, exc):
        # Prefer the server-provided message when the response carries one;
        # otherwise wrap the whole exception.
        response = exc.response
        if 'text' in response:
            raise Exception(response['text'])
        raise Exception(exc)
class WapiInventory(WapiBase):
    ''' Implements WapiBase for dynamic inventory script

    No behaviour is added: inventory scripts use the Connector proxying
    inherited from WapiBase, and errors propagate unchanged since no
    handle_exception() hook is defined here.
    '''
    pass
class WapiModule(WapiBase):
    ''' Implements WapiBase for executing a NIOS module '''

    def __init__(self, module):
        # :param module: the AnsibleModule instance driving this run
        self.module = module
        provider = module.params['provider']
        try:
            super(WapiModule, self).__init__(provider)
        except Exception as exc:
            # Connector construction failed (missing library, bad provider
            # settings, ...) - fail the module instead of raising.
            self.module.fail_json(msg=to_text(exc))

    def handle_exception(self, method_name, exc):
        ''' Handles any exceptions raised
        This method will be called if an InfobloxException is raised for
        any call to the instance of Connector and also, in case of generic
        exception. This method will then gracefully fail the module.
        :args method_name: name of the Connector method that failed
        :args exc: instance of InfobloxException
        '''
        if ('text' in exc.response):
            self.module.fail_json(
                msg=exc.response['text'],
                type=exc.response['Error'].split(':')[0],
                code=exc.response.get('code'),
                operation=method_name
            )
        else:
            self.module.fail_json(msg=to_native(exc))

    def run(self, ib_obj_type, ib_spec):
        ''' Runs the module and performans configuration tasks
        :args ib_obj_type: the WAPI object type to operate against
        :args ib_spec: the specification for the WAPI object as a dict
        :returns: a results dict with at least a 'changed' key
        '''
        update = new_name = None
        state = self.module.params['state']
        if state not in ('present', 'absent'):
            self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)

        result = {'changed': False}

        # the spec entries flagged `ib_req` identify the object server-side
        obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])

        # get object reference
        ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)

        # build the proposed object from all non-None module params,
        # applying any per-key `transform` callables from the spec
        proposed_object = {}
        for key, value in iteritems(ib_spec):
            if self.module.params[key] is not None:
                if 'transform' in value:
                    proposed_object[key] = value['transform'](self.module)
                else:
                    proposed_object[key] = self.module.params[key]

        # If configure_by_dns is set to False, then delete the default dns set in the param else throw exception
        if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
                and ib_obj_type == NIOS_HOST_RECORD:
            del proposed_object['view']
        elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
                and ib_obj_type == NIOS_HOST_RECORD:
            self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')

        if ib_obj_ref:
            if len(ib_obj_ref) > 1:
                # multiple candidates: pick the one matching the proposed IP
                for each in ib_obj_ref:
                    # To check for existing A_record with same name with input A_record by IP
                    if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
                        current_object = each
                    # To check for existing Host_record with same name with input Host_record by IP
                    elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
                            == proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
                        current_object = each
                    # Else set the current_object with input value
                    else:
                        current_object = obj_filter
                        ref = None
            else:
                current_object = ib_obj_ref[0]
            if 'extattrs' in current_object:
                current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
            if current_object.get('_ref'):
                # keep the WAPI reference out of the comparison/update payload
                ref = current_object.pop('_ref')
        else:
            # no existing object found
            current_object = obj_filter
            ref = None

        # checks if the object type is member to normalize the attributes being passed
        if (ib_obj_type == NIOS_MEMBER):
            proposed_object = member_normalize(proposed_object)

        # checks if the name's field has been updated
        if update and new_name:
            proposed_object['name'] = new_name

        res = None
        # compare BEFORE extattrs are re-wrapped into WAPI form
        modified = not self.compare_objects(current_object, proposed_object)
        if 'extattrs' in proposed_object:
            proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])

        # Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
        proposed_object = self.check_if_nios_next_ip_exists(proposed_object)

        if state == 'present':
            if ref is None:
                # object does not exist yet: create it (honouring check mode)
                if not self.module.check_mode:
                    self.create_object(ib_obj_type, proposed_object)
                result['changed'] = True
            # Check if NIOS_MEMBER and the flag to call function create_token is set
            elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
                proposed_object = None
                # the function creates a token that can be used by a pre-provisioned member to join the grid
                result['api_results'] = self.call_func('create_token', ref, proposed_object)
                result['changed'] = True
            elif modified:
                self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)

                if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
                    proposed_object = self.on_update(proposed_object, ib_spec)
                    res = self.update_object(ref, proposed_object)
                if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
                    # popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
                    proposed_object = self.on_update(proposed_object, ib_spec)
                    del proposed_object['view']
                    res = self.update_object(ref, proposed_object)
                elif 'network_view' in proposed_object:
                    proposed_object.pop('network_view')
                # fall-through update for every other object type
                if not self.module.check_mode and res is None:
                    proposed_object = self.on_update(proposed_object, ib_spec)
                    self.update_object(ref, proposed_object)
                result['changed'] = True

        elif state == 'absent':
            if ref is not None:
                if not self.module.check_mode:
                    self.delete_object(ref)
                result['changed'] = True

        return result

    def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
        ''' Send POST request if host record input name and retrieved ref name is same,
            but input IP and retrieved IP is different'''
        # NOTE(review): `'name' in (a and b)` only tests membership in b
        # (`a and b` evaluates to b when a is truthy) - confirm intended.
        if 'name' in (obj_filter and ib_obj_ref[0]) and ib_obj_type == NIOS_HOST_RECORD:
            obj_host_name = obj_filter['name']
            ref_host_name = ib_obj_ref[0]['name']
            if 'ipv4addrs' in (current_object and proposed_object):
                current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
                proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
            elif 'ipv6addrs' in (current_object and proposed_object):
                current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
                proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
            if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
                self.create_object(ib_obj_type, proposed_object)

    def check_if_nios_next_ip_exists(self, proposed_object):
        ''' Check if nios_next_ip argument is passed in ipaddr while creating
            host record, if yes then format proposed object ipv4addrs and pass
            func:nextavailableip and ipaddr range to create hostrecord with next
            available ip in one call to avoid any race condition '''
        if 'ipv4addrs' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
                ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
        elif 'ipv4addr' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addr']:
                ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range

        return proposed_object

    def issubset(self, item, objects):
        ''' Checks if item is a subset of objects
        :args item: the subset item to validate
        :args objects: superset list of objects to validate against
        :returns: True if item is a subset of one entry in objects otherwise
            this method will return None
        '''
        for obj in objects:
            if isinstance(item, dict):
                if all(entry in obj.items() for entry in item.items()):
                    return True
            else:
                if item in obj:
                    return True

    def compare_objects(self, current_object, proposed_object):
        # Returns True when every key/value in proposed_object is already
        # reflected in current_object (list entries checked via issubset).
        for key, proposed_item in iteritems(proposed_object):
            current_item = current_object.get(key)

            # if proposed has a key that current doesn't then the objects are
            # not equal and False will be immediately returned
            if current_item is None:
                return False

            elif isinstance(proposed_item, list):
                for subitem in proposed_item:
                    if not self.issubset(subitem, current_item):
                        return False

            elif isinstance(proposed_item, dict):
                # NOTE(review): returns the nested comparison directly, so
                # any keys after the first dict-valued one are not checked.
                return self.compare_objects(current_item, proposed_item)

            else:
                if current_item != proposed_item:
                    return False

        return True

    def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
        ''' this function gets the reference object of pre-existing nios objects

        :returns: tuple of (matching objects or None, update flag,
            new_name when a rename was requested)
        '''
        update = False
        old_name = new_name = None
        if ('name' in obj_filter):
            # gets and returns the current object based on name/old_name passed
            try:
                name_obj = self.module._check_type_dict(obj_filter['name'])
                old_name = name_obj['old_name']
                new_name = name_obj['new_name']
            except TypeError:
                # plain string name, not an {old_name, new_name} dict
                name = obj_filter['name']

            if old_name and new_name:
                # rename requested: look up by the old name first
                if (ib_obj_type == NIOS_HOST_RECORD):
                    test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
                elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
                    test_obj_filter = obj_filter
                else:
                    test_obj_filter = dict([('name', old_name)])
                # get the object reference
                ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
                if ib_obj:
                    obj_filter['name'] = new_name
                else:
                    # old name not found - retry under the new name
                    test_obj_filter['name'] = new_name
                    ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
                update = True
                return ib_obj, update, new_name

            if (ib_obj_type == NIOS_HOST_RECORD):
                # to check only by name if dns bypassing is set
                if not obj_filter['configure_for_dns']:
                    test_obj_filter = dict([('name', name)])
                else:
                    test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
            elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS and 'mac' in obj_filter):
                test_obj_filter = dict([['mac', obj_filter['mac']]])
            elif (ib_obj_type == NIOS_A_RECORD):
                # resolves issue where a_record with uppercase name was returning null and was failing
                test_obj_filter = obj_filter
                test_obj_filter['name'] = test_obj_filter['name'].lower()
            # check if test_obj_filter is empty copy passed obj_filter
            else:
                test_obj_filter = obj_filter
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
        elif (ib_obj_type == NIOS_ZONE):
            # del key 'restart_if_needed' as nios_zone get_object fails with the key present
            temp = ib_spec['restart_if_needed']
            del ib_spec['restart_if_needed']
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
            # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
            if not ib_obj:
                ib_spec['restart_if_needed'] = temp
        elif (ib_obj_type == NIOS_MEMBER):
            # del key 'create_token' as nios_member get_object fails with the key present
            temp = ib_spec['create_token']
            del ib_spec['create_token']
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
            if temp:
                # reinstate 'create_token' key
                ib_spec['create_token'] = temp
        else:
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
        return ib_obj, update, new_name

    def on_update(self, proposed_object, ib_spec):
        ''' Event called before the update is sent to the API endpoing
        This method will allow the final proposed object to be changed
        and/or keys filtered before it is sent to the API endpoint to
        be processed.
        :args proposed_object: A dict item that will be encoded and sent
            the API endpoint with the updated data structure
        :returns: updated object to be sent to API endpoint
        '''
        keys = set()
        for key, value in iteritems(proposed_object):
            # spec entries may opt out of updates with 'update': False
            update = ib_spec[key].get('update', True)
            if not update:
                keys.add(key)
        return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.