code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.

# Client-side stress test: pushes a ~256 KB payload through the companion
# echo server using several different calling styles (plain method, nested
# object method, keyword args, context function, and with a SOAP header).

import sys
sys.path.insert(1, "..")

from SOAPpy import *
from SOAPpy import Parser

# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1

# '-s' selects the SSL endpoint; default is plain HTTP on port 9900.
if len(sys.argv) > 1 and sys.argv[1] == '-s':
    server = SOAPProxy("https://localhost:9900")
else:
    server = SOAPProxy("http://localhost:9900")

# BIG data: repr() of 2**18 dots -- a single large string argument.
big = repr('.' * (1<<18) )

# ...in an object
print "server.echo_ino(big):..",
tmp = server.echo_ino(big)
print "done"

# ...in an object in an object
print "server.prop.echo2(big)..",
tmp = server.prop.echo2(big)
print "done"

# ...with keyword arguments
print 'server.echo_wkw(third = big, first = "one", second = "two")..',
tmp = server.echo_wkw(third = big, first = "one", second = "two")
print "done"

# ...with a context object
print "server.echo_wc(big)..",
tmp = server.echo_wc(big)
print "done"

# ...with a header
hd = headerType(data = {"mystring": "Hello World"})
print "server._hd(hd).echo_wc(big)..",
tmp = server._hd(hd).echo_wc(big)
print "done"

# Ask the server to leave its request loop.
server.quit()
| Python |
#!/usr/bin/env python
#
# Copyright (c) 2001 actzero, inc. All rights reserved.

# Echo test server: module-level configuration flags.

import sys
sys.path.insert(1, "..")

from SOAPpy import *

# Uncomment to see outgoing HTTP headers and SOAP and incoming
Config.dumpSOAPIn = 1
Config.dumpSOAPOut = 1
Config.debug = 1

# specify name of authorization function
Config.authMethod = "_authorize"

# Set this to 0 to test authorization
allowAll = 1

# ask for returned SOAP responses to be converted to basic python types
Config.simplify_objects = 1

# provide a mechanism to stop the server (cleared by quit() below)
run = 1
def quit():
    """Stop the server: clear the module-level `run` flag so the serve
    loop in the main section falls through on its next iteration."""
    global run
    run = 0
# M2Crypto provides the SSL implementation when this SOAPpy build has
# SSL-server support compiled in.
if Config.SSLserver:
    from M2Crypto import SSL
def _authorize(*args, **kw):
    """Global authorization hook (named by Config.authMethod above).

    Returns 1 to allow the incoming call, 0 to deny it, as driven by the
    module-level `allowAll` flag.
    """
    global allowAll, Config
    if Config.debug:
        print "Authorize (function) called! (result = %d)" % allowAll
        print "Arguments: %s" % kw
    if allowAll:
        return 1
    else:
        return 0
# Simple echo
def echo(s):
    """Simple echo: return the argument doubled (s + s)."""
    global Config
    # Test of context retrieval
    ctx = Server.GetSOAPContext()
    if Config.debug:
        print "SOAP Context: ", ctx
    return s + s
# An echo class
class echoBuilder2:
    """Nested echo object registered as `prop` of echoBuilder; its
    echo2 method returns the argument repeated three times."""
    def echo2(self, val):
        # Works for any value supporting multiplication by an int
        # (numbers, strings, sequences).
        tripled = val * 3
        return tripled
# A class that has an instance variable which is an echo class
class echoBuilder:
    """Object registered whole with the SOAP server.

    Exposes echo_ino directly, a nested echo object via `prop`, and its
    own per-object authorization hook (_authorize)."""
    def __init__(self):
        # Nested object so clients can call server.prop.echo2(...).
        self.prop = echoBuilder2()
    def echo_ino(self, val):
        # Echo: return the argument doubled.
        return val + val
    def _authorize(self, *args, **kw):
        # Per-object authorization: allow iff the global allowAll flag is set.
        global allowAll, Config
        if Config.debug:
            print "Authorize (method) called with arguments:"
            print "*args=%s" % str(args)
            print "**kw =%s" % str(kw)
            print "Approved -> %d" % allowAll
        if allowAll:
            return 1
        else:
            return 0
# Echo with context
def echo_wc(s, _SOAPContext):
    """Echo with context: return s doubled, dumping the contents of the
    SOAPContext object when Config.debug is set."""
    global Config
    c = _SOAPContext
    sep = '-' * 72
    # The Context object has extra info about the call
    if Config.debug:
        print "-- XML", sep[7:]
        # The original XML request
        print c.xmldata
        print "-- Header", sep[10:]
        # The SOAP Header or None if not present
        print c.header
        if c.header:
            print "-- Header.mystring", sep[19:]
            # An element of the SOAP Header
            print c.header.mystring
        print "-- Body", sep[8:]
        # The whole Body object
        print c.body
        print "-- Peer", sep[8:]
        if not GSI:
            # The plain/SSL socket object: peer address via getpeername()
            print c.connection.getpeername()
        else:
            # GSI connections expose the peer and security context differently
            print c.connection.get_remote_address()
            ctx = c.connection.get_security_context()
            print ctx.inquire()[0].display()
        print "-- SOAPAction", sep[14:]
        # The SOAPaction HTTP header
        print c.soapaction
        print "-- HTTP headers", sep[16:]
        # All the HTTP headers
        print c.httpheaders
    return s + s
# Echo with keyword arguments
def echo_wkw(**kw):
    """Keyword-argument echo: concatenate the 'first', 'second' and
    'third' keyword arguments, in that order, with +."""
    first = kw['first']
    second = kw['second']
    third = kw['third']
    return first + second + third
# Simple echo
def echo_simple(*arg):
    """Echo back every positional argument, unchanged, as a tuple."""
    result = tuple(arg)
    return result
def echo_header(s, _SOAPContext):
    """Return the echoed value together with the SOAP Header taken from
    the request's context object."""
    ctx = _SOAPContext
    return (s, ctx.header)
# Address the server listens on.
addr = ('localhost', 9900)
GSI = 0
SSL = 0

# '-s' = SSL server, '-g' = GSI (Globus) server, default = plain HTTP.
if len(sys.argv) > 1 and sys.argv[1] == '-s':
    SSL = 1
    if not Config.SSLserver:
        raise RuntimeError, \
            "this Python installation doesn't have OpenSSL and M2Crypto"
    ssl_context = SSL.Context()
    ssl_context.load_cert('validate/server.pem')
    server = SOAPServer(addr, ssl_context = ssl_context)
    prefix = 'https'
elif len(sys.argv) > 1 and sys.argv[1] == '-g':
    GSI = 1
    from SOAPpy.GSIServer import GSISOAPServer
    server = GSISOAPServer(addr)
    prefix = 'httpg'
else:
    server = SOAPServer(addr)
    prefix = 'http'

print "Server listening at: %s://%s:%d/" % (prefix, addr[0], addr[1])

# register the method, both at the default path and at /pathtest
server.registerFunction(echo)
server.registerFunction(echo, path = "/pathtest")
server.registerFunction(_authorize)
server.registerFunction(_authorize, path = "/pathtest")

# Register a whole object
o = echoBuilder()
server.registerObject(o, path = "/pathtest")
server.registerObject(o)

# Register a function which gets called with the Context object
server.registerFunction(MethodSig(echo_wc, keywords = 0, context = 1),
        path = "/pathtest")
server.registerFunction(MethodSig(echo_wc, keywords = 0, context = 1))

# Register a function that takes keywords
server.registerKWFunction(echo_wkw, path = "/pathtest")
server.registerKWFunction(echo_wkw)

server.registerFunction(echo_simple)
server.registerFunction(MethodSig(echo_header, keywords=0, context=1))

# quit() clears `run`, which ends the loop below after the current request.
server.registerFunction(quit)

# Start the server: handle one request at a time until quit() is called
# or the process is interrupted.
try:
    while run:
        server.handle_request()
except KeyboardInterrupt:
    pass
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.

# Minimal client: calls echo() with a SOAP Header attached to every call,
# with typeless, prefix-free XML output enabled.

import sys
sys.path.insert(1, "..")

from SOAPpy import *

# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1

# Emit untyped elements and omit namespace prefixes from the output.
Config.BuildWithNoType = 1
Config.BuildWithNoNamespacePrefix = 1

# This header travels with every call made through the proxy.
hd = headerType(data = {"mystring": "Hello World"})
server = SOAPProxy("http://localhost:9900/", header=hd)

print server.echo("Hello world")

# Ask the server to shut down its request loop.
server.quit()
| Python |
#!/usr/bin/env python
################################################################################
#
# A bunch of regression type tests for the builder and parser.
#
################################################################################
ident = '$Id: SOAPtest.py,v 1.19 2004/04/01 13:25:46 warnes Exp $'
import urllib
import sys
import unittest
import re
sys.path.insert(1, "..")
from SOAPpy import *
# Global parser/builder configuration shared by all the tests below.
config=Config
# Enforce declared numeric ranges while parsing.
config.strict_range=1

# run these tests with this variable set both to 1 and 0
config.simplify_objects=0
# as borrowed from jake.soapware.org for float compares.
def nearlyeq(a, b, prec = 1e-7):
    """Relative float comparison: true when a and b differ by no more
    than |a| * prec. (As borrowed from jake.soapware.org.)"""
    delta = abs(a - b)
    return delta <= abs(a) * prec
# helper
def negfloat(x):
    """Parse rule helper: convert x to a float and return its negation."""
    return -float(x)
class Book(structType):
    """SOAP struct fixture with a single string member, `title`."""
    def __init__(self):
        # NOTE(review): the attribute is set before structType.__init__ is
        # called; presumably SOAPpy serializes instance attributes present
        # at init time -- confirm whether the ordering matters.
        self.title = "Title of a book"
        structType.__init__(self)
class Person(structType):
    """SOAP struct fixture with string members `age` and `height`
    (kept as strings; the parse rules in testStructIn coerce them)."""
    def __init__(self):
        self.age = "49"
        self.height = "5.5"
        structType.__init__(self)
class Result(structType):
    """SOAP struct serialized under the element name 'Result',
    containing a Book and a Person member."""
    def __init__(self):
        structType.__init__(self, name = 'Result')
        self.Book = Book()
        self.Person = Person()
class one:
    """Node used by the recursion tests; its `str` attribute is "one"."""
    def __init__(self):
        self.str = "one"
class two:
    """Node used by the recursion tests; its `str` attribute is "two"."""
    def __init__(self):
        self.str = "two"
class three:
    """Node used by the recursion tests; its `str` attribute is "three"."""
    def __init__(self):
        self.str = "three"
# The XML whitespace characters -- presumably used by tests further down;
# verify against the rest of the suite.
ws = ' \t\r\n'
# Shorthand for None, used in table-driven test data below.
N = None
class SOAPTestCase(unittest.TestCase):
# big message
def notestBigMessage(self):
x=[]
for y in string.lowercase:
x.append(y*999999)
buildSOAP(x)
# test arrayType
def testArrayType(self):
x = structType( {"name":"widg1","quantity":200,
"price":decimalType(45.99),
"_typename":"LineItem"})
y = buildSOAP([x, x])
# could be parsed using an XML parser?
self.failUnless(string.find(y, "LineItem")>-1)
# test arguments ordering
def testOrdering(self):
x = buildSOAP(method="newCustomer", namespace="urn:customer", \
kw={"name":"foo1", "address":"bar"}, \
config=SOAPConfig(argsOrdering={"newCustomer":("address", "name")}))
# could be parsed using an XML parser?
self.failUnless(string.find(x, "<address ")<string.find(x, "<name "))
x = buildSOAP(method="newCustomer", namespace="urn:customer", \
kw={"name":"foo1", "address":"bar"}, \
config=SOAPConfig(argsOrdering={"newCustomer":("name", "address")}))
# could be parsed using an XML parser?
self.failUnless(string.find(x, "<address ")>string.find(x, "<name "))
# test struct
def testStructIn(self):
x = '''<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SomeMethod>
<Result>
<Book>
<title>My Life and Work</title>
</Book>
<Person>
<name>Henry Ford</name>
<age> 49 </age>
<height> 5.5 </height>
</Person>
</Result>
</SomeMethod>
</soap:Body>
</soap:Envelope>
'''
# parse rules
pr = {'SomeMethod':
{'Result':
{'Book': {'title':(NS.XSD, "string")},
'Person': {'age':(NS.XSD, "int"),
'height':negfloat}
}
}
}
y = parseSOAPRPC(x, rules=pr)
if config.simplify_objects:
self.assertEquals(y['Result']['Person']['age'], 49);
self.assertEquals(y['Result']['Person']['height'], -5.5);
else:
self.assertEquals(y.Result.Person.age, 49);
self.assertEquals(y.Result.Person.height, -5.5);
# Try the reverse
def testStructOut(self):
x = buildSOAP(Result())
def testIntFloat(self):
x='''<SOAP-ENV:Envelope
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0
http://schemas.xmlsoap.org/soap/encoding/"
xmlns:i3="http://soapinterop.org/xsd" xmlns:i2="http://soapinterop.org/">
<SOAP-ENV:Body>
<i2:echoStructArray id="ref-1">
<return href="#ref-4"/>
</i2:echoStructArray>
<SOAP-ENC:Array id="ref-4" SOAP-ENC:arrayType="i3:SOAPStruct[3]">
<item href="#ref-5"/>
<item href="#ref-6"/>
<item href="#ref-7"/>
</SOAP-ENC:Array>
<i3:SOAPStruct id="ref-5">
<varString xsi:type="xsd:string">West Virginia</varString>
<varInt xsi:type="xsd:int">-546</varInt>
<varFloat xsi:type="xsd:float">-5.398</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-6">
<varString xsi:type="xsd:string">New Mexico</varString>
<varInt xsi:type="xsd:int">-641</varInt>
<varFloat xsi:type="xsd:float">-9.351</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-7">
<varString xsi:type="xsd:string">Missouri</varString>
<varInt xsi:type="xsd:int">-819</varInt>
<varFloat xsi:type="xsd:float">1.375</varFloat>
</i3:SOAPStruct>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
if(config.simplify_objects):
self.assertEquals(y['return'][0]['varString'], "West Virginia")
self.assertEquals(y['return'][1]['varInt'], -641)
self.assertEquals(y['return'][2]['varFloat'], 1.375)
else:
self.assertEquals(getattr(y,"return")[0].varString, "West Virginia")
self.assertEquals(getattr(y,"return")[1].varInt, -641)
self.assertEquals(getattr(y,"return")[2].varFloat, 1.375)
def testArray1(self):
x='''<SOAP-ENV:Envelope
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0
http://schemas.xmlsoap.org/soap/encoding/"
xmlns:i3="http://soapinterop.org/xsd" xmlns:i2="http://soapinterop.org/">
<SOAP-ENV:Body>
<i2:echoStructArray id="ref-1">
<return href="#ref-4"/>
</i2:echoStructArray>
<SOAP-ENC:Array id="ref-4" SOAP-ENC:arrayType="i3:SOAPStruct[3]">
<item href="#ref-5"/>
<item href="#ref-6"/>
<item href="#ref-7"/>
</SOAP-ENC:Array>
<i3:SOAPStruct id="ref-5">
<xsd:string>West Virginia</xsd:string>
<xsd:int>-546</xsd:int>
<xsd:float>-5.398</xsd:float>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-6">
<xsd:string>New Mexico</xsd:string>
<xsd:int>-641</xsd:int>
<xsd:float>-9.351</xsd:float>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-7">
<xsd:string>Missouri</xsd:string>
<xsd:int>-819</xsd:int>
<xsd:float>1.375</xsd:float>
</i3:SOAPStruct>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
if(config.simplify_objects):
self.assertEquals(y["return"][0]['string'], "West Virginia")
self.assertEquals(y["return"][1]['int'], -641)
self.assertEquals(y["return"][2]['float'], 1.375)
else:
self.assertEquals(getattr(y,"return")[0].string, "West Virginia")
self.assertEquals(getattr(y,"return")[1].int, -641)
self.assertEquals(getattr(y,"return")[2].float, 1.375)
def testUTF8Encoding1(self):
x = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsd2="http://www.w3.org/2000/10/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance" xmlns:xsi2="http://www.w3.org/2000/10/XMLSchema-instance">
<ns0:echoStringArrayResponse xmlns:ns0="http://soapinterop.org/">
<return2 href="#id3"/>
</ns0:echoStringArrayResponse>
<a id="id0" xmlns:ns0="http://soapinterop.org/" xsi2:type="xsd:string" xsi:type="xsd:string"></a>
<a id="id1" xmlns:ns0="http://soapinterop.org/" xsi2:type="xsd:string" xsi:type="xsd:string">Hello</a>
<a id="id2" xmlns:ns0="http://soapinterop.org/" xsi2:type="xsd:string" xsi:type="xsd:string">\'<&>"</a>
<return2 SOAP-ENC:arrayType="xsd:string[3]" id="id3" xmlns:ns0="http://soapinterop.org/">
<a href="#id0"/>
<a href="#id1"/>
<a href="#id2"/>
</return2>
</SOAP-ENV:Body></SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(y['return2'][1], "Hello")
else:
self.assertEquals(y.return2[1], "Hello")
def testUTF8Encoding2(self):
x = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<ns0:echoStringArrayResponse xmlns:ns0="http://soapinterop.org/">
<a xsi:type="xsd:string"></a>
<a xsi:type="xsd:string">Hello</a>
<a xsi:type="xsd:string">\'<&>"</a>
<b xsi:type="xsd:string">Goodbye</b>
</ns0:echoStringArrayResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAPRPC(x)
self.assertEquals(type(y.a), type([]))
self.assertEquals(type(y.b), type(''))
self.assertEquals(type(y._getItemAsList('a')), type([]))
self.assertEquals(type(y._getItemAsList('b')), type([]))
self.assertEquals(y.b, 'Goodbye')
self.assertEquals(y.a, ['', 'Hello', '\'<&>"'])
self.assertEquals(y._getItemAsList('b'), ['Goodbye'])
self.assertEquals(y._getItemAsList('c'), [])
self.assertEquals(y._getItemAsList('c', 'hello'), 'hello')
def testUTF8Encoding2(self):
x = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<a1 SOAP-ENC:root="1">Hello</a1>
<a2 SOAP-ENC:root="0" id="id">\'<&>"</a2>
<a3>Goodbye</a3>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
y = parseSOAP(x)
self.assertEquals(y.a1, 'Hello')
self.assertEquals(y.a3, 'Goodbye')
self.failIf(hasattr(y, 'a2'))
def testUTF8Encoding3(self):
x = '''<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SomeMethod>
<Result>
<Book>
<title>My Life and Work</title>
<author href="#Person-1"/>
</Book>
<Person id="Person-1">
<name>Henry Ford</name>
<address href="#Address-2"/>
</Person>
<Address id="Address-2">
<email>mailto:henryford@hotmail.com</email>
<web>http://www.henryford.com</web>
<pers href="#Person-1"/>
</Address>
</Result>
</SomeMethod>
</soap:Body>
</soap:Envelope>
'''
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(y['Result']['Book']['author']['name'], "Henry Ford")
self.assertEquals(y['Result']['Book']['author']['address']['web'], "http://www.henryford.com")
self.assertEquals(y['Result']['Book']['author']['address']['pers']['name'], "Henry Ford")
else:
self.assertEquals(y.Result.Book.author.name, "Henry Ford")
self.assertEquals(y.Result.Book.author.address.web, "http://www.henryford.com")
self.assertEquals(y.Result.Book.author.address.pers.name, "Henry Ford")
# ref example
def testRef(self):
x = '''<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<echoFloatArrayResponse xmlns="http://soapinterop.org/">
<Return href="#i1" xmlns="" />
</echoFloatArrayResponse>
<soapenc:Array id="i1" soapenc:arrayType="xsd:float[4]">
<Item>0</Item>
<Item>1</Item>
<Item>-1</Item>
<Item>3853.33325</Item>
</soapenc:Array>
</soap:Body>
</soap:Envelope>
'''
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(y['Return'][0], 0)
self.assertEquals(y['Return'][1], 1)
self.assertEquals(y['Return'][2], -1)
self.failUnless(nearlyeq(y['Return'][3], 3853.33325))
else:
self.assertEquals(y.Return[0], 0)
self.assertEquals(y.Return[1], 1)
self.assertEquals(y.Return[2], -1)
self.failUnless(nearlyeq(y.Return[3], 3853.33325))
# Make sure passing in our own bodyType works.
    def testBodyType(self):
        # Passing our own bodyType through build/parse must preserve
        # aliasing: b.a and b.b reference the same list, so the builder
        # should emit one value plus an href, and the parser should
        # reconstruct a single shared object.
        a = [23, 42]
        b = bodyType()
        b.a = b.b = a
        x = buildSOAP(b)
        y = parseSOAP(x)
        # Identical id() proves the multi-ref round trip kept one object.
        self.assertEquals(id(y.a), id(y.b))
        self.assertEquals(y.a, a)
        self.assertEquals(y.b, a)
# Test Envelope versioning (see section 4.1.2 of http://www.w3.org/TR/SOAP).
def testEnvelopeVersioning(self):
xml = '''<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:SOAP-ENV="http://new/envelope/version/"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/">
<SOAP-ENV:Body>
<_1 xsi:type="xsd:int" SOAP-ENC:root="1">1</_1>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
try:
parseSOAP(xml)
except Exception, e:
self.failUnless(isinstance(e, faultType))
self.assertEquals(e.faultcode, '%s:VersionMismatch' % NS.ENV_T)
self.failIf(hasattr(e, 'detail'))
# Big terrible ordered data with attributes test.
def testBigOrderedData(self):
data = '''<?xml version="1.0" encoding="UTF-8" ?>
<Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/">
<Body>
<replyBlock generic="1.0" attrib1="false" attrib2='hello'>
<itemList>
<mainItem mainattrib1='uno'>
<name>first_main_item</name>
<description>whatever etc.</description>
<infoList>
<itemInfo a1='123' a2='abc'>
<name>unoItem1</name>
</itemInfo>
<itemInfo a1='456' a2='def'>
<name>unoItem2</name>
</itemInfo>
<itemInfo a1='789' a2='ghi'>
<name>unoItem3</name>
</itemInfo>
</infoList>
</mainItem>
<mainItem mainattrib1='dos'>
<name>second_main_item</name>
<description>whatever etc.</description>
<infoList>
<itemInfo a1='3123' a2='3abc'>
<name>dosItem1</name>
</itemInfo>
<itemInfo a1='3456' a2='3def'>
<name>dosItem2</name>
</itemInfo>
<itemInfo a1='3789' a2='3ghi'>
<name>dosItem3</name>
</itemInfo>
</infoList>
</mainItem>
</itemList>
<itemList>
<mainItem mainattrib1='single'>
<name>single_main_item</name>
<description>whatever etc.</description>
<infoList>
<itemInfo a1='666' a2='xxx'>
<name>singleItem1</name>
</itemInfo>
</infoList>
</mainItem>
</itemList>
</replyBlock>
</Body>
</Envelope>'''
x = parseSOAP(data)
# print ".>",x.replyBlock.itemList._ns
y = buildSOAP(x)
def testEnvelope1(self):
my_xml2 = '''
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SOAP-ENV:Header>
<t:Transaction xmlns:t="some-URI" SOAP-ENV:mustUnderstand="1">
5
</t:Transaction>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<m:GetLastTradePriceResponse xmlns:m="Some-URI">
<PriceAndVolume>
<LastTradePrice>
34.5
</LastTradePrice>
<DayVolume>
10000
</DayVolume>
</PriceAndVolume>
</m:GetLastTradePriceResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
(x,h) = parseSOAPRPC(my_xml2,header=1)
def testEnvelope2(self):
x ='''
<V:Envelope
xmlns:V="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:C="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:i="http://www.w3.org/1999/XMLSchema-instance"
xmlns:d="http://www.w3.org/1999/XMLSchema"
V:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<V:Body>
<m:echoStructArray
xmlns:m="urn:xmethodsInterop">
<inputStructArray
i:type="C:Array"
C:arrayType="ns3:SOAPStruct[0]"
xmlns:ns3="http://soapinterop.org/xsd"/>
</m:echoStructArray>
</V:Body>
</V:Envelope>'''
x = parseSOAPRPC(x)
def testEnvelope3(self):
x = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/">
<SOAP-ENV:Body>
<m:echoStringResponse xmlns:m="http://soapinterop.org/">
<Result name="fred">hello</Result>
</m:echoStringResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
x, a = parseSOAPRPC(x, attrs = 1)
if config.simplify_objects:
self.assertEquals(a[id(x['Result'])][(None, 'name')], 'fred')
else:
self.assertEquals(a[id(x.Result)][(None, 'name')], 'fred')
def testParseException(self):
x='''<SOAP-ENV:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0 http://schemas.xmlsoap.org/soap/encoding/" xmlns:a1="http://schemas.microsoft.com/clr/ns/System.Runtime.Serialization.Formatters">
<SOAP-ENV:Body>
<SOAP-ENV:Fault id="ref-1">
<faultcode id="ref-2">SOAP-ENV:Server</faultcode>
<faultstring id="ref-3">Exception thrown on Server</faultstring>
<detail xsi:type="a1:ServerFault">
<exceptionType id="ref-4">System.Runtime.Serialization.SerializationException, mscorlib, Version=1.0.2411.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</exceptionType>
<message id="ref-5">Soap Parser Error System.Runtime.Serialization.SerializationException: Parse Error, xsd type not valid: Array
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.ProcessGetType(String value, String xmlKey)
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.ProcessType(ParseRecord pr, ParseRecord objectPr)
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.ProcessAttributes(ParseRecord pr, ParseRecord objectPr)
at System.Runtime.Serialization.Formatters.Soap.SoapHandler.StartElement(String prefix, String name, String urn)
at System.XML.XmlParser.ParseElement()
at System.XML.XmlParser.ParseTag()
at System.XML.XmlParser.Parse()
at System.XML.XmlParser.Parse0()
at System.XML.XmlParser.Run()</message>
<stackTrace id="ref-6"> at System.Runtime.Serialization.Formatters.Soap.SoapHandler.Error(IXmlProcessor p, Exception ex)
at System.XML.XmlParser.Run()
at System.Runtime.Serialization.Formatters.Soap.SoapParser.Run()
at System.Runtime.Serialization.Formatters.Soap.ObjectReader.Deserialize(HeaderHandler handler, ISerParser serParser)
at System.Runtime.Serialization.Formatters.Soap.SoapFormatter.Deserialize(Stream serializationStream, HeaderHandler handler)
at System.Runtime.Remoting.Channels.CoreChannel.DeserializeMessage(String mimeType, Stream xstm, Boolean methodRequest, IMessage msg, Header[] h)
at System.Runtime.Remoting.Channels.SoapServerFormatterSink.ProcessMessage(IServerChannelSinkStack sinkStack, ITransportHeaders requestHeaders, Stream requestStream, IMessage& msg, ITransportHeaders& responseHeaders, Stream& responseStream)</stackTrace>
</detail>
</SOAP-ENV:Fault>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
z = parseSOAPRPC(x)
self.assertEquals(z.__class__,faultType)
self.assertEquals(z.faultstring, "Exception thrown on Server")
def testFlatEnvelope(self):
x = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"><SOAP-ENV:Body><m:echoStringResponse xmlns:m="http://soapinterop.org/"><Result></Result></m:echoStringResponse></SOAP-ENV:Body></SOAP-ENV:Envelope>
'''
z = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals(type(z['Result']), type(''))
else:
self.assertEquals(type(z.Result), type(''))
    def testNumericArray(self):
        # A plain list of ints must survive a build/parse round trip
        # unchanged.
        x = [1,2,3,4,5]
        y = buildSOAP(x)
        z = parseSOAPRPC(y)
        self.assertEquals(x, z)
    def testStringArray(self):
        # A plain list of strings must survive a build/parse round trip
        # unchanged.
        x = ["cayce", "asd", "buy"]
        y = buildSOAP(x)
        z = parseSOAPRPC(y)
        self.assertEquals(x, z)
def testStringArray1(self):
x = arrayType(['a', 'b', 'c'])
y = buildSOAP(x)
z = parseSOAP(y)
if config.simplify_objects:
self.assertEquals(z.v1._elemsname, 'item')
self.assertEquals(z.v1, x)
else:
self.assertEquals(z['v1']['_elemsname'], 'item')
self.assertEquals(z['v1'], x)
def testStringArray2(self):
x = arrayType(['d', 'e', 'f'], elemsname = 'elementals')
y = buildSOAP(x)
z = parseSOAP(y)
if config.simplify_objects:
self.assertEquals(z.v1._elemsname, 'elementals')
self.assertEquals(z.v1, x)
else:
self.assertEquals(z['v1']['_elemsname'], 'elementals')
self.assertEquals(z['v1'], x)
def testInt1(self):
my_xml = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<m:getStateName xmlns:m="http://www.soapware.org/">
<statenum xsi:type="xsd:int">41</statenum>
</m:getStateName>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml)
if config.simplify_objects:
self.assertEquals(s['statenum'], 41)
self.assertEquals(type(s['statenum']), type(0))
else:
self.assertEquals(s.statenum, 41)
self.assertEquals(type(s.statenum), type(0))
def testInt2(self):
my_xml_ns = '''
<XSOAP-ENV:Envelope XSOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:XSOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:XSOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:Xxsd="http://www.w3.org/1999/XMLSchema" xmlns:Xxsi="http://www.w3.org/1999/XMLSchema-instance">
<XSOAP-ENV:Body>
<m:getStateName xmlns:m="http://www.soapware.org/">
<statenum Xxsi:type="Xxsd:int">41</statenum>
</m:getStateName>
</XSOAP-ENV:Body>
</XSOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml_ns)
if config.simplify_objects:
self.assertEquals(s['statenum'], 41, "NS one failed")
self.assertEquals(type(s['statenum']), type(0))
else:
self.assertEquals(s.statenum, 41, "NS one failed")
self.assertEquals(type(s.statenum), type(0))
def testPriceAndVolume(self):
my_xml2 = '''
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SOAP-ENV:Header>
<t:Transaction xmlns:t="some-URI" SOAP-ENV:mustUnderstand="1">
5
</t:Transaction>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
<m:GetLastTradePriceResponse xmlns:m="Some-URI">
<PriceAndVolume>
<LastTradePrice>
34.5
</LastTradePrice>
<DayVolume>
10000
</DayVolume>
</PriceAndVolume>
</m:GetLastTradePriceResponse>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml2)
if config.simplify_objects:
self.assertEquals(s['PriceAndVolume']['LastTradePrice'].strip(), "34.5")
self.assertEquals(s['PriceAndVolume']['DayVolume'].strip(), "10000")
else:
self.assertEquals(s.PriceAndVolume.LastTradePrice.strip(), "34.5")
self.assertEquals(s.PriceAndVolume.DayVolume.strip(), "10000")
def testInt3(self):
my_xml3 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<lowerBound xsi:type="xsd:int"> 18 </lowerBound>
<upperBound xsi:type="xsd:int"> 139</upperBound>
</param>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml3)
if config.simplify_objects:
self.assertEquals(s['param']['lowerBound'], 18)
self.assertEquals(s['param']['upperBound'], 139)
else:
self.assertEquals(s.param.lowerBound, 18)
self.assertEquals(s.param.upperBound, 139)
def testBoolean(self):
my_xml4 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param SOAP-ENC:arrayType="xsd:ur-type[4]" xsi:type="SOAP-ENC:Array"><item xsi:type="xsd:int">12</item>
<item xsi:type="xsd:string">Egypt</item>
<item xsi:type="xsd:boolean">0</item>
<item xsi:type="xsd:int">-31</item>
</param>
<param1 xsi:null="1"></param1>
<param2 xsi:null="true"></param2>
<param3 xsi:type="xsd:int" xsi:null="false">7</param3>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml4)
if config.simplify_objects:
self.assertEquals(s['param'][0], 12)
self.assertEquals(s['param'][1], "Egypt")
self.assertEquals(s['param'][2], 0)
self.assertEquals(s['param'][3], -31)
self.assertEquals(s['param1'], None)
self.assertEquals(s['param2'], None)
self.assertEquals(s['param3'], 7)
else:
self.assertEquals(s.param[0], 12)
self.assertEquals(s.param[1], "Egypt")
self.assertEquals(s.param[2], 0)
self.assertEquals(s.param[3], -31)
self.assertEquals(s.param1, None)
self.assertEquals(s.param2, None)
self.assertEquals(s.param3, 7)
def testFault(self):
my_xml5 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<SOAP-ENV:Fault>
<faultcode>SOAP-ENV:Client</faultcode>
<faultstring>Cant call getStateName because there are too many parameters.</faultstring>
</SOAP-ENV:Fault>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
s = parseSOAPRPC(my_xml5)
self.assertEquals(s.__class__, faultType)
self.assertEquals(s.faultcode, "SOAP-ENV:Client")
def testArray2(self):
my_xml6 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<h SOAP-ENC:arrayType="xsd:ur-type[6]" xsi:type="SOAP-ENC:Array">
<item xsi:type="xsd:int">5</item>
<item xsi:type="xsd:int">3</item>
<item xsi:type="xsd:int">2</item>
<item xsi:type="xsd:string">monkey</item>
<item xsi:type="xsd:string">cay</item>
<item>
<cat xsi:type="xsd:string">hello</cat>
<ferret SOAP-ENC:arrayType="xsd:ur-type[6]" xsi:type="SOAP-ENC:Array">
<item xsi:type="xsd:int">5</item>
<item xsi:type="xsd:int">4</item>
<item xsi:type="xsd:int">3</item>
<item xsi:type="xsd:int">2</item>
<item xsi:type="xsd:int">1</item>
<item>
<cow xsi:type="xsd:string">moose</cow>
</item>
</ferret>
<monkey xsi:type="xsd:int">5</monkey>
</item>
</h>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
q = parseSOAPRPC(my_xml6)
self.assertEquals(q[0], 5)
self.assertEquals(q[1], 3)
self.assertEquals(q[2], 2)
self.assertEquals(q[3], 'monkey')
self.assertEquals(q[4], 'cay')
x = q[5]
if config.simplify_objects:
self.assertEquals(x['monkey'], 5)
self.assertEquals(x['cat'], "hello")
self.assertEquals(x['ferret'][0], 5)
self.assertEquals(x['ferret'][3], 2)
self.assertEquals(x['ferret'][5]['cow'], "moose")
else:
self.assertEquals(x.monkey, 5)
self.assertEquals(x.cat, "hello")
self.assertEquals(x.ferret[0], 5)
self.assertEquals(x.ferret[3], 2)
self.assertEquals(x.ferret[5].cow, "moose")
def testArray3(self):
x = arrayType([5,4,3,21], "spam")
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals(x, z)
# test struct
def testStruct(self):
x = structType(name = "eggs")
x.test = 5
y = buildSOAP(x)
z = parseSOAPRPC(y)
if config.simplify_objects:
self.assertEquals( x['test'], z['test'] )
else:
self.assertEquals( x.test, z.test )
# test faults
def testFault1(self):
x = faultType("ServerError","Howdy",[5,4,3,2,1])
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals( x.faultcode , z.faultcode)
self.assertEquals( x.faultstring , z.faultstring)
self.assertEquals( x.detail , z.detail)
# Test the recursion
def testRecursion(self):
o = one()
t = two()
o.t = t
t.o = o
tre = three()
tre.o = o
tre.t = t
x = buildSOAP(tre)
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals( y['t']['o']['t']['o']['t']['o']['t']['str'] , "two")
else:
self.assertEquals( y.t.o.t.o.t.o.t.str , "two")
# Test the recursion with structs
def testRecursionWithStructs(self):
o = structType("one")
t = structType("two")
o.t = t
o.str = "one"
t.o = o
t.str = "two"
tre = structType("three")
tre.o = o
tre.t = t
tre.str = "three"
x = buildSOAP(tre)
y = parseSOAPRPC(x)
if config.simplify_objects:
self.assertEquals( y['t']['o']['t']['o']['t']['o']['t']['str'] , "two")
else:
self.assertEquals( y.t.o.t.o.t.o.t.str , "two")
def testAmp(self):
m = "Test Message <tag> & </tag>"
x = structType("test")
x.msg = m
y = buildSOAP(x)
z = parseSOAPRPC(y)
if config.simplify_objects:
self.assertEquals( m , z['msg'])
else:
self.assertEquals( m , z.msg)
    def testInt4(self):
        """Whitespace around integer element text must be accepted when
        parsing; the result is re-serialized to exercise both directions."""
        my_xml7 = '''
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<lowerBound xsi:type="xsd:int"> 18 </lowerBound>
<upperBound xsi:type="xsd:int"> 139</upperBound>
</param>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
        # No asserts: just parsing and rebuilding must not raise.
        x = parseSOAPRPC(my_xml7)
        y = buildSOAP(x)
# Does buildSOAP require a valid encoding?
def testBuildSOAPEncoding(self):
try:
x = buildSOAP('hello', encoding = 'gleck')
except LookupError, e:
if str (e)[0:16] != 'unknown encoding': raise
x = None
except:
print "Got unexpected exception: %s %s" % tuple (sys.exc_info ()[0:2])
x = ''
self.assertEquals( x , None)
# Does SOAPProxy require a valid encoding?
def testSOAPProxyEncoding(self):
try:
x = SOAPProxy('', encoding = 'gleck')
except LookupError, e:
if str (e)[0:16] != 'unknown encoding': raise
x = None
except:
print "Got unexpected exception: %s %s" % tuple (sys.exc_info ()[0:2])
x = ''
self.assertEquals( x , None)
# Does SOAPServer require a valid encoding?
def testSOAPServerEncoding(self):
try:
x = SOAPServer(('localhost', 0), encoding = 'gleck')
except LookupError, e:
if str (e)[0:16] != 'unknown encoding': raise
x = None
except:
print "Got unexpected exception: %s %s" % tuple (sys.exc_info ()[0:2])
x = ''
self.assertEquals( x , None)
def testEncodings(self):
encodings = ('US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16')
tests = ('A', u'\u0041')
for t in tests:
for i in range (len (encodings)):
x = buildSOAP (t, encoding = encodings[i])
y = parseSOAPRPC (x)
self.assertEquals( y , t)
tests = (u'\u00a1',)
for t in tests:
for i in range (len (encodings)):
try:
x = buildSOAP (t, encoding = encodings[i])
except:
if i > 0: raise
continue
y = parseSOAPRPC (x)
self.assertEquals( y , t)
tests = (u'\u01a1', u'\u2342')
for t in tests:
for i in range (len (encodings)):
try:
x = buildSOAP (t, encoding = encodings[i])
except:
if i > 1: raise
continue
y = parseSOAPRPC (x)
self.assertEquals( y , t)
    def build_xml(self, schema, type, value, attrs = ''):
        """Return a minimal SOAP envelope whose body holds one element
        ``<_1>`` of the given xsd ``type`` with the given text ``value``.

        ``schema`` is the URI bound to the ``xsd`` prefix; ``attrs`` is
        raw attribute text appended verbatim inside the ``<_1>`` tag.
        """
        return '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope
  SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
  xmlns:xsd="%(schema)s"
  xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
  xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
  <_1 xsi:type="xsd:%(type)s"%(attrs)s>%(value)s</_1>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>''' % {'schema': schema, 'type': type, 'value': value,
        'attrs': attrs}
# Make sure the various limits are checked when parsing
def testIntegerLimits(self):
for t, l in SOAPParser.intlimits.items():
try:
parseSOAP(xml % (NS.XSD, t, 'hello'))
raise AssertionError, "parsed %s of 'hello' without error" % t
except AssertionError:
raise
except:
pass
if l[1] != None:
try:
parseSOAP(self.build_xml(NS.XSD, t, l[1] - 1))
raise AssertionError, "parsed %s of %s without error" % \
(t, l[1] - 1)
except AssertionError:
raise
except UnderflowError:
pass
if l[2] != None:
try:
parseSOAP(self.build_xml(NS.XSD, t, l[2] + 1))
raise AssertionError, "parsed %s of %s without error" % \
(t, l[2] + 1)
except AssertionError:
raise
except OverflowError:
pass
# Make sure the various limits are checked when parsing
# Next, floats. Note that chances are good this won't work in any non-Unix Pythons.
    def testFloatLimits(self):
        """Values just outside IEEE single/double range must fail to parse
        with UnderflowError or OverflowError.

        NOTE(review): per the header comment this relies on Unix-style
        float handling and may misbehave on other platforms.
        """
        for i in \
            (
                ('float', '-3.402823466391E+38'),
                ('float', '3.402823466391E+38'),
                ('float', '3.5e+38'),
                ('float', '6.9e-46'),
                ('double', '-1.7976931348623159E+308'),
                ('double', '1.7976931348623159E+308'),
                ('double', '1.8e308'),
                ('double', '2.4e-324'),
            ):
            try:
                parseSOAP(self.build_xml(NS.XSD, i[0], i[1]))
                # Hide this error for now, cause it is a bug in python 2.0 and 2.1
                #if not (sys.version_info[0] == 2 and sys.version_info[1] <= 2) \
                #   and i[1]=='1.7976931348623159E+308':
                raise AssertionError, "parsed %s of %s without error" % i
            except AssertionError:
                raise
            except (UnderflowError, OverflowError):
                pass
# Make sure we can't instantiate the base classes
def testCannotInstantiateBaseClasses(self):
for t in (anyType, NOTATIONType):
try:
x = t()
raise AssertionError, "instantiated %s directly" % repr(t)
except:
pass
# Try everything that requires initial data without any.
    def testMustBeInitialized(self):
        """Every wrapper type that requires an initial value must raise
        when constructed with no arguments."""
        for t in (CDATAType, ENTITIESType, ENTITYType, IDType, IDREFType,
            IDREFSType, NCNameType, NMTOKENType, NMTOKENSType, NOTATIONType,
            NameType, QNameType, anyURIType, base64Type, base64BinaryType,
            binaryType, booleanType, byteType, decimalType, doubleType,
            durationType, floatType, hexBinaryType, intType, integerType,
            languageType, longType, negative_IntegerType, negativeIntegerType,
            non_Negative_IntegerType, non_Positive_IntegerType,
            nonNegativeIntegerType, nonPositiveIntegerType, normalizedStringType,
            positive_IntegerType, positiveIntegerType, shortType, stringType,
            timeDurationType, tokenType, unsignedByteType, unsignedIntType,
            unsignedLongType, unsignedShortType, untypedType, uriType,
            uriReferenceType):
            try:
                t()
                # Reaching here means the no-arg construction succeeded.
                raise AssertionError, "instantiated a %s with no value" % t.__name__
            except AssertionError:
                raise
            except:
                pass
def testInstantiations(self):
# string, ENTITY, ID, IDREF, language, Name, NCName,
# NMTOKEN, QName, untypedType
for t in (stringType, ENTITYType, IDType, IDREFType,
languageType, NameType, NCNameType, NMTOKENType,
QNameType, untypedType):
# First some things that shouldn't be taken as the current type
test = (10, (), [], {})
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
(repr(t), repr(type(i)))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ('hello', u'goodbye'):
x = t(i)
d = x._marshalData()
if d != i:
raise AssertionError, "expected %s, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (i, z)
# ENTITIES, IDREFS, NMTOKENS
for t in (ENTITIESType, IDREFSType, NMTOKENSType):
# First some things that shouldn't be taken as the current type
test = ({}, lambda x: x, ((),), ([],), [{}], [()])
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
repr(t), repr(type(i))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ('hello', (), [], ('hello', 'goodbye'), ['aloha', 'guten_tag']):
x = t(i)
d = x._marshalData()
if type(i) in (type(()), type([])):
j = list(i)
else:
j = [i]
k = ' '.join(j)
if d != k:
raise AssertionError, "expected %s, got %s" % (k, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != j:
raise AssertionError, "expected %s, got %s" % (repr(j), repr(z))
# uri, uriReference, anyURI
for t in (uriType, uriReferenceType, anyURIType):
# First some things that shouldn't be taken as the current type
test = (10, (), [], {})
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
t.__name__, repr(type(i))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ('hello', u'goodbye', '!@#$%^&*()-_=+[{]}\|;:\'",<.>/?`~'):
x = t(i)
d = x._marshalData()
j = urllib.quote(i)
if d != j:
raise AssertionError, "expected %s, got %s" % (j, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# token First some things that shouldn't be valid because of type
test = (42, 3.14, (), [], {})
t = tokenType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % (t.__name__, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that shouldn't be valid because of content
test = (' hello', 'hello ', 'hel\nlo', 'hel\tlo', 'hel lo', ' \n \t ')
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % (t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should be valid
for i in ('', 'hello', u'hello'):
x = t(i)
d = x._marshalData()
if d != i:
raise AssertionError, "expected %s, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i and i != '':
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
#### CDATA, normalizedString
for t in (CDATAType, normalizedStringType):
# First some things that shouldn't be valid because of type
test = (42, 3.14, (), [], {})
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that shouldn't be valid because of content
test = ('hel\nlo', 'hel\rlo', 'hel\tlo', '\n\r\t')
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should be valid
for i in ('', 'hello', u'hello', 'hel lo'):
x = t(i)
d = x._marshalData()
if d != i:
raise AssertionError, "expected %s, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i and i != '':
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
#### boolean
# First some things that shouldn't be valid
test = (10, 'hello', (), [], {})
t = booleanType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % (t.__name__, repr(i))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in ((0, 'false'), ('false', 'false'), (1, 'true'),
('true', 'true'), (0.0, 'false'), (1.0, 'true')):
x = t(i[0])
d = x._marshalData()
if d != i[1]:
raise AssertionError, "%s: expected %s, got %s" % (i[0], i[1], d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
j = ('false', 'true')[z]
if j != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], repr(i[1]), repr(j))
# Now test parsing, both valid and invalid
test = (('10', None), ('hello', None), ('false', 0), ('FALSE', 0),
(ws + 'false' + ws, 0), (ws + '0' + ws, 0),
('0', 0), ('true', 1), ('TRUE', 1), ('1', 1),
(ws + 'true' + ws, 1), (ws + '1' + ws, 1))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
# Can we give it a name and no type?
#print
x = t(1, name = 'George', typed = 0)
#print "x=",x
y = buildSOAP(x)
#print "y=",y
z = parseSOAP(y)
#print "z=",z
test = 'true'
if z.George != test:
raise AssertionError, "expected %s, got %s" % (repr(test), repr(z))
# How about some attributes, set in various and sundry manners?
x = t(1, attrs = {'nonamespaceURI': 1})
x._setAttrs({(None, 'NonenamespaceURI'): 2,
('http://some/namespace', 'namespaceURIattr1'): 3})
x._setAttr(('http://some/other/namespace', 'namespaceURIattr2'), 4)
self.assertEquals( x._getAttr('nonamespaceURI') , 1)
self.assertEquals( x._getAttr('NonenamespaceURI') , 2)
self.assertEquals( x._getAttr(('http://some/namespace',
'namespaceURIattr1')) , 3)
self.assertEquals( x._getAttr(('http://some/other/namespace',
'namespaceURIattr2')) , 4)
self.assertEquals( x._getAttr('non-extant attr') , None)
y = buildSOAP(x)
z = parseSOAPRPC(y)
self.assertEquals( z , 1)
#### decimal
# First some things that shouldn't be valid
test = ('hello', (), [], {})
t = decimalType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad type (%s)" % \
(t.__name__, repr(type(i)))
except AssertionError:
raise
except:
pass
# Now some things that should
for i in (10, 3.14, 23L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %f, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', None), ('1.2.3', None), ('10', 10), ('10.', 10),
('.1', .1), ('.1000000', .1), (ws + '10.4' + ws, 10.4))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### float
# First some things that shouldn't be valid
test = ('hello', (), [], {}, -3.402823466391E+38, 3.402823466391E+38)
t = floatType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (10, 3.14, 23L, -3.4028234663852886E+38, 3.4028234663852886E+38):
x = t(i)
d = x._marshalData()
if not nearlyeq(float(d), i):
raise AssertionError, "expected %f, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if not nearlyeq(z, i):
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', None), ('1.2.3', None), ('10', 10), ('10.', 10),
('.1', .1), ('.1000000', .1), (ws + '10.4' + ws, 10.4),
('-3.402823466391E+38', None), ('3.402823466391E+38', None),
('-3.4028234663852886E+38', -3.4028234663852886E+38),
('3.4028234663852886E+38', 3.4028234663852886E+38))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if abs(z - i[1]) > 1e-6:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### double
# First some things that shouldn't be valid
test = ('hello', (), [], {},
-1.7976931348623159E+308, 1.7976931348623159E+308)
t = doubleType
for i in test:
try:
t(i)
# Hide this error for now, cause it is a bug in python 2.0 and 2.1
if not (sys.version_info[0] == 2 and sys.version_info[1] <= 2
and i==1.7976931348623159E+308):
raise AssertionError, \
"instantiated a double with a bad value (%s)" % repr(i)
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (10, 3.14, 23L, -1.79769313486E+308, 1.79769313486E+308):
x = t(i)
d = x._marshalData()
if not nearlyeq(float(d), i):
raise AssertionError, "expected %s, got %s" % (i, str(x))
y = buildSOAP(x)
z = parseSOAPRPC(y)
if not nearlyeq(z, i):
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', None), ('1.2.3', None), ('10', 10), ('10.', 10),
('.1', .1), ('.1000000', .1), (ws + '10.4' + ws, 10.4),
('-1.7976931348623159E+308', None), ('1.7976931348623158E+308', None),
('-1.79769313486E+308', -1.79769313486E+308),
('1.79769313486E+308', 1.79769313486E+308))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if abs(z - i[1]) > 1e-6:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### hexBinary
x = ''
for i in range(256):
x += chr(i)
test = ('', x, 'hello')
t = hexBinaryType
l = []
for i in test:
l.append(hexBinaryType(i))
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Now test parsing, both valid and invalid
test = (('hello', None), ('6163 747A65726F', None), ('6163747A65726', None),
('6163747A65726F', 'actzero'), (ws + '6163747A65726F' + ws, 'actzero'))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### base64Binary and base64
s = ''
for i in range(256):
s += chr(i)
for t in (base64BinaryType, base64Type):
# First some things that shouldn't be valid
test = ((), [], {}, lambda x: x)
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that should
test = ('', s, u'hello')
l = []
for i in test:
l.append(t(i))
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Now test parsing, both valid and invalid
test = (('hello', None), ('YWN0emVybw=', None),
('YWN 0emVybw==', 'actzero'), ('YWN0emVybw==', 'actzero'),
(ws + 'YWN0emVybw==' + ws, 'actzero'))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
#### binary (uses s from above)
# First some check invalid encodings
try:
x = binaryType('hello', encoding = 'yellow')
raise AssertionError, "created binary with invalid encoding"
except AssertionError:
raise
except:
pass
for t in ('hex', 'base64'):
# First some things that shouldn't be valid
test = ((), [], {}, lambda x: x)
for i in test:
try:
binaryType(i, encoding = t)
raise AssertionError, \
"instantiated a %s binary with a bad value (%s)" % \
(e, repr(i))
except AssertionError:
raise
except AttributeError:
pass
# Now some things that should
test = ('', s, u'hello')
l = []
for i in test:
l.append(binaryType(i, encoding = t))
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Now test parsing, both valid and invalid
if t == 'hex':
test = (('hello', None), ('6163 747A65726F', None),
('6163747A65726', None), ('6163747A65726F', 'actzero'),
(ws + '6163747A65726F' + ws, 'actzero'))
else:
test = (('hello', None), ('YWN0emVybw=', None),
('YWN 0emVybw==', 'actzero'), ('YWN0emVybw==', 'actzero'),
(ws + 'YWN0emVybw==' + ws, 'actzero'))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(NS.XSD, 'binary', i[0],
' encoding="%s"' % t))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != None:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t, sys.exc_info()[0], sys.exc_info()[1])
# Finally try an Array of binaries (with references!)
test = ('', s, u'hello')
l = []
for i in test:
l.append(binaryType(i, encoding = t))
l.append(l[1])
x = buildSOAP(l)
y = parseSOAPRPC(x)
for i in range(len(test)):
if test[i] != y[i]:
raise AssertionError, "@ %d expected '%s', got '%s'" % \
(i, test[i], y[i])
# Make sure the references worked
self.assertEquals( id(y[1]) , id(y[3]))
def badTest(self, t, data):
for i in data:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except:
pass
def goodTest(self, t, data):
for i in data:
x = t(i[0])
d = x._marshalData()
if d != i[1]:
raise AssertionError, "%s(%s): expected %s, got %s" % \
(t.__name__, repr(i[0]), i[1], d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i[2]:
raise AssertionError, "%s(%s): expected %s, got %s" % \
(t.__name__, repr(i[0]), repr(i[2]), repr(z))
def parseTest(self, t, data):
for i in data:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4],
i[0]))
if z != i[1]:
raise AssertionError, "%s(%s): expected %s, got %s" % \
(t.__name__, repr(i[0]), i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def allTests(self, t, baddata, gooddata, parsedata):
self.badTest(t, baddata)
self.goodTest(t, gooddata)
self.parseTest(t, parsedata)
# duration and timeDuration
    def testTimeDuration(self):
        """durationType/timeDurationType: reject malformed constructor
        arguments, marshal (value, 'PnYnMnDTnHnMnS', parsed-tuple)
        triples correctly, and parse the ISO-8601 duration grammar.
        N marks a component that is absent in the parsed tuple."""
        baddata = \
            (
            'hello',
            ('hello',),
            (-10, -10),
            (-10, 0, -10),
            (10.5, 10.5),
            (0, 10.5, 0, 10.5, 0),
            (1, 2, 3, 4, 5, 6, 7),
            (1, 2, 'hello', 4, 5, 6),
            (1, 2, 3.5, 4, 5, 6),
            )
        gooddata = \
            (
            (0, 'PT0S', (N, N, N, N, N, 0.0,)),
            ((), 'PT0S', (N, N, N, N, N, 0.0,)),
            ([], 'PT0S', (N, N, N, N, N, 0.0,)),
            ((0.5,), 'PT0.5S', (N, N, N, N, N, 0.5,)),
            (10L, 'PT10S', (N, N, N, N, N, 10.0,)),
            (-10, '-PT10S', (N, N, N, N, N, -10.0,)),
            (10.5, 'PT10.5S', (N, N, N, N, N, 10.5,)),
            ((10L, 20), 'PT10M20S', (N, N, N, N, 10, 20.0)),
            ((-10, 20), '-PT10M20S', (N, N, N, N, -10, 20.0)),
            ((10, 0), 'PT10M', (N, N, N, N, 10, N)),
            ((10, 0, 0), 'PT10H', (N, N, N, 10, N, N)),
            ((10, 0L, 0, 0), 'P10D', (N, N, 10, N, N, N)),
            ((10, 0, 0, 0, 0), 'P10M', (N, 10, N, N, N, N)),
            ((10, 0, 0, 0L, 0, 0), 'P10Y', (10, N, N, N, N, N)),
            ((-10, 0, 0, 0, 0, 0), '-P10Y', (-10, N, N, N, N, N)),
            ((10, 0, 0, 0, 0, 20L), 'P10YT20S', (10, N, N, N, N, 20.0,)),
            ((1, 2, 3, 4, 5, 6.75), 'P1Y2M3DT4H5M6.75S',
                (1, 2, 3, 4, 5, 6.75)),
            ((-1, 2, 3, 4, 5, 6.75), '-P1Y2M3DT4H5M6.75S',
                (-1, 2, 3, 4, 5, 6.75)),
            ((1, 2, 3, 10, 30, 0), 'P1Y2M3DT10H30M',
                (1, 2, 3, 10, 30, N)),
            ((1e6, 2e6, 3e6, 4e6, 5e6, 6.7e6),
                'P1000000Y2000000M3000000DT4000000H5000000M6700000S',
                (1e6, 2e6, 3e6, 4e6, 5e6, 6.7e6)),
            ((1347, 0, N, 0, 0), 'P1347M', (N, 1347, N, N, N, N)),
            ((-1347, 0, 0, 0, N), '-P1347M', (N, -1347, N, N, N, N)),
            ((1e15, 0, 0, 0, 0), 'P1000000000000000M',
                (N, 1000000000000000L, N, N, N, N)),
            ((-1e15, 0, 0, 0, 0), '-P1000000000000000M',
                (N, -1000000000000000L, N, N, N, N)),
            ((1000000000000000L, 0, 0, 0, 0), 'P1000000000000000M',
                (N, 1000000000000000L, N, N, N, N)),
            ((-1000000000000000L, 0, 0, 0, 0), '-P1000000000000000M',
                (N, -1000000000000000L, N, N, N, N)),
            )
        parsedata = (
            ('hello', N),
            ('P T0S', N),
            ('P10.5Y10.5M', N),
            ('P1Y2MT', N),
            ('PT0S', (N, N, N, N, N, 0,)),
            ('P10Y', (10, N, N, N, N, N)),
            (ws + 'P10M' + ws, (N, 10, N, N, N, N)),
            ('P0Y1347M', (0, 1347, N, N, N, N)),
            ('P0Y1347M0D', (0, 1347, 0, N, N, N)),
            ('P0MT0M', (N, 0, N, N, 0, N)),
            )
        # Both aliases must behave identically.
        for t in (durationType, timeDurationType):
            self.allTests(t, baddata, gooddata, parsedata)
# dateTime, timeInstant, and timePeriod
    def testTimePeriod(self):
        """dateTimeType/timeInstantType/timePeriodType: reject impossible
        calendar values (month/day/hour/minute/second ranges, leap-year
        rules), marshal to ISO-8601 'CCYY-MM-DDThh:mm:ssZ', and parse
        both zulu and offset timezones (offsets are normalized to UTC)."""
        baddata = \
            (
            'hello',
            ('hello',),
            (1, 2, 3, 4, 5),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
            (1, 2, 3, 4, 5, 'hello'),
            (1, 2.5, 3, 4, 5, 6),
            (1, 0, 3, 4, 5, 6),
            (1, 13, 3, 4, 5, 6),
            (1, 1, 0, 4, 5, 6),
            (1, 1, 32, 4, 5, 6),
            (1, 2, 29, 4, 5, 6),
            (0, 2, 30, 4, 5, 6),
            (100, 2, 29, 4, 5, 6),
            (1, 2, 3, -1, 5, 6),
            (1, 2, 3, 24, 5, 6),
            (1, 2, 3, 4, -1, 6),
            (1, 2, 3, 4, 60, 6),
            (1, 2, 3, 4, 5, -1),
            (1, 2, 3, 4, 5, 61),
            (1, 3, 32, 4, 5, 6),
            (1, 4, 31, 4, 5, 6),
            (1, 5, 32, 4, 5, 6),
            (1, 6, 31, 4, 5, 6),
            (1, 7, 32, 4, 5, 6),
            (1, 8, 32, 4, 5, 6),
            (1, 9, 31, 4, 5, 6),
            (1, 10, 32, 4, 5, 6),
            (1, 11, 31, 4, 5, 6),
            (1, 12, 32, 4, 5, 6),
            )
        gooddata = \
            (
            (1L, '1970-01-01T00:00:01Z', (1970, 1, 1, 0, 0, 1.0)),
            (1.5, '1970-01-01T00:00:01.5Z', (1970, 1, 1, 0, 0, 1.5)),
            ((-1, 2, 3, 4, 5, 6), '-0001-02-03T04:05:06Z',
                (-1, 2, 3, 4, 5, 6.0)),
            ((1, 2, 3, 4, 5, 6), '0001-02-03T04:05:06Z',
                (1, 2, 3, 4, 5, 6.0)),
            ((10, 2, 3, 4, 5, 6), '0010-02-03T04:05:06Z',
                (10, 2, 3, 4, 5, 6.0)),
            ((100, 2, 3, 4, 5, 6), '0100-02-03T04:05:06Z',
                (100, 2, 3, 4, 5, 6.0)),
            ((1970, 2, 3, 4, 5, 6), '1970-02-03T04:05:06Z',
                (1970, 2, 3, 4, 5, 6.0)),
            ((-1970, 2, 3, 4, 5, 6), '-1970-02-03T04:05:06Z',
                (-1970, 2, 3, 4, 5, 6.0)),
            ((1970L, 2.0, 3.0, 4L, 5L, 6.875), '1970-02-03T04:05:06.875Z',
                (1970, 2, 3, 4, 5, 6.875)),
            ((11990, 1, 2, 3, 4L, 5.25, 0, 0, 0),
                '11990-01-02T03:04:05.25Z',
                (11990, 1, 2, 3, 4, 5.25)),
            ((1e15, 1, 2, 3, 4L, 5.25, 0, 0, 0),
                '1000000000000000-01-02T03:04:05.25Z',
                (1e15, 1, 2, 3, 4, 5.25)),
            ((-1e15, 1, 2, 3, 4L, 5.25, 0, 0, 0),
                '-1000000000000000-01-02T03:04:05.25Z',
                (-1e15, 1, 2, 3, 4, 5.25)),
            ((1000000000000000L, 1, 2, 3, 4L, 5.25, 0, 0, 0),
                '1000000000000000-01-02T03:04:05.25Z',
                (1e15, 1, 2, 3, 4, 5.25)),
            ((-1000000000000000L, 1, 2, 3, 4L, 5.25, 0, 0, 0),
                '-1000000000000000-01-02T03:04:05.25Z',
                (-1e15, 1, 2, 3, 4, 5.25)),
            )
        parsedata = \
            (
            # Some strings that won't match the r.e.
            ('hello', N),
            ('1970 -01 -01T00:00:01Z', N),
            ('0001-02-03t07:08:23Z', N),
            # Invalid ranges
            ('2001-00-03T07:08:23Z', N),
            ('2001-13-03T07:08:23Z', N),
            ('2001-02-00T07:08:23Z', N),
            ('2001-02-29T07:08:23Z', N),
            ('2000-02-30T07:08:23Z', N),
            ('1900-02-29T07:08:23Z', N),
            ('2001-02-03T24:08:23Z', N),
            ('2001-02-03T04:60:23Z', N),
            ('2001-02-03T04:05:61Z', N),
            ('2001-01-32T04:05:06Z', N),
            ('2001-03-32T04:05:06Z', N),
            ('2001-04-31T04:05:06Z', N),
            ('2001-05-32T04:05:06Z', N),
            ('2001-06-31T04:05:06Z', N),
            ('2001-07-32T04:05:06Z', N),
            ('2001-08-32T04:05:06Z', N),
            ('2001-09-31T04:05:06Z', N),
            ('2001-10-32T04:05:06Z', N),
            ('2001-11-31T04:05:06Z', N),
            ('2001-12-32T04:05:06Z', N),
            # Whitespace
            (ws + '1970-01-01T00:00:00Z' + ws, (1970, 1, 1, 0, 0, 0)),
            # No timezones
            ('11971-02-03T04:05:06.125', (11971, 2, 3, 4, 5, 6.125)),
            ('1971-02-03T04:05:06.125', (1971, 2, 3, 4, 5, 6.125)),
            ('-1971-02-03T04:05:06.125', (-1971, 2, 3, 4, 5, 6.125)),
            # Non-zulu
            ('11971-02-03T04:05:06.125-07:08', (11971, 2, 3, 11, 13, 6.125)),
            ('11971-02-03T04:05:06.125+07:08', (11971, 2, 2, 20, 57, 6.125)),
            ('-11971-02-03T04:05:06.125-07:08', (-11971, 2, 3, 11, 13, 6.125)),
            ('-11971-02-03T04:05:06.125+07:08', (-11971, 2, 2, 20, 57, 6.125)),
            ('1971-02-03T04:05:06.125-07:08', (1971, 2, 3, 11, 13, 6.125)),
            ('1971-02-03T04:05:06.125+07:08', (1971, 2, 2, 20, 57, 6.125)),
            ('-1971-02-03T04:05:06.125-07:08', (-1971, 2, 3, 11, 13, 6.125)),
            ('-1971-02-03T04:05:06.125+07:08', (-1971, 2, 2, 20, 57, 6.125)),
            # Edgepoints (ranges)
            ('2001-01-03T07:08:09Z', (2001, 1, 3, 7, 8, 9)),
            ('2001-12-03T07:08:09Z', (2001, 12, 3, 7, 8, 9)),
            ('2001-02-01T07:08:09Z', (2001, 2, 1, 7, 8, 9)),
            ('2001-02-28T07:08:09Z', (2001, 2, 28, 7, 8, 9)),
            ('2000-02-29T07:08:09Z', (2000, 2, 29, 7, 8, 9)),
            ('1900-02-28T07:08:09Z', (1900, 2, 28, 7, 8, 9)),
            ('2001-02-03T00:08:09Z', (2001, 2, 3, 0, 8, 9)),
            ('2001-02-03T23:08:09Z', (2001, 2, 3, 23, 8, 9)),
            ('2001-02-03T04:00:09Z', (2001, 2, 3, 4, 0, 9)),
            ('2001-02-03T04:59:09Z', (2001, 2, 3, 4, 59, 9)),
            ('2001-02-03T04:05:00Z', (2001, 2, 3, 4, 5, 0)),
            ('2001-02-03T04:05:60.9Z', (2001, 2, 3, 4, 5, 60.9)),
            ('2001-01-31T04:05:06Z', (2001, 1, 31, 4, 5, 6)),
            ('2001-03-31T04:05:06Z', (2001, 3, 31, 4, 5, 6)),
            ('2001-04-30T04:05:06Z', (2001, 4, 30, 4, 5, 6)),
            ('2001-05-31T04:05:06Z', (2001, 5, 31, 4, 5, 6)),
            ('2001-06-30T04:05:06Z', (2001, 6, 30, 4, 5, 6)),
            ('2001-07-31T04:05:06Z', (2001, 7, 31, 4, 5, 6)),
            ('2001-08-31T04:05:06Z', (2001, 8, 31, 4, 5, 6)),
            ('2001-09-30T04:05:06Z', (2001, 9, 30, 4, 5, 6)),
            ('2001-10-31T04:05:06Z', (2001, 10, 31, 4, 5, 6)),
            ('2001-11-30T04:05:06Z', (2001, 11, 30, 4, 5, 6)),
            ('2001-12-31T04:05:06Z', (2001, 12, 31, 4, 5, 6)),
            # Edgepoints (crossing boundaries)
            ('0001-01-01T07:08:23+07:08', (1, 1, 1, 0, 0, 23)),
            ('0001-01-01T07:07:42+07:08', (0, 12, 31, 23, 59, 42)),
            ('-0004-01-01T07:07:42+07:08', (-5, 12, 31, 23, 59, 42)),
            ('2001-03-01T07:07:42+07:08', (2001, 2, 28, 23, 59, 42)),
            ('2000-03-01T07:07:42+07:08', (2000, 2, 29, 23, 59, 42)),
            ('1900-03-01T07:07:42+07:08', (1900, 2, 28, 23, 59, 42)),
            )
        # All three aliases must behave identically.
        for t in (dateTimeType, timeInstantType, timePeriodType):
            self.allTests(t, baddata, gooddata, parsedata)
# recurringInstant
def testRecurringInstant(self):
baddata = \
(
'hello',
('hello',),
(1, 2, N, 3, 4, 5),
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
(1, 2, 3, 4, 5, 'hello'),
(1, 2, 3.5, 4, 5, 6),
)
gooddata = \
(
(1L, '1970-01-01T00:00:01Z', (1970, 1, 1, 0, 0, 1.0)),
(1.5, '1970-01-01T00:00:01.5Z', (1970, 1, 1, 0, 0, 1.5)),
(1e9, '2001-09-09T01:46:40Z', (2001, 9, 9, 1, 46, 40.0)),
((1, 1, 2, 3, 4, 5), '-01-01-02T03:04:05Z',
(1, 1, 2, 3, 4, 5)),
((-1, 1, 2, 3, 4, 5), '--01-01-02T03:04:05Z',
(-1, 1, 2, 3, 4, 5)),
((10, 1, 2, 3, 4, 5), '-10-01-02T03:04:05Z',
(10, 1, 2, 3, 4, 5)),
((-10, 1, 2, 3, 4, 5), '--10-01-02T03:04:05Z',
(-10, 1, 2, 3, 4, 5)),
((100, 1, 2, 3, 4, 5), '0100-01-02T03:04:05Z',
(100, 1, 2, 3, 4, 5)),
((-100, 1, 2, 3, 4, 5), '-0100-01-02T03:04:05Z',
(-100, 1, 2, 3, 4, 5)),
((1970L, 1, 2, 3, 4, 5), '1970-01-02T03:04:05Z',
(1970, 1, 2, 3, 4, 5)),
((1970L, 1, 2L, 3, 4.0, 5.25), '1970-01-02T03:04:05.25Z',
(1970, 1, 2, 3, 4, 5.25)),
((11990, 1, 2, 3L, 4, 5.25), '11990-01-02T03:04:05.25Z',
(11990, 1, 2, 3, 4, 5.25)),
((1e15, 1, 2, 3L, 4, 5.25),
'1000000000000000-01-02T03:04:05.25Z',
(1e15, 1, 2, 3, 4, 5.25)),
((-1e15, 1, 2, 3L, 4, 5.25),
'-1000000000000000-01-02T03:04:05.25Z',
(-1e15, 1, 2, 3, 4, 5.25)),
((N, 1, 2, 3, 4L, 5.25), '---01-02T03:04:05.25Z',
(N, 1, 2, 3, 4, 5.25)),
((N, N, 2, 3, 4, 5.25, 0, 0, 0), '-----02T03:04:05.25Z',
(N, N, 2, 3, 4, 5.25)),
((N, N, -2, 3, 4, 5.25, 0, 0, 0), '------02T03:04:05.25Z',
(N, N, -2, 3, 4, 5.25)),
((N, N, N, 3, 4, 5.25), '------T03:04:05.25Z',
(N, N, N, 3, 4, 5.25)),
((N, N, N, N, 4, 5.25, 0, 0, 0), '------T-:04:05.25Z',
(N, N, N, N, 4, 5.25)),
((N, N, N, N, N, 5.25), '------T-:-:05.25Z',
(N, N, N, N, N, 5.25)),
((N, N, N, N, N, -5.25), '-------T-:-:05.25Z',
(N, N, N, N, N, -5.25)),
((N, N, N, N, N, N, 0, 0, 0), '------T-:-:-Z',
(N, N, N, N, N, N)),
((N, N, N, N, N, N, N), '------T-:-:-Z',
(N, N, N, N, N, N)),
((N, N, N, N, N, N, N, N),
'------T-:-:-Z', (N, N, N, N, N, N)),
((N, N, N, N, N, N, N, N, N),
'------T-:-:-Z', (N, N, N, N, N, N)),
)
parsedata = \
(
# Some strings that won't match the r.e.
('hello', N),
('1970 -01 -01T00:00:01Z', N),
('0001-01-01t07:08:23+07:08', N),
# Invalid ranges
('2001-00-03T07:08:23Z', N),
('2001-13-03T07:08:23Z', N),
('2001-02-00T07:08:23Z', N),
('2001-02-29T07:08:23Z', N),
('2000-02-30T07:08:23Z', N),
('1900-02-29T07:08:23Z', N),
('2001-02-03T24:08:23Z', N),
('2001-02-03T04:60:23Z', N),
('2001-02-03T04:05:61Z', N),
('2001-01-32T04:05:06Z', N),
('2001-03-32T04:05:06Z', N),
('2001-04-31T04:05:06Z', N),
('2001-05-32T04:05:06Z', N),
('2001-06-31T04:05:06Z', N),
('2001-07-32T04:05:06Z', N),
('2001-08-32T04:05:06Z', N),
('2001-09-31T04:05:06Z', N),
('2001-10-32T04:05:06Z', N),
('2001-11-31T04:05:06Z', N),
('2001-12-32T04:05:06Z', N),
# Whitespace
(ws + '1970-01-01T00:00:01Z' + ws, (1970, 1, 1, 0, 0, 1)),
# No timezones
('11971-02-03T04:05:06.125', (11971, 2, 3, 4, 5, 6.125)),
('-11971-02-03T04:05:06.125', (-11971, 2, 3, 4, 5, 6.125)),
('1971-02-03T04:05:06.125', (1971, 2, 3, 4, 5, 6.125)),
('-1971-02-03T04:05:06.125', (-1971, 2, 3, 4, 5, 6.125)),
('-71-02-03T04:05:06.125', (71, 2, 3, 4, 5, 6.125)),
('--71-02-03T04:05:06.125', (-71, 2, 3, 4, 5, 6.125)),
('---02-03T04:05:06.125', (N, 2, 3, 4, 5, 6.125)),
('----02-03T04:05:06.125', (N, -2, 3, 4, 5, 6.125)),
('-----03T04:05:06.125', (N, N, 3, 4, 5, 6.125)),
('------03T04:05:06.125', (N, N, -3, 4, 5, 6.125)),
('------T04:05:06.125', (N, N, N, 4, 5, 6.125)),
('-------T04:05:06.125', (N, N, N, -4, 5, 6.125)),
('------T-:05:06.125', (N, N, N, N, 5, 6.125)),
('-------T-:05:06.125', (N, N, N, N, -5, 6.125)),
('------T-:-:06.125', (N, N, N, N, N, 6.125)),
('-------T-:-:06.125', (N, N, N, N, N, -6.125)),
('------T-:-:-', (N, N, N, N, N, N)),
('-------T-:-:-', (N, N, N, N, N, N)),
# Non-zulu
('11971-02-03T04:05:06.125-07:08', (11971, 2, 3, 11, 13, 6.125)),
('11971-02-03T04:05:06.125+07:08', (11971, 2, 2, 20, 57, 6.125)),
('-11971-02-03T04:05:06.125-07:08', (-11971, 2, 3, 11, 13, 6.125)),
('-11971-02-03T04:05:06.125+07:08', (-11971, 2, 2, 20, 57, 6.125)),
('1971-02-03T04:05:06.125-07:08', (1971, 2, 3, 11, 13, 6.125)),
('1971-02-03T04:05:06.125+07:08', (1971, 2, 2, 20, 57, 6.125)),
('-1971-02-03T04:05:06.125-07:08', (-1971, 2, 3, 11, 13, 6.125)),
('-1971-02-03T04:05:06.125+07:08', (-1971, 2, 2, 20, 57, 6.125)),
('-71-02-03T04:05:06.125-07:08', (71, 2, 3, 11, 13, 6.125)),
('-71-02-03T04:05:06.125+07:08', (71, 2, 2, 20, 57, 6.125)),
('--71-02-03T04:05:06.125-07:08', (-71, 2, 3, 11, 13, 6.125)),
('--71-02-03T04:05:06.125+07:08', (-71, 2, 2, 20, 57, 6.125)),
('---02-03T04:05:06.125-07:08', (N, 2, 3, 11, 13, 6.125)),
('---02-03T04:05:06.125+07:08', (N, 2, 2, 20, 57, 6.125)),
('----02-03T04:05:06.125-07:08', (N, -2, 3, 11, 13, 6.125)),
('----02-03T04:05:06.125+07:08', (N, -2, 2, 20, 57, 6.125)),
('-----03T04:05:06.125-07:08', (N, N, 3, 11, 13, 6.125)),
('-----03T04:05:06.125+07:08', (N, N, 2, 20, 57, 6.125)),
('------03T04:05:06.125-07:08', (N, N, -3, 11, 13, 6.125)),
('------03T04:05:06.125+07:08', (N, N, -4, 20, 57, 6.125)),
('------T04:05:06.125-07:08', (N, N, N, 11, 13, 6.125)),
('------T04:05:06.125+07:08', (N, N, N, -4, 57, 6.125)),
('-------T04:05:06.125-07:08', (N, N, N, 3, 13, 6.125)),
('-------T04:05:06.125+07:08', (N, N, N, -12, 57, 6.125)),
('------T-:05:06.125-07:08', (N, N, N, N, 433, 6.125)),
('------T-:05:06.125+07:08', (N, N, N, N, -423, 6.125)),
('-------T-:05:06.125-07:08', (N, N, N, N, 423, 6.125)),
('-------T-:05:06.125+07:08', (N, N, N, N, -433, 6.125)),
('------T-:-:06.125-07:08', (N, N, N, N, 428, 6.125)),
('------T-:-:06.125+07:08', (N, N, N, N, -428, 6.125)),
('-------T-:-:06.125-07:08', (N, N, N, N, 427, 53.875)),
('-------T-:-:06.125+07:08', (N, N, N, N, -429, 53.875)),
('------T-:-:--07:08', (N, N, N, N, 428, 0)),
('------T-:-:-+07:08', (N, N, N, N, -428, 0)),
('-------T-:-:--07:08', (N, N, N, N, 428, 0)),
('-------T-:-:-+07:08', (N, N, N, N, -428, 0)),
# Edgepoints (ranges)
('2001-01-03T07:08:09Z', (2001, 1, 3, 7, 8, 9)),
('2001-12-03T07:08:09Z', (2001, 12, 3, 7, 8, 9)),
('2001-02-01T07:08:09Z', (2001, 2, 1, 7, 8, 9)),
('2001-02-28T07:08:09Z', (2001, 2, 28, 7, 8, 9)),
('2000-02-29T07:08:09Z', (2000, 2, 29, 7, 8, 9)),
('1900-02-28T07:08:09Z', (1900, 2, 28, 7, 8, 9)),
('2001-02-03T00:08:09Z', (2001, 2, 3, 0, 8, 9)),
('2001-02-03T23:08:09Z', (2001, 2, 3, 23, 8, 9)),
('2001-02-03T04:00:09Z', (2001, 2, 3, 4, 0, 9)),
('2001-02-03T04:59:09Z', (2001, 2, 3, 4, 59, 9)),
('2001-02-03T04:05:00Z', (2001, 2, 3, 4, 5, 0)),
('2001-02-03T04:05:60.9Z', (2001, 2, 3, 4, 5, 60.9)),
('2001-01-31T04:05:06Z', (2001, 1, 31, 4, 5, 6)),
('2001-03-31T04:05:06Z', (2001, 3, 31, 4, 5, 6)),
('2001-04-30T04:05:06Z', (2001, 4, 30, 4, 5, 6)),
('2001-05-31T04:05:06Z', (2001, 5, 31, 4, 5, 6)),
('2001-06-30T04:05:06Z', (2001, 6, 30, 4, 5, 6)),
('2001-07-31T04:05:06Z', (2001, 7, 31, 4, 5, 6)),
('2001-08-31T04:05:06Z', (2001, 8, 31, 4, 5, 6)),
('2001-09-30T04:05:06Z', (2001, 9, 30, 4, 5, 6)),
('2001-10-31T04:05:06Z', (2001, 10, 31, 4, 5, 6)),
('2001-11-30T04:05:06Z', (2001, 11, 30, 4, 5, 6)),
('2001-12-31T04:05:06Z', (2001, 12, 31, 4, 5, 6)),
# Edgepoints (crossing boundaries)
('0001-01-01T07:08:23+07:08', (1, 1, 1, 0, 0, 23)),
('0001-01-01T07:07:42+07:08', (0, 12, 31, 23, 59, 42)),
('-0004-01-01T07:07:42+07:08', (-5, 12, 31, 23, 59, 42)),
('2001-03-01T07:07:42+07:08', (2001, 2, 28, 23, 59, 42)),
('2000-03-01T07:07:42+07:08', (2000, 2, 29, 23, 59, 42)),
('1900-03-01T07:07:42+07:08', (1900, 2, 28, 23, 59, 42)),
('---03-01T07:07:42+07:08', (N, 2, 28, 23, 59, 42)),
)
for t in (recurringInstantType,):
self.allTests(t, baddata, gooddata, parsedata)
def testTime(self):
    """Exercise timeType (xsd:time) via self.allTests.

    baddata  -- constructor inputs that must be rejected.
    gooddata -- (input, expected marshalled string, expected value) triples.
    parsedata -- (wire string, expected parse result) pairs; N marks
                 strings that must fail to parse.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (1, 2, 3, 4, 5),
        (1, 2, 3, 4, 5, 6, 7, 8),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (1, 2, 'hello'),
        (1, 2.5, 3),
        (25, 0, 0),
        (1, 60, 0),
        (1, 0, 61),
        )
    gooddata = \
        (
        # Scalars are seconds-since-midnight; tuples are (h, m, s).
        (1L, '00:00:01Z', (0, 0, 1.0)),
        (1.5, '00:00:01.5Z', (0, 0, 1.5)),
        (3661.5, '01:01:01.5Z', (1, 1, 1.5)),
        (86399.75, '23:59:59.75Z', (23, 59, 59.75)),
        ((1,), '01:00:00Z', (1, 0, 0)),
        ((1, 2), '01:02:00Z', (1, 2, 0)),
        ((10L, 20.0, 30), '10:20:30Z', (10, 20, 30.0)),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('00 00:01Z', N),
        ('07:O8:23Z', N),
        # Invalid ranges
        ('24:08:23Z', N),
        ('04:60:23Z', N),
        ('04:05:61Z', N),
        # Whitespace
        (ws + '00:00:01Z' + ws, (0, 0, 1)),
        # No timezones
        ('04:05:06.125', (4, 5, 6.125)),
        # Non-zulu
        ('04:05:06.125-07:08', (11, 13, 6.125)),
        ('04:05:06.125+07:08', (-4, 57, 6.125)),
        # Edgepoints (ranges)
        ('00:08:09Z', (0, 8, 9)),
        ('23:08:09Z', (23, 8, 9)),
        ('04:00:09Z', (4, 0, 9)),
        ('04:59:09Z', (4, 59, 9)),
        ('04:05:00Z', (4, 5, 0)),
        ('04:05:60.9Z', (4, 5, 60.9)),
        # Edgepoints (crossing boundaries)
        ('07:08:23+07:08', (0, 0, 23)),
        ('07:07:42+07:08', (-1, 59, 42)),
        )
    for t in (timeType,):
        self.allTests(t, baddata, gooddata, parsedata)
def testDate(self):
    """Exercise dateType (xsd:date) via self.allTests.

    baddata  -- constructor inputs that must be rejected (wrong arity,
                wrong types, out-of-range month/day, bad leap days).
    gooddata -- (input, expected marshalled string, expected value) triples.
    parsedata -- (wire string, expected parse result) pairs; N marks
                 strings that must fail to parse.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (1, 2, 3, 4, 5),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (1, 2, 3, 4, 5, 'hello'),
        (1, 2.5, 3, 4, 5, 6),
        (1, 2, 3.5),
        (1, 0, 3),
        (1, 13, 3),
        (1, 1, 0),
        (1, 1, 32),
        (1, 2, 29),
        (0, 2, 30),
        (100, 2, 29),
        (1, 3, 32),
        (1, 4, 31),
        (1, 5, 32),
        (1, 6, 31),
        (1, 7, 32),
        (1, 8, 32),
        (1, 9, 31),
        (1, 10, 32),
        (1, 11, 31),
        (1, 12, 32),
        )
    gooddata = \
        (
        # Scalars are seconds-since-epoch; tuples are (y, m, d).
        (1L, '1970-01-01Z', (1970, 1, 1)),
        (1.5, '1970-01-01Z', (1970, 1, 1)),
        ((2,), '0002-01-01Z', (2, 1, 1)),
        ((2, 3), '0002-03-01Z', (2, 3, 1)),
        ((-2, 3, 4), '-0002-03-04Z', (-2, 3, 4)),
        ((2, 3, 4), '0002-03-04Z', (2, 3, 4)),
        ((10, 2, 3), '0010-02-03Z', (10, 2, 3)),
        ((100, 2, 3), '0100-02-03Z', (100, 2, 3)),
        ((1970, 2, 3), '1970-02-03Z', (1970, 2, 3)),
        ((-1970, 2, 3), '-1970-02-03Z', (-1970, 2, 3)),
        ((1970L, 2.0, 3.0), '1970-02-03Z', (1970, 2, 3)),
        ((11990, 1L, 2), '11990-01-02Z', (11990, 1, 2)),
        ((1e15, 1, 2), '1000000000000000-01-02Z', (1e15, 1, 2)),
        ((-1e15, 1, 2), '-1000000000000000-01-02Z', (-1e15, 1, 2)),
        ((1000000000000000L, 1, 2), '1000000000000000-01-02Z',
            (1e15, 1, 2)),
        ((-1000000000000000L, 1, 2), '-1000000000000000-01-02Z',
            (-1e15, 1, 2)),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('1970 -01 -01Z', N),
        ('0001-02-03z', N),
        # Invalid ranges
        ('2001-00-03Z', N),
        ('2001-13-03Z', N),
        ('2001-02-00Z', N),
        ('2001-02-29Z', N),
        ('2000-02-30Z', N),
        ('1900-02-29Z', N),
        ('2001-01-32Z', N),
        ('2001-03-32Z', N),
        ('2001-04-31Z', N),
        ('2001-05-32Z', N),
        ('2001-06-31Z', N),
        ('2001-07-32Z', N),
        ('2001-08-32Z', N),
        ('2001-09-31Z', N),
        ('2001-10-32Z', N),
        ('2001-11-31Z', N),
        ('2001-12-32Z', N),
        # Whitespace
        (ws + '1970-01-01Z' + ws, (1970, 1, 1)),
        # No timezones
        ('11971-02-03', (11971, 2, 3)),
        ('1971-02-03', (1971, 2, 3)),
        ('-1971-02-03', (-1971, 2, 3)),
        # Non-zulu
        ('11971-02-03-07:08', (11971, 2, 3)),
        ('11971-02-03+07:08', (11971, 2, 2)),
        ('-11971-02-03-07:08', (-11971, 2, 3)),
        ('-11971-02-03+07:08', (-11971, 2, 2)),
        ('1971-02-03-07:08', (1971, 2, 3)),
        ('1971-02-03+07:08', (1971, 2, 2)),
        ('-1971-02-03-07:08', (-1971, 2, 3)),
        ('-1971-02-03+07:08', (-1971, 2, 2)),
        # Edgepoints (ranges)
        ('2001-01-03Z', (2001, 1, 3)),
        ('2001-12-03Z', (2001, 12, 3)),
        ('2001-02-01Z', (2001, 2, 1)),
        ('2001-02-28Z', (2001, 2, 28)),
        ('2000-02-29Z', (2000, 2, 29)),
        ('1900-02-28Z', (1900, 2, 28)),
        ('2001-01-31Z', (2001, 1, 31)),
        ('2001-03-31Z', (2001, 3, 31)),
        ('2001-04-30Z', (2001, 4, 30)),
        ('2001-05-31Z', (2001, 5, 31)),
        ('2001-06-30Z', (2001, 6, 30)),
        ('2001-07-31Z', (2001, 7, 31)),
        ('2001-08-31Z', (2001, 8, 31)),
        ('2001-09-30Z', (2001, 9, 30)),
        ('2001-10-31Z', (2001, 10, 31)),
        ('2001-11-30Z', (2001, 11, 30)),
        ('2001-12-31Z', (2001, 12, 31)),
        # Edgepoints (crossing boundaries)
        ('0001-01-01+07:08', (0, 12, 31)),
        ('-0004-01-01+07:08', (-5, 12, 31)),
        ('2001-03-01+07:08', (2001, 2, 28)),
        ('2000-03-01+07:08', (2000, 2, 29)),
        ('1900-03-01+07:08', (1900, 2, 28)),
        )
    for t in (dateType,):
        self.allTests(t, baddata, gooddata, parsedata)
def testGYearMonth(self):
    """Exercise gYearMonthType (xsd:gYearMonth) via self.allTests.

    baddata  -- constructor inputs that must be rejected.
    gooddata -- (input, expected marshalled string, expected value) triples.
    parsedata -- (wire string, expected parse result) pairs; N marks
                 strings that must fail to parse.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (1, 2, 3),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (1, 2, 3.5),
        (1, 'hello'),
        (1, 2.5),
        (1, 0),
        (1, 13),
        )
    gooddata = \
        (
        # Scalars are seconds-since-epoch; tuples are (year, month).
        (1L, '1970-01Z', (1970, 1)),
        (1.5, '1970-01Z', (1970, 1)),
        ((2,), '0002-01Z', (2, 1)),
        ((2, 3), '0002-03Z', (2, 3)),
        ((-2, 3), '-0002-03Z', (-2, 3)),
        ((10, 2), '0010-02Z', (10, 2)),
        ((100, 2), '0100-02Z', (100, 2)),
        ((1970, 2), '1970-02Z', (1970, 2)),
        ((-1970, 2), '-1970-02Z', (-1970, 2)),
        ((1970L, 2.0), '1970-02Z', (1970, 2)),
        ((11990, 1L), '11990-01Z', (11990, 1)),
        ((1e15, 1), '1000000000000000-01Z', (1e15, 1)),
        ((-1e15, 1), '-1000000000000000-01Z', (-1e15, 1)),
        ((1000000000000000L, 1), '1000000000000000-01Z', (1e15, 1)),
        ((-1000000000000000L, 1), '-1000000000000000-01Z', (-1e15, 1)),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('1970 -01Z', N),
        ('0001-02z', N),
        # Invalid ranges
        ('2001-00Z', N),
        ('2001-13Z', N),
        # Whitespace
        (ws + '1970-01Z' + ws, (1970, 1)),
        # No timezones
        ('11971-02', (11971, 2)),
        ('1971-02', (1971, 2)),
        ('-1971-02', (-1971, 2)),
        # Non-zulu
        ('11971-02-07:08', (11971, 2)),
        ('11971-02+07:08', (11971, 1)),
        ('-11971-02-07:08', (-11971, 2)),
        ('-11971-02+07:08', (-11971, 1)),
        ('1971-02-07:08', (1971, 2)),
        ('1971-02+07:08', (1971, 1)),
        ('-1971-02-07:08', (-1971, 2)),
        ('-1971-02+07:08', (-1971, 1)),
        # Edgepoints (ranges)
        ('2001-01Z', (2001, 1)),
        ('2001-12Z', (2001, 12)),
        # Edgepoints (crossing boundaries)
        ('0001-01+07:08', (0, 12)),
        ('-0004-01+07:08', (-5, 12)),
        ('2001-03+07:08', (2001, 2)),
        ('2000-03+07:08', (2000, 2)),
        ('1900-03+07:08', (1900, 2)),
        )
    for t in (gYearMonthType,):
        self.allTests(t, baddata, gooddata, parsedata)
def testGYearAndYear(self):
    """Exercise gYearType and yearType (xsd:gYear) via self.allTests.

    Both spellings of the type are driven through the same three data
    tables; N marks wire strings that must fail to parse.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (1, 2),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (2.5,),
        )
    gooddata = \
        (
        (1L, '0001Z', 1),
        (10, '0010Z', 10),
        (100, '0100Z', 100),
        (1970, '1970Z', 1970),
        (-1970, '-1970Z', -1970),
        (1970L, '1970Z', 1970),
        (11990.0, '11990Z', 11990),
        (1e15, '1000000000000000Z', 1e15),
        (-1e15, '-1000000000000000Z', -1e15),
        (1000000000000000L, '1000000000000000Z', 1e15),
        (-1000000000000000L, '-1000000000000000Z', -1e15),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('197OZ', N),
        ('0001z', N),
        # Whitespace
        (ws + '1970Z' + ws, 1970),
        # No timezones
        ('11971', 11971),
        ('1971', 1971),
        ('-1971', -1971),
        # Non-zulu
        ('11971-07:08', 11971),
        ('11971+07:08', 11970),
        ('-11971-07:08', -11971),
        ('-11971+07:08', -11972),
        ('1971-07:08', 1971),
        ('1971+07:08', 1970),
        ('-1971-07:08', -1971),
        ('-1971+07:08', -1972),
        # Edgepoints (crossing boundaries)
        ('0001+07:08', 0),
        ('-0004+07:08', -5),
        )
    for t in (gYearType, yearType):
        self.allTests(t, baddata, gooddata, parsedata)
def testCentury(self):
    """Exercise centuryType via self.allTests.

    NOTE(review): parsedata reuses year-scale strings ('1970Z' etc.)
    from the gYear test; this presumably reflects centuryType sharing
    the year parser -- confirm against the type's implementation.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (1, 2),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (2.5,),
        )
    gooddata = \
        (
        (1L, '01Z', 1),
        (10, '10Z', 10),
        (100, '100Z', 100),
        (19, '19Z', 19),
        (-19, '-19Z', -19),
        (19L, '19Z', 19),
        (119.0, '119Z', 119),
        (1e15, '1000000000000000Z', 1e15),
        (-1e15, '-1000000000000000Z', -1e15),
        (1000000000000000L, '1000000000000000Z', 1e15),
        (-1000000000000000L, '-1000000000000000Z', -1e15),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('197OZ', N),
        ('0001z', N),
        # Whitespace
        (ws + '1970Z' + ws, 1970),
        # No timezones
        ('11971', 11971),
        ('1971', 1971),
        ('-1971', -1971),
        # Non-zulu
        ('11971-07:08', 11971),
        ('11971+07:08', 11970),
        ('-11971-07:08', -11971),
        ('-11971+07:08', -11972),
        ('1971-07:08', 1971),
        ('1971+07:08', 1970),
        ('-1971-07:08', -1971),
        ('-1971+07:08', -1972),
        # Edgepoints (crossing boundaries)
        ('0001+07:08', 0),
        ('-0004+07:08', -5),
        )
    for t in (centuryType,):
        self.allTests(t, baddata, gooddata, parsedata)
def testGMonthDayAndRecurringDate(self):
    """Exercise gMonthDayType and recurringDateType via self.allTests.

    baddata  -- constructor inputs that must be rejected (out-of-range
                months/days, including per-month day limits).
    gooddata -- (input, expected marshalled string, expected value) triples.
    parsedata -- (wire string, expected parse result) pairs; N marks
                 strings that must fail to parse.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (3, 4, 5),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (4, 5, 'hello'),
        (2.5, 3),
        (0, 3),
        (13, 3),
        (1, 0),
        (1, 32),
        (2, 29),
        (3, 32),
        (4, 31),
        (5, 32),
        (6, 31),
        (7, 32),
        (8, 32),
        (9, 31),
        (10, 32),
        (11, 31),
        (12, 32),
        )
    gooddata = \
        (
        (1L, '--01-01Z', (1, 1)),
        (1.5, '--01-01Z', (1, 1)),
        ((2,), '--02-01Z', (2, 1)),
        ((2, 3), '--02-03Z', (2, 3)),
        ((10, 2), '--10-02Z', (10, 2)),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('--01 -01Z', N),
        ('--02-03z', N),
        # Invalid ranges
        ('--00-03Z', N),
        ('--13-03Z', N),
        ('--01-32Z', N),
        ('--02-00Z', N),
        ('--02-29Z', N),
        ('--03-32Z', N),
        ('--04-31Z', N),
        ('--05-32Z', N),
        ('--06-31Z', N),
        ('--07-32Z', N),
        ('--08-32Z', N),
        ('--09-31Z', N),
        ('--10-32Z', N),
        ('--11-31Z', N),
        ('--12-32Z', N),
        # Whitespace
        (ws + '--01-01Z' + ws, (1, 1)),
        # No timezones
        ('--02-03', (2, 3)),
        # Non-zulu
        ('--02-03-07:08', (2, 3)),
        ('--02-03+07:08', (2, 2)),
        # Edgepoints (ranges)
        ('--01-03Z', (1, 3)),
        ('--12-03Z', (12, 3)),
        ('--01-31Z', (1, 31)),
        ('--02-01Z', (2, 1)),
        ('--02-28Z', (2, 28)),
        ('--03-31Z', (3, 31)),
        ('--04-30Z', (4, 30)),
        ('--05-31Z', (5, 31)),
        ('--06-30Z', (6, 30)),
        ('--07-31Z', (7, 31)),
        ('--08-31Z', (8, 31)),
        ('--09-30Z', (9, 30)),
        ('--10-31Z', (10, 31)),
        ('--11-30Z', (11, 30)),
        ('--12-31Z', (12, 31)),
        # Edgepoints (crossing boundaries)
        ('--01-01+07:08', (12, 31)),
        ('--03-01+07:08', (2, 28)),
        )
    for t in (gMonthDayType, recurringDateType):
        self.allTests(t, baddata, gooddata, parsedata)
def testGMonthAndMonth(self):
    """Exercise gMonthType and monthType (xsd:gMonth) via self.allTests.

    N in parsedata marks wire strings that must fail to parse.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (3, 4,),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (2.5,),
        (0,),
        (13,),
        )
    gooddata = \
        (
        (1L, '--01--Z', 1),
        ((2,), '--02--Z', 2),
        ((10,), '--10--Z', 10),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('--01 --Z', N),
        ('--03--z', N),
        # Invalid ranges
        ('--00--Z', N),
        ('--13--Z', N),
        # Whitespace
        (ws + '--01--Z' + ws, 1),
        # No timezones
        ('--03--', 3),
        # Non-zulu
        ('--03---07:08', 3),
        ('--03--+07:08', 2),
        # Edgepoints (ranges)
        ('--01--Z', 1),
        ('--12--Z', 12),
        # Edgepoints (crossing boundaries)
        ('--01--+07:08', 12),
        ('--12---07:08', 12),
        )
    for t in (gMonthType, monthType):
        self.allTests(t, baddata, gooddata, parsedata)
def testGDayAndRecurringDay(self):
    """Exercise gDayType and recurringDayType (xsd:gDay) via self.allTests.

    N in parsedata marks wire strings that must fail to parse.
    """
    baddata = \
        (
        'hello',
        ('hello',),
        (3, 4,),
        (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
        (2.5,),
        (0,),
        (32,),
        )
    gooddata = \
        (
        (1L, '---01Z', 1),
        ((2,), '---02Z', 2),
        ((10,), '---10Z', 10),
        )
    parsedata = \
        (
        # Some strings that won't match the r.e.
        ('hello', N),
        ('---01 Z', N),
        ('---03z', N),
        # Invalid ranges
        ('---00Z', N),
        ('---32Z', N),
        # Whitespace
        (ws + '---01Z' + ws, 1),
        # No timezones
        ('---03', 3),
        # Non-zulu
        ('---03-07:08', 3),
        ('---03+07:08', 2),
        # Edgepoints (ranges)
        ('---01Z', 1),
        ('---31Z', 31),
        # Edgepoints (crossing boundaries)
        ('---01+07:08', 31),
        ('---31-07:08', 31),
        )
    for t in (gDayType, recurringDayType):
        self.allTests(t, baddata, gooddata, parsedata)
def testInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {})
t = integerType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (10, 23L, 1111111111111111111111111111111111111111111111111111L):
x = integerType(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('10 000', N),
('1', 1),
('123456789012345678901234567890', 123456789012345678901234567890L),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4],
i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testNonPositiveInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, 1, 23)
for t in (nonPositiveIntegerType, non_Positive_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a t with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, -23L, -1111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('1', N),
('0', 0),
('-1', -1),
('-123456789012345678901234567890', -123456789012345678901234567890L),
(ws + '-12' + ws, -12))
for i in test:
try:
if t == nonPositiveIntegerType:
n = t.__name__[:-4]
else:
n = 'non-positive-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testNegativeInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, 0, 23)
for t in (negativeIntegerType, negative_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('1', N),
('0', N),
('-1', -1),
('-123456789012345678901234567890', -123456789012345678901234567890L),
(ws + '-12' + ws, -12))
for i in test:
try:
if t == negativeIntegerType:
n = t.__name__[:-4]
else:
n = 'negative-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "expected %s, got %s" % (i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testLong(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {},
-9223372036854775809L, 9223372036854775808L)
t = longType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -9223372036854775808L, 9223372036854775807L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-9223372036854775809', N), ('9223372036854775808', N),
('-1', -1), ('0', 0), ('1', 1),
('-9223372036854775808', -9223372036854775808L),
('9223372036854775807', 9223372036854775807L),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testInt(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -2147483649L, 2147483648L)
t = intType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -2147483648L, 2147483647):
x = intType(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-2147483649', N), ('2147483648', N),
('-1', -1), ('0', 0), ('1', 1),
('-2147483648', -2147483648L),
('2147483647', 2147483647),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testShort(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -32769, 32768)
t = shortType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -32768, 32767):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-32769', N), ('32768', N),
('-1', -1), ('0', 0), ('1', 1),
('-32768', -32768),
('32767', 32767),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testByte(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -129, 128)
t = byteType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (-1, -23L, -128, 127):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N),
('-129', N), ('128', N),
('-1', -1), ('0', 0), ('1', 1),
('-128', -128),
('127', 127),
(ws + '-12' + ws, -12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testNonNegativeInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1)
for t in (nonNegativeIntegerType, non_Negative_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 1, 23L, 111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('0', 0),
('1', 1),
('123456789012345678901234567890', 123456789012345678901234567890L),
(ws + '12' + ws, 12))
for i in test:
try:
if t == nonNegativeIntegerType:
n = t.__name__[:-4]
else:
n = 'non-negative-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedLong(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 18446744073709551616L)
t = unsignedLongType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 18446744073709551615L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('18446744073709551616', N),
('0', 0), ('1', 1),
('18446744073709551615', 18446744073709551615L),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedInt(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 4294967296L)
t = unsignedIntType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 4294967295L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('4294967296', N),
('0', 0), ('1', 1),
('4294967295', 4294967295L),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedShort(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 65536)
t = unsignedShortType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 65535):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('65536', N),
('0', 0), ('1', 1),
('65535', 65535),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUnsignedByte(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 256)
t = unsignedByteType
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a %s with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (0, 23L, 255):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('256', N),
('0', 0), ('1', 1),
('255', 255),
(ws + '12' + ws, 12))
for i in test:
try:
z = parseSOAPRPC(self.build_xml(t._validURIs[0], t.__name__[:-4], i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testPositiveInteger(self):
# First some things that shouldn't be valid
test = ('hello', 3.14, (), [], {}, -42, -1, 0)
for t in (positiveIntegerType, positive_IntegerType):
for i in test:
try:
t(i)
raise AssertionError, \
"instantiated a t with a bad value (%s)" % \
(t.__name__, repr(i))
except AssertionError:
raise
except ValueError:
pass
# Now some things that should
for i in (1, 23L, 1111111111111111111111111111111111111111111111111111L):
x = t(i)
d = x._marshalData()
if d != str(i):
raise AssertionError, "expected %d, got %s" % (i, d)
y = buildSOAP(x)
z = parseSOAPRPC(y)
if z != i:
raise AssertionError, "expected %s, got %s" % (repr(i), repr(z))
# Now test parsing, both valid and invalid
test = (('hello', N), ('3.14', N), ('-10 000', N), ('-1', N),
('0', N), ('1', 1),
('123456789012345678901234567890', 123456789012345678901234567890L),
(ws + '12' + ws, 12))
for i in test:
try:
if t == positiveIntegerType:
n = t.__name__[:-4]
else:
n = 'positive-integer'
z = parseSOAPRPC(self.build_xml(t._validURIs[0], n, i[0]))
if z != i[1]:
raise AssertionError, "%s: expected %s, got %s" % \
(i[0], i[1], repr(z))
except AssertionError:
raise
except:
if i[1] != N:
raise AssertionError, \
"parsing %s as %s threw exception %s:%s" % \
(i[0], t.__name__, sys.exc_info()[0], sys.exc_info()[1])
def testUntyped(self):
    """untypedType must serialize without an xsi:type attribute."""
    typed = stringType('hello', name = 'a')
    untyped = untypedType('earth', name = 'b')
    soap = buildSOAP((typed, untyped))
    # The typed element carries xsi:type; the untyped one must not.
    self.failUnless(soap.find('<a xsi:type="xsd:string" SOAP-ENC:root="1">hello</a>') != -1)
    self.failUnless(soap.find('<b SOAP-ENC:root="1">earth</b>') != -1)
    # Now some Array tests

    def testArray(self):
        """Exercise SOAP-ENC:Array parsing.

        Covers offsets, per-item positions, sparse and multi-dimensional
        arrays, partially transmitted arrays, href-referenced rows, and a
        series of malformed arrays that must fail to parse.
        """
        # Shared envelope wrapper; %s is replaced with a Body element.
        env = '''<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsd2="http://www.w3.org/2000/10/XMLSchema" xmlns:xsd3="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/">
%s
</SOAP-ENV:Envelope>'''

        # Array of arrays with an offset: the first two slots stay None.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[4]" SOAP-ENC:offset="[2]" xsi:type="SOAP-ENC:Array">
<_2 SOAP-ENC:arrayType="xsd:int[2]" xsi:type="SOAP-ENC:Array">
<item>1</item>
<item>2</item>
</_2>
<_3 SOAP-ENC:arrayType="xsd:int[2]" xsi:type="SOAP-ENC:Array">
<item>3</item>
<item>4</item>
</_3>
</_1>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [None, None, [1, 2], [3, 4]])

        # 3x4x2 array with a flat offset of 17 into the 24 cells.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[3,4,2]" SOAP-ENC:offset="[17]" xsi:type="SOAP-ENC:Array">
<item>1</item>
<item>2</item>
<item>3</item>
<item>4</item>
<item>5</item>
<item>6</item>
<item>7</item>
</_1>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [
            [[None, None], [None, None], [None, None], [None, None]],
            [[None, None], [None, None], [None, None], [None, None]],
            [[None, 1], [2, 3], [4, 5], [6, 7]]
        ])

        # Sparse 3x4x2 array addressed by SOAP-ENC:position; position [17]
        # appears twice -- the expected result shows the later value (17)
        # winning over -17.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[3,4,2]" xsi:type="SOAP-ENC:Array">
<item SOAP-ENC:position="[17]">-17</item>
<item SOAP-ENC:position="[13]">13</item>
<item SOAP-ENC:position="[22]">-22</item>
<item SOAP-ENC:position="[1]">1</item>
<item SOAP-ENC:position="[17]">17</item>
<item SOAP-ENC:position="[23]">23</item>
<item SOAP-ENC:position="[6]">6</item>
</_1>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [
            [[None, 1L], [None, None], [None, None], [6L, None]],
            [[None, None], [None, None], [None, 13L], [None, None]],
            [[None, 17L], [None, None], [None, None], [-22L, 23L]]
        ])

        # Positions given out of order in a fully populated 1-D array.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[4]" SOAP-ENC:offset="[3]" xsi:type="SOAP-ENC:Array">
<item SOAP-ENC:position="[2]">2</item>
<item SOAP-ENC:position="[0]">0</item>
<item SOAP-ENC:position="[1]">1</item>
<item SOAP-ENC:position="[3]">3</item>
</_1>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [0, 1, 2, 3])

        # Empty array with maximum legal offset (23 of 24 cells skipped).
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" SOAP-ENC:offset="[23]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [
            [
                [None, None, None, None],
                [None, None, None, None],
                [None, None, None, None],
            ],
            [
                [None, None, None, None],
                [None, None, None, None],
                [None, None, None, None],
            ]
        ])

        # Offset pushes the transmitted items past the declared size.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[4]" SOAP-ENC:offset="[3]" xsi:type="SOAP-ENC:Array">
<item>2</item>
<item>3</item>
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "full array parsed"
        except AssertionError:
            raise
        except:
            pass

        # Zero dimension must be rejected.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,0,4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "array with bad dimension (0) parsed"
        except AssertionError:
            raise
        except:
            pass

        # Negative dimension must be rejected.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,-4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "array with bad dimension (negative) parsed"
        except AssertionError:
            raise
        except:
            pass

        # Non-integral dimension must be rejected.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4.4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "array with bad dimension (non-integral) parsed"
        except AssertionError:
            raise
        except:
            pass

        # Non-numeric dimension must be rejected.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,hello,4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "array with bad dimension (non-numeric) parsed"
        except AssertionError:
            raise
        except:
            pass

        # Negative offset must be rejected.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" SOAP-ENC:offset="[-4]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "array with too large offset parsed"
        except AssertionError:
            raise
        except:
            pass

        # Offset equal to the total size (24) must be rejected.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" SOAP-ENC:offset="[24]" xsi:type="SOAP-ENC:Array">
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "array with too large offset parsed"
        except AssertionError:
            raise
        except:
            pass

        # Malformed position attribute (missing brackets) must be rejected.
        xml = env % '''<SOAP-ENV:Body>
<_1 SOAP-ENC:arrayType="xsd:int[2,3,4]" xsi:type="SOAP-ENC:Array">
<item SOAP-ENC:position="0">2</item>
<item>3</item>
</_1>
</SOAP-ENV:Body>'''
        try:
            x = parseSOAPRPC(xml)
            raise AssertionError, "full array parsed"
        except AssertionError:
            raise
        except:
            pass

        # Element names other than 'item' are fine for array members.
        xml = env % '''<SOAP-ENV:Body>
<myFavoriteNumbers type="SOAP-ENC:Array" SOAP-ENC:arrayType="xsd:int[2]">
<number>3</number>
<number>4</number>
</myFavoriteNumbers>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [3, 4])

        # Heterogeneous ur-type array with per-item xsi:type attributes.
        xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:ur-type[4]">
<thing xsi:type="xsd:int">12345</thing>
<thing xsi:type="xsd:decimal">6.789</thing>
<thing xsi:type="xsd:string">Of Mans First Disobedience, and the Fruit
Of that Forbidden Tree, whose mortal tast
Brought Death into the World, and all our woe,</thing>
<thing xsi:type="xsd2:uriReference">
http://www.dartmouth.edu/~milton/reading_room/
</thing>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [12345, 6.789, '''Of Mans First Disobedience, and the Fruit
Of that Forbidden Tree, whose mortal tast
Brought Death into the World, and all our woe,''',
            'http://www.dartmouth.edu/~milton/reading_room/'])

        # Array of application-defined compound type (parse disabled below).
        xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xyz:Order[2]">
<Order>
<Product>Apple</Product>
<Price>1.56</Price>
</Order>
<Order>
<Product>Peach</Product>
<Price>1.48</Price>
</Order>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
        #x = parseSOAPRPC(xml)
        #print "x=",x

        # Array whose members are href references to sibling arrays.
        xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[3]">
<item href="#array-1"/>
<item href="#array-2"/>
<item href="#array-2"/>
</SOAP-ENC:Array>
<SOAP-ENC:Array id="array-1" SOAP-ENC:arrayType="xsd:string[3]">
<item>r1c1</item>
<item>r1c2</item>
<item>r1c3</item>
</SOAP-ENC:Array>
<SOAP-ENC:Array id="array-2" SOAP-ENC:arrayType="xsd:string[2]">
<item>r2c1</item>
<item>r2c2</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [['r1c1', 'r1c2', 'r1c3'],
            ['r2c1', 'r2c2'], ['r2c1', 'r2c2']])

        # 2x3 multi-dimensional array transmitted flat.
        xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[2,3]">
<item>r1c1</item>
<item>r1c2</item>
<item>r1c3</item>
<item>r2c1</item>
<item>r2c2</item>
<item>r2c3</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [['r1c1', 'r1c2', 'r1c3'], ['r2c1', 'r2c2', 'r2c3']])

        # Partially transmitted string array with an offset.
        xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[5]" SOAP-ENC:offset="[2]">
<item>The third element</item>
<item>The fourth element</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
        self.assertEquals( x , [None, None, 'The third element', 'The fourth element', None])

        # Array-of-arrays with positions at both levels; result unchecked
        # (known SOAPpy shortcoming, see the note in __main__ below).
        xml = env % '''<SOAP-ENV:Body>
<SOAP-ENC:Array SOAP-ENC:arrayType="xsd:string[,][4]">
<SOAP-ENC:Array href="#array-1" SOAP-ENC:position="[2]"/>
</SOAP-ENC:Array>
<SOAP-ENC:Array id="array-1" SOAP-ENC:arrayType="xsd:string[10,10]">
<item SOAP-ENC:position="[2,2]">Third row, third col</item>
<item SOAP-ENC:position="[7,2]">Eighth row, third col</item>
</SOAP-ENC:Array>
</SOAP-ENV:Body>'''
        x = parseSOAPRPC(xml)
    # Example using key data

    def testKeyData(self):
        """Parse a real-world XKMS RegisterResult response.

        A smoke test: a large, string-heavy document with xmldsig key
        material must parse without raising; the result is not inspected.
        """
        xml = '''<?xml version="1.0" encoding="UTF-8"?>
<soap:Envelope xmlns:dsig="http://www.w3.org/2000/09/xmldsig#" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:xsd="http://www.w3.org/1999/XMLSchema" xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<soap:Body>
<xkms:RegisterResult xmlns:xkms="http://www.xkms.org/schema/xkms-2001-01-20">
<xkms:Result>Success</xkms:Result>
<xkms:Answer soapenc:arrayType="KeyBinding[1]">
<xkms:KeyBinding>
<xkms:Status>Valid</xkms:Status>
<xkms:KeyID>mailto:actzerotestkeyname</xkms:KeyID>
<dsig:KeyInfo>
<dsig:X509Data>
<dsig:X509Certificate>MIIDPjCCAqegAwIBAgIEOroMvDANBgkqhkiG9w0BAQUFADAxMQswCQYDVQQGEwJVI3nlMkH84ZdPKIyz60sNcVEwJ8kF+B6ZVNimCF+r7BWgLi/Dolce5CpbfMMyexZ+UQEMADrc7331eYS891KXSDQx</dsig:X509Certificate>
</dsig:X509Data>
<dsig:KeyName>mailto:actzerotestkeyname</dsig:KeyName>
<dsig:KeyValue>
<dsig:RSAKeyValue>
<dsig:Modulus>wgmV2FY6MBKvtaMmCvCoNi/0hycZkiPKC2PXjRLJKFJ5wjNfF+vWsQQUXxOKUQnu
HjJqRkx90jJvnEzW3j9FlZFQcZTfJbE0v6BXhhSre2kZvkgcOERmDMeMs//oEA4u
epnedUwrkPzedWU9AL7c/oN7rk65UuPWf7V8c/4E9bc=</dsig:Modulus>
<dsig:Exponent>AQAB</dsig:Exponent>
</dsig:RSAKeyValue>
</dsig:KeyValue>
</dsig:KeyInfo>
</xkms:KeyBinding>
</xkms:Answer>
<xkms:Private>9GKuRC3ISwE9aEatzDKW0WIp+P/ufOvCxy9d5jVglLaRiTTIelHoGKCE6cDG62HYOu/3ebce6M7Z6LX6l1J9pB5PUx+f2DaMYYEGuOtNA7/ei5Ga/mibRBCehQIcN6FF6ESFOwAJBRLajj+orgYSy0u1sTCla0V4nSBrYA2H6lx8mD3qfDJ4hie7nU0YqZxy50F9f9UxXKIVSeutyIIBjWDDKv0kVpKy7OUerOaZXOW6HBohXuV74kXMUZu+MpLIkMHOrhJeo+edfhmeFuw4kCo5it6GkrOKrGs6zo1hSxWp7uuvKAPbvUrumC6sTsTxAUg4KTGq85IUnBTYI40Q9TZtzMcONtrWfIIF23/7NJyOmygBaFa4wFqHxe7j2gSWCQRv2fPwXo/AAJTeKwsUIY8OgmANHHbFVqJEeg27jbCuSaQFxWD7ms240YurTb55HBLk6JSufDl0CUbxoUgjrDB++gUb8oalroWDIb5NcZ94QER+HiTQfB11HcPDHvONnzk/n+iF+Mcri53ZbAButnfp2x87sh6RedeiUUWruYA4eonRq5+aj2I9cIrGLQaLemna1AQ+PyD2SMelBLukfR7GUc7zaSPjPJh2W/aYAJSyjM98g6ABNntdfhuf+6jRYnYFqSXZL1W1JPf92OMOfwfuXTE2K68sNwCRhcbHDLM=</xkms:Private>
</xkms:RegisterResult>
</soap:Body>
</soap:Envelope>'''
        x = parseSOAPRPC(xml)
def testZeroLengthTypedArray(self):
"""
Test that zero length typed arrays maintain thier type information when
converted to a SOAP message.
"""
empty_int = typedArrayType(typed="int")
empty_int_message = buildSOAP( empty_int )
self.assertNotEquals( re.search("xsd:int\[0\]", empty_int_message),
None )
if __name__ == '__main__':
    # Run the whole suite when invoked as a script; the banner documents a
    # known SOAPpy limitation exercised by testArray.
    print """
NOTE: The 'testArray' test will fail because 'referenced' elements are
included in the return object. This is a known shortcoming of
the current version of SOAPpy.
All other tests should succeed.
"""
    unittest.main()
| Python |
#!/usr/bin/env python
import time
from SOAPpy import SOAP
# Client for the asynchronous (medusa-based) SOAP server example.
srv = SOAP.SOAPProxy('http://localhost:10080/')

# The server echoes success for 'good param' and returns a Fault for
# anything else, so the first call prints 'ok' and the second the fault.
for p in ('good param', 'ok param'):
    ret = srv.badparam(p)
    if isinstance(ret, SOAP.faultType):
        print ret
    else:
        print 'ok'

# Round-trip a dateTime value through the server's dt() method.
dt = SOAP.dateTimeType(time.localtime(time.time()))
print srv.dt(dt)
| Python |
import http_server
from SOAPpy.SOAP import *
# Short alias for SOAPpy's fault class, used throughout this module.
Fault = faultType

import string, sys

# Module-wide SOAP configuration with wire-level debugging enabled.
Config = SOAPConfig(debug=1)
class soap_handler:
    """Medusa (http_server) request handler that dispatches SOAP calls.

    The dispatch logic mirrors SOAPpy's synchronous do_POST() handler,
    adapted to medusa's asynchronous collector interface: handle_request()
    installs a collector for the POST body, and continue_request() is
    called back with the complete payload.
    """

    def __init__(self, encoding='UTF-8', config=Config, namespace=None):
        # namespace: default namespace for registered objects/functions.
        self.namespace = namespace
        self.objmap = {}   # namespace -> registered object
        self.funcmap = {}  # namespace -> {function name -> callable}
        self.config = config
        self.encoding = encoding

    def match (self, request):
        # Accept every request; this handler serves the whole server.
        return 1

    def handle_request (self, request):
        [path, params, query, fragment] = request.split_uri()

        # NOTE(review): lowercase 'post' -- assumes medusa lowercases the
        # HTTP command before this point; confirm against http_server.
        if request.command == 'post':
            # Gather the POST body asynchronously; the collector calls
            # continue_request() once the full payload has arrived.
            request.collector = collector(self, request)
        else:
            request.error(400)

    def continue_request(self, data, request):
        """Parse the collected POST body, dispatch the call, send the reply."""
        # Everthing that follows is cripped from do_POST().
        if self.config.debug:
            print "\n***RECEIVING***\n", data, "*" * 13 + "\n"
            sys.stdout.flush()

        try:
            r, header, body = parseSOAPRPC(data, header=1, body=1)

            method = r._name
            args = r._aslist
            kw = r._asdict
            ns = r._ns
            resp = ""
            # For faults messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.funcmap.has_key(ns) and \
                    self.funcmap[ns].has_key(method):
                    f = self.funcmap[ns][method]
                else: # Now look at registered objects
                    # Check for nested attributes
                    if method.find(".") != -1:
                        t = self.objmap[ns]
                        l = method.split(".")
                        for i in l:
                            t = getattr(t,i)
                        f = t
                    else:
                        f = getattr(self.objmap[ns], method)
            except:
                if self.config.debug:
                    import traceback
                    traceback.print_exc ()

                resp = buildSOAP(Fault("%s:Client" % NS.ENV_T,
                        "No method %s found" % nsmethod,
                        "%s %s" % tuple(sys.exc_info()[0:2])),
                    encoding = self.encoding, config = self.config)
                status = 500
            else:
                try:
                    # If it's wrapped to indicate it takes keywords
                    # send it keywords
                    if header:
                        x = HeaderHandler(header)

                    if isinstance(f,MethodSig):
                        c = None
                        if f.context: # Build context object
                            # NOTE(review): 'd', 'self.connection' and
                            # 'self.headers' are not defined anywhere in
                            # this class -- this branch looks copied from
                            # the BaseHTTPServer handler and would raise
                            # NameError/AttributeError if reached; confirm.
                            c = SOAPContext(header, body, d, self.connection, self.headers,
                                self.headers["soapaction"])

                        if f.keywords:
                            tkw = {}
                            # This is lame, but have to de-unicode keywords
                            for (k,v) in kw.items():
                                tkw[str(k)] = v
                            if c:
                                tkw["_SOAPContext"] = c
                            fr = apply(f,(),tkw)
                        else:
                            if c:
                                fr = apply(f,args,{'_SOAPContext':c})
                            else:
                                fr = apply(f,args,{})
                    else:
                        fr = apply(f,args,{})

                    # voidType results are sent bare; everything else is
                    # wrapped in a conventional {'Result': value} struct.
                    if type(fr) == type(self) and isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method:fr},
                            encoding = self.encoding,
                            config = self.config)
                    else:
                        resp = buildSOAP(kw =
                            {'%sResponse' % method:{'Result':fr}},
                            encoding = self.encoding,
                            config = self.config)
                except Fault, e:
                    resp = buildSOAP(e, config = self.config)
                    status = 500
                except:
                    if self.config.debug:
                        import traceback
                        traceback.print_exc ()

                    resp = buildSOAP(Fault("%s:Server" % NS.ENV_T, \
                            "Method %s failed." % nsmethod,
                            "%s %s" % tuple(sys.exc_info()[0:2])),
                        encoding = self.encoding,
                        config = self.config)
                    status = 500
                else:
                    status = 200
        except Fault,e:
            resp = buildSOAP(e, encoding = self.encoding,
                config = self.config)
            status = 500
        except:
            # internal error, report as HTTP server error
            if self.config.debug:
                import traceback
                traceback.print_exc ()
            request.error(500)
            #self.send_response(500)
            #self.end_headers()
        else:
            # NOTE(review): 'status' is computed above but never used; the
            # response always goes out through request.push(). Confirm
            # whether fault replies should carry HTTP 500 instead.
            request['Content-Type'] = 'text/xml; charset="%s"' % self.encoding
            request.push(resp)
            request.done()
            # got a valid SOAP response
            #self.send_response(status)
            #self.send_header("Content-type",
            #    'text/xml; charset="%s"' % self.encoding)
            #self.send_header("Content-length", str(len(resp)))
            #self.end_headers()

        # NOTE(review): on the internal-error path above, 'resp' is never
        # assigned, so this debug print would raise NameError; confirm.
        if self.config.debug:
            print "\n***SENDING***\n", resp, "*" * 13 + "\n"
            sys.stdout.flush()

        """
        # We should be able to shut down both a regular and an SSL
        # connection, but under Python 2.1, calling shutdown on an
        # SSL connections drops the output, so this work-around.
        # This should be investigated more someday.

        if self.config.SSLserver and \
            isinstance(self.connection, SSL.Connection):
            self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                SSL.SSL_RECEIVED_SHUTDOWN)
        else:
            self.connection.shutdown(1)
        """

    def registerObject(self, object, namespace = ''):
        # Expose all public methods of 'object' under 'namespace'.
        if namespace == '': namespace = self.namespace
        self.objmap[namespace] = object

    def registerFunction(self, function, namespace = '', funcName = None):
        # Expose a single callable, optionally under a different name.
        if not funcName : funcName = function.__name__
        if namespace == '': namespace = self.namespace
        if self.funcmap.has_key(namespace):
            self.funcmap[namespace][funcName] = function
        else:
            self.funcmap[namespace] = {funcName : function}
class collector:
    "gathers input for POST and PUT requests"

    def __init__ (self, handler, request):
        self.handler = handler
        self.request = request
        self.data = ''

        # make sure there's a content-length header
        cl = request.get_header ('content-length')

        if not cl:
            request.error (411)
        else:
            cl = string.atoi (cl)
            # using a 'numeric' terminator: asynchat will call
            # found_terminator() after exactly 'cl' bytes have arrived
            self.request.channel.set_terminator (cl)

    def collect_incoming_data (self, data):
        # Accumulate body chunks as they arrive from the channel.
        self.data = self.data + data

    def found_terminator (self):
        # set the terminator back to the default (blank-line header
        # delimiter) before handing off the complete body
        self.request.channel.set_terminator ('\r\n\r\n')
        self.handler.continue_request (self.data, self.request)
if __name__ == '__main__':
    # Demo: serve a trivial object over SOAP on port 10080 using medusa.
    import asyncore
    import http_server

    class Thing:
        """Trivial service object exposed through the SOAP handler."""

        def badparam(self, param):
            # Echo success for the expected value, a Fault otherwise.
            if param == 'good param':
                return 1
            else:
                return Fault(faultstring='bad param')

        def dt(self, aDateTime):
            # Round-trip a dateTime value back to the caller.
            return aDateTime

    thing = Thing()

    soaph = soap_handler()
    soaph.registerObject(thing)

    hs = http_server.http_server('', 10080)
    hs.install_handler(soaph)

    asyncore.loop()
| Python |
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py,v 1.27 2005/02/21 20:24:13 warnes Exp $'
from version import __version__
import cgi
import copy
from wstools.XMLname import toXMLname, fromXMLname
import fpconst
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
    # Probe the name; pre-bool Pythons raise NameError here.
    BooleanType
    pythonHasBooleanType = 1
except NameError:
    pythonHasBooleanType = 0
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
    """Serializes Python data structures into a SOAP message string."""

    # Document prologs, without and with an explicit encoding declaration.
    _xml_top = '<?xml version="1.0"?>\n'
    _xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
    # Envelope opening, deliberately missing the leading '<' and trailing
    # '>' so build() can splice the collected xmlns declarations in.
    _env_top = ( '%(ENV_T)s:Envelope\n' + \
        ' %(ENV_T)s:encodingStyle="%(ENC)s"\n' ) % \
        NS.__dict__
    _env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__

    # Namespaces potentially defined in the Envelope tag.
    _env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
        NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
        NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
    def __init__(self, args = (), kw = {}, method = None, namespace = None,
        header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
        use_refs = 0, config = Config, noroot = 0):
        """Record what to serialize; build() produces the document.

        Raises LookupError (via str.encode) if 'encoding' is unknown.
        """
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)
        self.args = args
        self.kw = kw
        self.envelope = envelope
        self.encoding = encoding
        self.method = method
        self.namespace = namespace
        self.header = header
        self.methodattrs= methodattrs
        self.use_refs = use_refs
        self.config = config
        self.out = []          # accumulated output fragments, joined in build()
        self.tcounter = 0      # counter for auto-generated tags (gentag)
        self.ncounter = 1      # counter for auto-generated ns prefixes (genns)
        self.icounter = 1      # counter for multi-reference ids (checkref)
        self.envns = {}        # namespaces to declare on the Envelope
        self.ids = {}          # id(obj) -> multiref index, for cycle detection
        self.depth = 0
        self.multirefs = []    # (obj, tag) pairs deferred to the end of Body
        self.multis = 0        # true while the multiref section is emitted
        self.body = not isinstance(args, bodyType)
        self.noroot = noroot
    def build(self):
        """Serialize everything captured in __init__ and return the message.

        Returns an encoded byte string when an encoding was given,
        otherwise a plain string. On a RecursionError (SOAPpy's own
        recursive-object marker) the build restarts once with
        multi-reference encoding enabled.
        """
        if Config.debug: print "In build."
        ns_map = {}

        # Cache whether typing is on or not
        typed = self.config.typed

        if self.header:
            # Create a header.
            self.dump(self.header, "Header", typed = typed)
            #self.header = None # Wipe it out so no one is using it.

        if self.body:
            # Call genns to record that we've used SOAP-ENV.
            self.depth += 1
            body_ns = self.genns(ns_map, NS.ENV)[0]
            self.out.append("<%sBody>\n" % body_ns)

        if self.method:
            # Save the NS map so that it can be restored when we
            # fall out of the scope of the method definition
            save_ns_map = ns_map.copy()
            self.depth += 1
            a = ''
            if self.methodattrs:
                for (k, v) in self.methodattrs.items():
                    a += ' %s="%s"' % (k, v)

            if self.namespace:  # Use the namespace info handed to us
                methodns, n = self.genns(ns_map, self.namespace)
            else:
                methodns, n = '', ''

            self.out.append('<%s%s%s%s%s>\n' % (
                methodns, self.method, n, a, self.genroot(ns_map)))

        try:
            # Positional arguments first, then keyword arguments (in the
            # caller-specified order when config.argsOrdering provides one).
            if type(self.args) != TupleType:
                args = (self.args,)
            else:
                args = self.args

            for i in args:
                self.dump(i, typed = typed, ns_map = ns_map)

            if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
                for k in self.config.argsOrdering.get(self.method):
                    self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
            else:
                for (k, v) in self.kw.items():
                    self.dump(v, k, typed = typed, ns_map = ns_map)
        except RecursionError:
            if self.use_refs == 0:
                # restart the build with multi-reference encoding enabled
                b = SOAPBuilder(args = self.args, kw = self.kw,
                    method = self.method, namespace = self.namespace,
                    header = self.header, methodattrs = self.methodattrs,
                    envelope = self.envelope, encoding = self.encoding,
                    use_refs = 1, config = self.config)
                return b.build()
            raise

        if self.method:
            self.out.append("</%s%s>\n" % (methodns, self.method))
            # End of the method definition; drop any local namespaces
            ns_map = save_ns_map
            self.depth -= 1

        if self.body:
            # dump may add to self.multirefs, but the for loop will keep
            # going until it has used all of self.multirefs, even those
            # entries added while in the loop.
            self.multis = 1

            for obj, tag in self.multirefs:
                self.dump(obj, tag, typed = typed, ns_map = ns_map)

            self.out.append("</%sBody>\n" % body_ns)
            self.depth -= 1

        if self.envelope:
            # Splice the namespace declarations collected in self.envns
            # into the (intentionally incomplete) envelope opening tag.
            e = map (lambda ns: ' xmlns:%s="%s"\n' % (ns[1], ns[0]),
                self.envns.items())

            self.out = ['<', self._env_top] + e + ['>\n'] + \
                self.out + \
                [self._env_bot]

        if self.encoding != None:
            self.out.insert(0, self._xml_enc_top % self.encoding)
            return ''.join(self.out).encode(self.encoding)

        self.out.insert(0, self._xml_top)
        return ''.join(self.out)
def gentag(self):
if Config.debug: print "In gentag."
self.tcounter += 1
return "v%d" % self.tcounter
    def genns(self, ns_map, nsURI):
        """Return (prefix, declaration) for nsURI.

        'prefix' includes the trailing colon (or is '' when building
        without prefixes); 'declaration' is the ' xmlns...' attribute text
        to emit, or '' if the namespace is already in scope. Well-known
        namespaces are recorded in self.envns for declaration on the
        Envelope instead.
        """
        if nsURI == None:
            return ('', '')

        # nsURI may arrive as (prefix, URI) or a 1-tuple (URI,).
        if type(nsURI) == TupleType: # already a tuple
            if len(nsURI) == 2:
                ns, nsURI = nsURI
            else:
                ns, nsURI = None, nsURI[0]
        else:
            ns = None

        if ns_map.has_key(nsURI):
            # Already declared in this scope.
            return (ns_map[nsURI] + ':', '')

        if self._env_ns.has_key(nsURI):
            # Well-known namespace: declare it on the Envelope.
            ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
            return (ns + ':', '')

        if not ns:
            ns = "ns%d" % self.ncounter
            self.ncounter += 1
        ns_map[nsURI] = ns
        if self.config.buildWithNamespacePrefix:
            return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
        else:
            # Unprefixed: declare it as the element's default namespace.
            return ('', ' xmlns="%s"' % (nsURI))
def genroot(self, ns_map):
if self.noroot:
return ''
if self.depth != 2:
return ''
ns, n = self.genns(ns_map, NS.ENC)
return ' %sroot="%d"%s' % (ns, not self.multis, n)
    # checkref checks an element to see if it needs to be encoded as a
    # multi-reference element or not. If it returns None, the element has
    # been handled and the caller can continue with subsequent elements.
    # If it returns a string, the string should be included in the opening
    # tag of the marshaled element.
    def checkref(self, obj, tag, ns_map):
        """Track obj by id() for cycle detection / multi-reference output."""
        if self.depth < 2:
            return ''

        if not self.ids.has_key(id(obj)):
            # First encounter: assign the next multiref index.
            n = self.ids[id(obj)] = self.icounter
            self.icounter = n + 1

            if self.use_refs == 0:
                return ''

            if self.depth == 2:
                return ' id="i%d"' % n

            # Deep first encounter: defer the real element to the multiref
            # section and fall through to emit an href placeholder.
            self.multirefs.append((obj, tag))
        else:
            if self.use_refs == 0:
                # Seen before without multiref support -> recursive object.
                raise RecursionError, "Cannot serialize recursive object"

            n = self.ids[id(obj)]

            if self.multis and self.depth == 2:
                # Emitting the multiref section itself: write the real id.
                return ' id="i%d"' % n

        self.out.append('<%s href="#i%d"%s/>\n' %
            (tag, n, self.genroot(ns_map)))
        return None
    # dumpers
    def dump(self, obj, tag = None, typed = 1, ns_map = {}):
        """Serialize one value by dispatching to dump_<typename>().

        Falls back to the generic dumper() for types without a dedicated
        method. Raises KeyError if tag is neither a string nor None.
        """
        if Config.debug: print "In dump.", "obj=", obj
        # Copy so nested namespace declarations stay scoped to this element.
        ns_map = ns_map.copy()
        self.depth += 1

        if type(tag) not in (NoneType, StringType, UnicodeType):
            raise KeyError, "tag must be a string or None"

        try:
            meth = getattr(self, "dump_" + type(obj).__name__)
        except AttributeError:
            # No specific dumper: map longs to 'integer' and bools to
            # 'boolean', otherwise use the Python type name as-is.
            if type(obj) == LongType:
                obj_type = "integer"
            elif pythonHasBooleanType and type(obj) == BooleanType:
                obj_type = "boolean"
            else:
                obj_type = type(obj).__name__

            self.out.append(self.dumper(None, obj_type, obj, tag, typed,
                ns_map, self.genroot(ns_map)))
        else:
            meth(obj, tag, typed, ns_map)

        self.depth -= 1
    # generic dumper
    def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
        rootattr = '', id = '',
        xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
        """Format a single scalar element and return it as a string.

        'xml' is the element template; 'id' and 'rootattr' are attribute
        fragments produced by checkref()/genroot(). Note: the 'id'
        parameter intentionally shadows the builtin here.
        """
        if Config.debug: print "In dumper."

        if nsURI == None:
            nsURI = self.config.typesNamespaceURI

        tag = tag or self.gentag()
        tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding

        a = n = t = ''
        if typed and obj_type:
            ns, n = self.genns(ns_map, nsURI)
            ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
            t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)

        try: a = obj._marshalAttrs(ns_map, self)
        except: pass

        try: data = obj._marshalData()
        except:
            if (obj_type != "string"): # strings are already encoded
                data = cgi.escape(str(obj))
            else:
                data = obj

        return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
            "id": id, "attrs": a}
    def dump_float(self, obj, tag, typed = 1, ns_map = {}):
        """Serialize a Python float as a SOAP double, with special-value
        spellings for infinities and NaN."""
        if Config.debug: print "In dump_float."
        tag = tag or self.gentag()
        tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding

        if Config.strict_range:
            # Range-check by constructing the typed wrapper (raises if bad).
            doubleType(obj)

        if fpconst.isPosInf(obj):
            obj = "INF"
        elif fpconst.isNegInf(obj):
            obj = "-INF"
        elif fpconst.isNaN(obj):
            obj = "NaN"
        else:
            # repr() keeps full precision, unlike str() on old Pythons.
            obj = repr(obj)

        # Note: python 'float' is actually a SOAP 'double'.
        self.out.append(self.dumper(None, "double", obj, tag, typed, ns_map,
            self.genroot(ns_map)))
    def dump_string(self, obj, tag, typed = 0, ns_map = {}):
        """Serialize a (unicode) string, XML-escaping its content.

        Untyped by default: strings are the wire default type.
        """
        if Config.debug: print "In dump_string."
        tag = tag or self.gentag()
        tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding

        id = self.checkref(obj, tag, ns_map)
        if id == None:
            # checkref already emitted an href placeholder.
            return

        try: data = obj._marshalData()
        except: data = obj

        self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
            typed, ns_map, self.genroot(ns_map), id))

    dump_str = dump_string # For Python 2.2+
    dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_None."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
self.out.append('<%s %snull="1"%s/>\n' %
(tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
    def dump_list(self, obj, tag, typed = 1, ns_map = {}):
        """Serialize a list/tuple (or arrayType wrapper) as SOAP-ENC:Array.

        The SOAP-ENC:arrayType attribute is derived from the first element
        when all elements share a type, otherwise 'ur-type' is used and
        each member is emitted with its own xsi:type.
        """
        if Config.debug: print "In dump_list.", "obj=", obj
        tag = tag or self.gentag()
        tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding

        # arrayType wrappers keep the real sequence in .data.
        if type(obj) == InstanceType:
            data = obj.data
        else:
            data = obj

        if typed:
            id = self.checkref(obj, tag, ns_map)
            if id == None:
                return

        try:
            sample = data[0]
            empty = 0
        except:
            # Empty sequence: preserve declared type information if present
            # so e.g. xsd:int[0] still says 'int'.
            if getattr(obj,"_typed",None) and getattr(obj,"_type",None):
                if getattr(obj, "_complexType", None):
                    sample = typedArrayType(typed=obj._type,
                        complexType = obj._complexType)
                    sample._typename = obj._type
                    if not getattr(obj,"_ns",None): obj._ns = NS.URN
                else:
                    sample = typedArrayType(typed=obj._type)
            else:
                sample = structType()
            empty = 1

        # First scan list to see if all are the same type
        same_type = 1

        if not empty:
            for i in data[1:]:
                if type(sample) != type(i) or \
                    (type(sample) == InstanceType and \
                        sample.__class__ != i.__class__):
                    same_type = 0
                    break

        ndecl = ''
        if same_type:
            if (isinstance(sample, structType)) or \
                type(sample) == DictType or \
                (isinstance(sample, anyType) and \
                    (getattr(sample, "_complexType", None) and \
                        sample._complexType)): # force to urn struct
                # Struct-like members: array type is a named struct in the
                # object's namespace (or the generic URN).
                try:
                    tns = obj._ns or NS.URN
                except:
                    tns = NS.URN
                ns, ndecl = self.genns(ns_map, tns)
                try:
                    typename = sample._typename
                except:
                    typename = "SOAPStruct"
                t = ns + typename
            elif isinstance(sample, anyType):
                # Typed wrapper members: reuse the wrapper's declared type.
                ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
                    self.config.strictNamespaces)
                if ns:
                    ns, ndecl = self.genns(ns_map, ns)
                    t = ns + str(sample._type)
                else:
                    t = 'ur-type'
            else:
                typename = type(sample).__name__

                # For Python 2.2+
                if type(sample) == StringType: typename = 'string'

                # HACK: unicode is a SOAP string
                if type(sample) == UnicodeType: typename = 'string'

                # HACK: python 'float' is actually a SOAP 'double'.
                if typename=="float": typename="double"
                t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
                    typename
        else:
            # Mixed member types: fall back to the generic ur-type.
            t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
                "ur-type"

        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''

        ens, edecl = self.genns(ns_map, NS.ENC)
        ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)

        if typed:
            self.out.append(
                '<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
                (tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
                    self.genroot(ns_map), id, a))

        if typed:
            try: elemsname = obj._elemsname
            except: elemsname = "item"
        else:
            elemsname = tag

        for i in data:
            # Per-member typing is only needed when members differ in type.
            self.dump(i, elemsname, not same_type, ns_map)

        if typed: self.out.append('</%s>\n' % tag)

    dump_tuple = dump_list
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_dictionary."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
self.out.append('<%s%s%s%s>\n' %
(tag, id, a, self.genroot(ns_map)))
for (k, v) in obj.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
dump_dict = dump_dictionary # For Python 2.2+
    def dump_instance(self, obj, tag, typed = 1, ns_map = {}):
        """Serialize a class instance.

        Dispatches on the SOAPpy wrapper hierarchy: arrayType -> dump_list,
        faultType -> Fault element, voidType -> empty element, structType ->
        child elements from __dict__ (ordered by _keyord when present),
        other anyType -> scalar with xsi:type, and plain instances ->
        element per public attribute.
        """
        if Config.debug: print "In dump_instance.", "obj=", obj, "tag=", tag
        if not tag:
            # If it has a name use it.
            if isinstance(obj, anyType) and obj._name:
                tag = obj._name
            else:
                tag = self.gentag()
        tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding

        if isinstance(obj, arrayType):      # Array
            self.dump_list(obj, tag, typed, ns_map)
            return

        if isinstance(obj, faultType):      # Fault
            cns, cdecl = self.genns(ns_map, NS.ENC)
            vns, vdecl = self.genns(ns_map, NS.ENV)
            self.out.append('''<%sFault %sroot="1"%s%s>
<faultcode>%s</faultcode>
<faultstring>%s</faultstring>
''' % (vns, cns, vdecl, cdecl, obj.faultcode, obj.faultstring))
            if hasattr(obj, "detail"):
                self.dump(obj.detail, "detail", typed, ns_map)
            self.out.append("</%sFault>\n" % vns)
            return

        r = self.genroot(ns_map)

        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''

        if isinstance(obj, voidType):     # void
            self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
            return

        id = self.checkref(obj, tag, ns_map)
        if id == None:
            # checkref already emitted an href placeholder.
            return

        if isinstance(obj, structType):
            # Check for namespace
            ndecl = ''
            ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                self.config.strictNamespaces)
            if ns:
                ns, ndecl = self.genns(ns_map, ns)
                tag = ns + tag
            self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))

            keylist = obj.__dict__.keys()

            # first write out items with order information
            if hasattr(obj, '_keyord'):
                for i in range(len(obj._keyord)):
                    self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
                    keylist.remove(obj._keyord[i])

            # now write out the rest
            for k in keylist:
                if (k[0] != "_"):
                    self.dump(getattr(obj,k), k, 1, ns_map)

            if isinstance(obj, bodyType):
                # Flush deferred multi-reference elements inside the Body.
                self.multis = 1

                for v, k in self.multirefs:
                    self.dump(v, k, typed = typed, ns_map = ns_map)

            self.out.append('</%s>\n' % tag)

        elif isinstance(obj, anyType):
            t = ''

            if typed:
                ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                    self.config.strictNamespaces)
                if ns:
                    ons, ondecl = self.genns(ns_map, ns)
                    ins, indecl = self.genns(ns_map,
                        self.config.schemaNamespaceURI)
                    t = ' %stype="%s%s"%s%s' % \
                        (ins, ons, obj._type, ondecl, indecl)

            self.out.append('<%s%s%s%s%s>%s</%s>\n' %
                (tag, t, id, a, r, obj._marshalData(), tag))

        else:                           # Some Class
            self.out.append('<%s%s%s>\n' % (tag, id, r))

            for (k, v) in obj.__dict__.items():
                if k[0] != "_":
                    self.dump(v, k, 1, ns_map)

            self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None,
    header=None, methodattrs=None, envelope=1, encoding='UTF-8',
    config=Config, noroot = 0):
    """Convenience front end: construct a SOAPBuilder with the given
    options and return the serialized SOAP message it produces."""
    builder = SOAPBuilder(args=args, kw=kw, method=method,
        namespace=namespace, header=header, methodattrs=methodattrs,
        envelope=envelope, encoding=encoding, config=config,
        noroot=noroot)
    return builder.build()
| Python |
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: NS.py,v 1.4 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
##############################################################################
# Namespace Class
################################################################################
def invertDict(dict):
    """Return a new dictionary with the keys and values of *dict* swapped."""
    inverted = {}
    for key, value in dict.items():
        inverted[value] = key
    return inverted
class NS:
    """Namespace URI constants plus the prefixes used in generated XML."""
    XML = "http://www.w3.org/XML/1998/namespace"
    ENV = "http://schemas.xmlsoap.org/soap/envelope/"
    ENC = "http://schemas.xmlsoap.org/soap/encoding/"
    # The three XML Schema revisions (1999, 2000/10, 2001) and their
    # corresponding Schema-instance namespaces.
    XSD = "http://www.w3.org/1999/XMLSchema"
    XSD2 = "http://www.w3.org/2000/10/XMLSchema"
    XSD3 = "http://www.w3.org/2001/XMLSchema"
    XSD_L = [XSD, XSD2, XSD3]
    EXSD_L= [ENC, XSD, XSD2, XSD3]
    XSI = "http://www.w3.org/1999/XMLSchema-instance"
    XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
    XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
    XSI_L = [XSI, XSI2, XSI3]
    URN = "http://soapinterop.org/xsd"
    # For generated messages
    XML_T = "xml"
    ENV_T = "SOAP-ENV"
    ENC_T = "SOAP-ENC"
    XSD_T = "xsd"
    XSD2_T= "xsd2"
    XSD3_T= "xsd3"
    XSI_T = "xsi"
    XSI2_T= "xsi2"
    XSI3_T= "xsi3"
    URN_T = "urn"
    # prefix -> URI map and its inverse
    NSMAP = {ENV_T: ENV, ENC_T: ENC, XSD_T: XSD, XSD2_T: XSD2,
             XSD3_T: XSD3, XSI_T: XSI, XSI2_T: XSI2, XSI3_T: XSI3,
             URN_T: URN}
    NSMAP_R = invertDict(NSMAP)
    # schema year -> (xsd prefix, xsi prefix) map and its inverse
    STMAP = {'1999': (XSD_T, XSI_T), '2000': (XSD2_T, XSI2_T),
             '2001': (XSD3_T, XSI3_T)}
    STMAP_R = invertDict(STMAP)
    def __init__(self):
        # Pure namespace holder; instantiating it is always an error.
        raise Error, "Don't instantiate this"
| Python |
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Errors.py,v 1.5 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
import exceptions
################################################################################
# Exceptions
################################################################################
class Error(exceptions.Exception):
    """Base class for all SOAPpy-level errors; wraps a message string."""
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return "<Error : %s>" % self.msg
    __repr__ = __str__
    def __call__(self):
        # Bug fix: previously returned the bare name 'msg' (undefined at
        # call time -> NameError); use the instance attribute.
        return (self.msg,)
class RecursionError(Error):
    # Error subclass -- presumably raised on recursive data structures
    # during marshalling; confirm at raise sites elsewhere in SOAPpy.
    pass
class UnknownTypeError(Error):
    # Error subclass -- presumably raised when a SOAP type cannot be
    # mapped; confirm at raise sites elsewhere in SOAPpy.
    pass
class HTTPError(Error):
    # indicates an HTTP protocol error
    def __init__(self, code, msg):
        self.code = code  # HTTP status code
        self.msg = msg    # reason phrase / message text
    def __str__(self):
        return "<HTTPError %s %s>" % (self.code, self.msg)
    __repr__ = __str__
    def __call__(self):
        # Bug fix: this method was misspelled '__call___' (three trailing
        # underscores), so calling an HTTPError instance raised TypeError.
        return (self.code, self.msg, )
class UnderflowError(exceptions.ArithmeticError):
    # Arithmetic underflow: a value too small to represent.
    pass
| Python |
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: Types.py,v 1.19 2005/02/22 04:29:43 warnes Exp $'
from version import __version__
import UserList
import base64
import cgi
import urllib
import copy
import re
import time
from types import *
# SOAPpy modules
from Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name):
    """Return true if *name* is an internal (underscore-prefixed) name."""
    # startswith() also handles the empty string, where name[0] would
    # raise IndexError.
    return name.startswith('_')
def isPublic(name):
    """Return true if *name* is a public (non-underscore-prefixed) name."""
    # startswith() also handles the empty string, where name[0] would
    # raise IndexError.
    return not name.startswith('_')
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
    """Base class for all SOAP/XSD wrapper types.
    Holds the value (_data), element name (_name), namespace (_ns), XML
    attributes (_attrs) and a cache of the marshalled text (_cache).
    Subclasses override _checkValueSpace() to validate/normalize the value
    and _marshalData() to serialize it.
    """
    # Namespace URIs under which this type name is recognised.
    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)
    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == anyType:
            raise Error, "anyType can't be instantiated directly"
        # name may be a (namespace, localname) pair or a bare local name.
        if type(name) in (ListType, TupleType):
            self._ns, self._name = name
        else:
            self._ns = self._validURIs[0]
            self._name = name
        self._typed = typed
        self._attrs = {}
        self._cache = None              # cached marshalled text
        self._type = self._typeName()
        self._data = self._checkValueSpace(data)
        if attrs != None:
            self._setAttrs(attrs)
    def __str__(self):
        if hasattr(self,'_name') and self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))
    __repr__ = __str__
    def _checkValueSpace(self, data):
        """Validate/normalize *data*; the base class accepts anything."""
        return data
    def _marshalData(self):
        """Return the element's text content for serialization."""
        return str(self._data)
    def _marshalAttrs(self, ns_map, builder):
        """Serialize _attrs into an XML attribute string, declaring any
        needed namespace prefixes via builder.genns()."""
        a = ''
        for attr, value in self._attrs.items():
            ns, n = builder.genns(ns_map, attr[0])
            a += n + ' %s%s="%s"' % \
                (ns, attr[1], cgi.escape(str(value), 1))
        return a
    def _fixAttr(self, attr):
        """Normalize an attribute key to a (namespaceURI-or-None, name) tuple."""
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == ListType:
            attr = tuple(attr)
        elif type(attr) != TupleType:
            raise AttributeError, "invalid attribute type"
        if len(attr) != 2:
            raise AttributeError, "invalid attribute length"
        if type(attr[0]) not in (NoneType, StringType, UnicodeType):
            raise AttributeError, "invalid attribute namespace URI type"
        return attr
    def _getAttr(self, attr):
        """Return the attribute's value, or None if unset."""
        attr = self._fixAttr(attr)
        try:
            return self._attrs[attr]
        except:
            return None
    def _setAttr(self, attr, value):
        """Set an XML attribute; plain-string values are stored as unicode."""
        attr = self._fixAttr(attr)
        if type(value) is StringType:
            value = unicode(value)
        self._attrs[attr] = value
    def _setAttrs(self, attrs):
        """Set many attributes at once from a flat key/value sequence, a
        dict, or another anyType instance."""
        if type(attrs) in (ListType, TupleType):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])
            return
        if type(attrs) == DictType:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError, "invalid attribute type"
        for attr, value in d.items():
            self._setAttr(attr, value)
    def _setMustUnderstand(self, val):
        """Set the SOAP envelope mustUnderstand attribute."""
        self._setAttr((NS.ENV, "mustUnderstand"), val)
    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))
    def _setActor(self, val):
        """Set the SOAP envelope actor attribute."""
        self._setAttr((NS.ENV, "actor"), val)
    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))
    def _typeName(self):
        # "fooType" -> "foo": strips the 4-character "Type" suffix from
        # the class name.
        return self.__class__.__name__[:-4]
    def _validNamespaceURI(self, URI, strict):
        """Return the namespace URI to serialize this type under, or None
        for untyped values; raises when strict and URI is unrecognized."""
        if not hasattr(self, '_typed') or not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError, \
            "not a valid namespace for type %s" % self._type
class voidType(anyType):
    # Marker type for an element with no content.
    pass
class stringType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type:" % self._type
return data
class untypedType(stringType):
    # A string serialized with typed=0 (no xsi:type attribute emitted).
    def __init__(self, data = None, name = None, attrs = None):
        stringType.__init__(self, data, name, 0, attrs)
# Simple typed-string aliases: each wraps a string value under a different
# XML Schema type name, with no extra value-space constraints.
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
    # xsd:token -- a string rejected if it matches the pattern below
    # (tabs/newlines, leading/trailing or doubled spaces).
    _validURIs = (NS.XSD2, NS.XSD3)
    __invalidre = '[\n\t]|^ | $|  '
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        # Compile the pattern lazily on first use; the compiled object
        # replaces the class-level string on this instance.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type
        return data
class normalizedStringType(anyType):
    # xsd:normalizedString -- a string with no newlines, carriage returns
    # or tabs.
    _validURIs = (NS.XSD3,)
    __invalidre = '[\n\r\t]'
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        # Compile the pattern lazily on first use; the compiled object
        # replaces the class-level string on this instance.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type
        return data
class CDATAType(normalizedStringType):
    # 2000/10 schema name for normalizedString.
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    """xsd:boolean -- stores 0 or 1, marshalled as 'false'/'true'."""
    def __int__(self):
        return self._data
    __nonzero__ = __int__
    def _marshalData(self):
        # _checkValueSpace guarantees _data is exactly 0 or 1.
        if self._data:
            return 'true'
        return 'false'
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        # Map each accepted literal spelling onto the canonical 0/1.
        for canonical, spellings in ((0, (0, '0', 'false', '')),
                                     (1, (1, '1', 'true'))):
            if data in spellings:
                return canonical
        raise ValueError("invalid %s value" % self._type)
class decimalType(anyType):
    # xsd:decimal -- accepts int, long, or float values.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType, FloatType):
            # NOTE(review): every sibling type raises ValueError here; this
            # one raises SOAPpy's Error -- looks inconsistent, but changing
            # it would alter the exception type callers catch.  Confirm
            # before fixing.
            raise Error, "invalid %s value" % self._type
        return data
class floatType(anyType):
    # xsd:float -- numeric value restricted to the single-precision range.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        # Bounds are the largest finite IEEE-754 single-precision value.
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -3.4028234663852886E+38 or \
            data > 3.4028234663852886E+38:
            raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
        return data
    def _marshalData(self):
        return "%.18g" % self._data # More precision
class doubleType(anyType):
    # xsd:double -- numeric value restricted to the double-precision range.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        # Bounds are (approximately) the largest finite IEEE-754 doubles;
        # note the asymmetric literals are as originally written.
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -1.7976931348623158E+308 or \
            data > 1.7976931348623157E+308:
            raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
        return data
    def _marshalData(self):
        return "%.18g" % self._data # More precision
class durationType(anyType):
    """xsd:duration -- a (years, months, days, hours, minutes, seconds)
    tuple, marshalled in ISO-8601 'PnYnMnDTnHnMnS' form."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        try:
            # A tuple or a scalar is OK, but make them into a list
            if type(data) == TupleType:
                data = list(data)
            elif type(data) != ListType:
                data = [data]
            if len(data) > 6:
                raise Exception, "too many values"
            # Now check the types of all the components, and find
            # the first nonzero element along the way.
            f = -1
            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue
                if type(data[i]) not in \
                    (IntType, LongType, FloatType):
                    raise Exception, "element %d a bad type" % i
                if data[i] and f == -1:
                    f = i
            # If they're all 0, just use zero seconds.
            if f == -1:
                self._cache = 'PT0S'
                return (0,) * 6
            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.
            d = -1
            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception, \
                            "all except the last nonzero element must be " \
                            "integers"
                    if data[i] < 0 and i > f:
                        raise Exception, \
                            "only the first nonzero element can be negative"
                    elif data[i] != long(data[i]):
                        d = i
            # Pad the list on the left if necessary.
            if len(data) < 6:
                n = 6 - len(data)
                f += n
                d += n
                data = [0] * n + data
            # Save index of the first nonzero element and the decimal
            # element for _marshalData.
            self.__firstnonzero = f
            self.__decimal = d
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            t = 0
            # Leading '-' applies to the whole duration when the first
            # nonzero component is negative.
            if d[self.__firstnonzero] < 0:
                s = '-P'
            else:
                s = 'P'
            t = 0
            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    # 'T' separates the date part from the time part.
                    if i > 2 and not t:
                        s += 'T'
                        t = 1
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % long(abs(d[i]))
                    s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
            self._cache = s
        return self._cache
class timeDurationType(durationType):
    # Pre-2001 schema name ('timeDuration') for duration.
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    """xsd:dateTime -- a 6-element UTC (Y, M, D, H, M, S) tuple."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            # Default to "now"; accept seconds-since-epoch (int/long/float)
            # or a 6-to-9-element time tuple.
            if data == None:
                data = time.time()
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Fold the fractional second into the seconds element.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 6:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                cleanDate(data)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            # Append any fractional second, then the UTC designator.
            f = d[5] - int(d[5])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class recurringInstantType(anyType):
    """xsd:recurringInstant -- like dateTime, but leading fields may be
    omitted (None) to express recurrence."""
    _validURIs = (NS.XSD,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Fold the fractional second into the seconds element.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 1:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                # Only leftmost elements may be None (omitted): find the
                # first specified element.
                f = len(data)
                for i in range(f):
                    if data[i] == None:
                        if f < i:
                            raise Exception, \
                                "only leftmost elements can be none"
                    else:
                        f = i
                        break
                cleanDate(data, f)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ''
            # An omitted year is marshalled as '--'; years below 100 use a
            # truncated '-YY' form.
            if not e[0]:
                e[0] = '--'
            else:
                if e[0] < 0:
                    neg = '-'
                    e[0] = abs(e[0])
                if e[0] < 100:
                    e[0] = '-' + "%02d" % e[0]
                else:
                    e[0] = "%04d" % e[0]
            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = '-'
                else:
                    if e[i] < 0:
                        neg = '-'
                        e[i] = abs(e[i])
                    e[i] = "%02d" % e[i]
            # Append any fractional second to the seconds field.
            if d[5]:
                f = abs(d[5] - int(d[5]))
                if f:
                    e[5] += ("%g" % f)[1:]
            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
            self._cache = s
        return self._cache
# Pre-2001 schema names for dateTime.
class timeInstantType(dateTimeType):
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
    _validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[3:6]
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[3:6])
data[2] += f
elif type(data) in (IntType, LongType):
data = time.gmtime(data)[3:6]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[3:6]
elif len(data) > 3:
raise Exception, "too many values"
data = [None, None, None] + list(data)
if len(data) < 6:
data += [0] * (6 - len(data))
cleanDate(data, 3)
data = data[3:]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = ''
s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
f = d[2] - int(d[2])
if f != 0:
s += ("%g" % f)[1:]
s += 'Z'
self._cache = s
return self._cache
class dateType(anyType):
    """xsd:date -- a (year, month, day) tuple."""
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception, "too many values"
                data = list(data)
                # Default missing month/day to 1, add zero time fields so
                # cleanDate() can range-check, then keep just the date.
                if len(data) < 3:
                    data += [1, 1, 1][len(data):]
                data += [0, 0, 0]
                cleanDate(data)
                data = data[:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearMonthType(anyType):
    """xsd:gYearMonth -- a (year, month) tuple."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"
                data = list(data)
                # Default a missing month to 1, add day/time placeholders
                # so cleanDate() can range-check, then keep year and month.
                if len(data) < 2:
                    data += [1, 1][len(data):]
                data += [1, 0, 0, 0]
                cleanDate(data)
                data = data[:2]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearType(anyType):
    """xsd:gYear -- a single (possibly negative) year number."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                # A float year must be integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class centuryType(anyType):
_validURIs = (NS.XSD2, NS.ENC)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:1] / 100
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:1] / 100
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%02dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class yearType(gYearType):
    # Pre-2001 schema name ('year') for gYear.
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
    """xsd:gMonthDay -- a (month, day) tuple, marshalled as --MM-DDZ."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[1:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"
                data = list(data)
                # Default missing fields to 1, pad with a placeholder year
                # and zero time so cleanDate() can range-check.
                if len(data) < 2:
                    data += [1, 1][len(data):]
                data = [0] + data + [0, 0, 0]
                cleanDate(data, 1)
                data = data[1:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d-%02dZ" % self._data
        return self._cache
class recurringDateType(gMonthDayType):
    # Pre-2001 schema name ('recurringDate') for gMonthDay.
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    """xsd:gMonth -- a single month number (1..12), marshalled as --MM--Z."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                # A float month must be integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                # Months are range-checked here, unlike years.
                if data[0] < 1 or data[0] > 12:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d--Z" % self._data
        return self._cache
class monthType(gMonthType):
    # Pre-2001 schema name ('month') for gMonth.
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    """xsd:gDay -- a single day-of-month number (1..31), marshalled as ---DDZ."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                # A float day must be integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                # Days are range-checked here, unlike years.
                if data[0] < 1 or data[0] > 31:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        if self._cache == None:
            self._cache = "---%02dZ" % self._data
        return self._cache
class recurringDayType(gDayType):
    # Pre-2001 schema name ('recurringDay') for gDay.
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    # xsd:hexBinary -- a byte string marshalled as hexadecimal digits.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = encodeHexString(self._data)
        return self._cache
class base64BinaryType(anyType):
    # xsd:base64Binary -- a byte string marshalled in base64.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = base64.encodestring(self._data)
        return self._cache
class base64Type(base64BinaryType):
    # SOAP encoding namespace's name for base64 binary data.
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    """1999-schema/SOAP-ENC binary -- a byte string whose 'encoding'
    attribute selects base64 or hex marshalling."""
    _validURIs = (NS.XSD, NS.ENC)
    def __init__(self, data, name = None, typed = 1, encoding = 'base64',
        attrs = None):
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr('encoding', encoding)
    def _marshalData(self):
        if self._cache == None:
            if self._getAttr((None, 'encoding')) == 'base64':
                self._cache = base64.encodestring(self._data)
            else:
                self._cache = encodeHexString(self._data)
        return self._cache
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        # Only the unqualified 'encoding' attribute is restricted; changing
        # it invalidates the marshalled-text cache.
        if attr[1] == 'encoding':
            if attr[0] != None or value not in ('base64', 'hex'):
                raise AttributeError, "invalid encoding"
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    # xsd:anyURI -- a URI string, percent-quoted when marshalled.
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = urllib.quote(self._data)
        return self._cache
# Older-schema spellings of anyURI.
class uriType(anyURIType):
    _validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    # xsd:NOTATION is abstract: it must be subclassed before instantiation.
    def __init__(self, data, name = None, typed = 1, attrs = None):
        if self.__class__ == NOTATIONType:
            raise Error, "a NOTATION can't be instantiated directly"
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
    # xsd:ENTITIES -- a sequence of string tokens, marshalled
    # space-separated.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        # A single string becomes a one-element tuple.
        if type(data) in (StringType, UnicodeType):
            return (data,)
        if type(data) not in (ListType, TupleType) or \
            filter (lambda x: type(x) not in (StringType, UnicodeType), data):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        return ' '.join(self._data)
# Token-list types sharing ENTITIESType's value space and marshalling.
class IDREFSType(ENTITIESType): pass
class NMTOKENSType(ENTITIESType): pass
class integerType(anyType):
    # xsd:integer -- any Python int or long, unbounded.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType):
            raise ValueError, "invalid %s value" % self._type
        return data
class nonPositiveIntegerType(anyType):
    # xsd:nonPositiveInteger -- an integer <= 0.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data > 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class non_Positive_IntegerType(nonPositiveIntegerType):
    # 1999-schema hyphenated spelling of nonPositiveInteger.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'non-positive-integer'
class negativeIntegerType(anyType):
    # xsd:negativeInteger -- an integer < 0.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data >= 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class negative_IntegerType(negativeIntegerType):
    # 1999-schema hyphenated spelling of negativeInteger.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'negative-integer'
class longType(anyType):
    # xsd:long -- an integer within the signed 64-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -9223372036854775808L or \
            data > 9223372036854775807L:
            raise ValueError, "invalid %s value" % self._type
        return data
class intType(anyType):
    # xsd:int -- an integer within the signed 32-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -2147483648L or \
            data > 2147483647:
            raise ValueError, "invalid %s value" % self._type
        return data
class shortType(anyType):
    # xsd:short -- an integer within the signed 16-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -32768 or \
            data > 32767:
            raise ValueError, "invalid %s value" % self._type
        return data
class byteType(anyType):
    # xsd:byte -- an integer within the signed 8-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -128 or \
            data > 127:
            raise ValueError, "invalid %s value" % self._type
        return data
class nonNegativeIntegerType(anyType):
    # xsd:nonNegativeInteger -- an integer >= 0.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data < 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class non_Negative_IntegerType(nonNegativeIntegerType):
    """Alias for nonNegativeIntegerType under the hyphenated 1999-draft XSD name."""

    # Only valid under the original (1999) XSD namespace.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        # Serialized XML type name uses hyphens, not camelCase.
        return 'non-negative-integer'
class unsignedLongType(anyType):
    """xsd:unsignedLong -- a 64-bit unsigned integer."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        """Validate that *data* is an integer within the unsigned 64-bit range."""
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType):
            raise ValueError("invalid %s value" % self._type)
        if not 0 <= data <= 18446744073709551615:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedIntType(anyType):
    """xsd:unsignedInt -- a 32-bit unsigned integer."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        """Validate that *data* is an integer within the unsigned 32-bit range."""
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType):
            raise ValueError("invalid %s value" % self._type)
        if not 0 <= data <= 4294967295:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedShortType(anyType):
    """xsd:unsignedShort -- a 16-bit unsigned integer."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        """Validate that *data* is an integer within the unsigned 16-bit range."""
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType):
            raise ValueError("invalid %s value" % self._type)
        if not 0 <= data <= 65535:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedByteType(anyType):
    """xsd:unsignedByte -- an 8-bit unsigned integer."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        """Validate that *data* is an integer within the unsigned 8-bit range."""
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType):
            raise ValueError("invalid %s value" % self._type)
        if not 0 <= data <= 255:
            raise ValueError("invalid %s value" % self._type)
        return data
class positiveIntegerType(anyType):
    """xsd:positiveInteger -- any integer > 0."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        """Validate that *data* is a strictly positive integer."""
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType):
            raise ValueError("invalid %s value" % self._type)
        if data <= 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class positive_IntegerType(positiveIntegerType):
    """Alias for positiveIntegerType under the hyphenated 1999-draft XSD name."""

    # Only valid under the original (1999) XSD namespace.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        # Serialized XML type name uses hyphens, not camelCase.
        return 'positive-integer'
# Now compound types
class compoundType(anyType):
    """Abstract base for SOAP compound values (structs and arrays).

    Members are stored as instance attributes; ``_keyord`` records the
    member names in insertion order so the original XML element ordering
    can be reproduced on serialization.
    """

    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        # Abstract: only concrete subclasses (structType, arrayType, ...)
        # may be instantiated.
        if self.__class__ == compoundType:
            raise Error, "a compound can't be instantiated directly"

        anyType.__init__(self, data, name, typed, attrs)
        self._keyord = []  # member names in insertion order

        if type(data) == DictType:
            self.__dict__.update(data)

    def _aslist(self, item=None):
        # Return the item-th member (by insertion order), or all members.
        if item is not None:
            return self.__dict__[self._keyord[item]]
        else:
            return map( lambda x: self.__dict__[x], self._keyord)

    def _asdict(self, item=None, encoding=Config.dict_encoding):
        # Return one member by (byte-encoded) name, or a dict of all
        # members keyed by encoded name.
        if item is not None:
            if type(item) in (UnicodeType,StringType):
                item = item.encode(encoding)
            return self.__dict__[item]
        else:
            retval = {}
            def fun(x): retval[x.encode(encoding)] = self.__dict__[x]

            if hasattr(self, '_keyord'):
                map( fun, self._keyord)
            else:
                # Fallback for instances created without _keyord.
                for name in dir(self):
                    if isPublic(name):
                        retval[name] = getattr(self,name)
            return retval

    def __getitem__(self, item):
        # Integer subscripts index by insertion order; anything else is
        # treated as an attribute name.
        if type(item) == IntType:
            return self.__dict__[self._keyord[item]]
        else:
            return getattr(self, item)

    def __len__(self):
        return len(self._keyord)

    def __nonzero__(self):
        # Always truthy, even with no members.
        return 1

    def _keys(self):
        # Public member names (those not starting with '_').
        return filter(lambda x: x[0] != '_', self.__dict__.keys())

    def _addItem(self, name, value, attrs = None):
        # Called by the parser for each child element; a repeated name
        # converts the member to a list and appends to it.
        if name in self._keyord:
            if type(self.__dict__[name]) != ListType:
                self.__dict__[name] = [self.__dict__[name]]

            self.__dict__[name].append(value)
        else:
            self.__dict__[name] = value
            self._keyord.append(name)

    def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
        # Called when an href reference is resolved: overwrite the
        # placeholder at (pos, subpos) with the real value.
        if subpos == 0 and type(self.__dict__[name]) != ListType:
            self.__dict__[name] = value
        else:
            self.__dict__[name][subpos] = value

        self._keyord[pos] = name

    def _getItemAsList(self, name, default = []):
        # NOTE(review): the mutable default is only ever returned, never
        # mutated here, so the sharing is harmless.
        try:
            d = self.__dict__[name]
        except:
            return default

        if type(d) == ListType:
            return d
        return [d]

    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._asdict())

    def __repr__(self):
        return self.__str__()
class structType(compoundType):
    """A SOAP struct: a compound value whose members are accessed by name."""
    pass
class headerType(structType):
    """The SOAP-ENV:Header element."""

    _validURIs = (NS.ENV,)

    def __init__(self, data = None, typed = 1, attrs = None):
        # The element name is fixed to "Header".
        structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
    """The SOAP-ENV:Body element."""

    _validURIs = (NS.ENV,)

    def __init__(self, data = None, typed = 1, attrs = None):
        # The element name is fixed to "Body".
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. It's possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _aslist(self, item=None):
if item is not None:
return self.data[int(item)]
else:
return self.data
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.data[int(item)]
else:
retval = {}
def fun(x): retval[str(x).encode(encoding)] = self.data[x]
map( fun, range(len(self.data)) )
return retval
def __getitem__(self, item):
try:
return self.data[int(item)]
except ValueError:
return getattr(self, item)
def __len__(self):
return len(self.data)
def __nonzero__(self):
return 1
def __str__(self):
return anyType.__str__(self) + ": " + str(self._aslist())
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
#self._full = 1
#FIXME: why is this occuring?
pass
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
    """An arrayType whose element type (and optional complexType) is fixed."""

    def __init__(self, data = None, name = None, typed = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):
        """Create a typed Array; *typed* names the element type."""

        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
                           elemsname)

        self._complexType = complexType
        self._type = typed
        self._typed = 1
class faultType(structType, Error):
    """A SOAP-ENV:Fault element; also raisable as an Error."""

    def __init__(self, faultcode = "", faultstring = "", detail = None):
        self.faultcode = faultcode
        self.faultstring = faultstring

        # Only create the detail attribute when a detail was supplied.
        if detail != None:
            self.detail = detail

        structType.__init__(self, None, 0)

    def _setDetail(self, detail = None):
        """Set the fault detail, or remove it entirely when detail is None."""
        if detail != None:
            self.detail = detail
        else:
            if hasattr(self, 'detail'):
                del self.detail

    def __repr__(self):
        detail = getattr(self, 'detail', None)
        if detail != None:
            return "<Fault %s: %s: %s>" % (
                self.faultcode, self.faultstring, detail)
        return "<Fault %s: %s>" % (self.faultcode, self.faultstring)

    __str__ = __repr__

    def __call__(self):
        # Expose the fault as a (code, string, detail) triple.
        return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
    """Generic exception raised for a SOAP fault with no specific mapping."""

    def __init__(self, code="", string="", detail=None):
        self.code = code
        self.string = string
        self.detail = detail
        # Keep the legacy composite value for str()/repr() output.
        self.value = ("SOAPpy SOAP Exception", code, string, detail)

    def __str__(self):
        return repr(self.value)
class RequiredHeaderMismatch(Exception):
    """Raised when a mustUnderstand SOAP header could not be honoured."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class MethodNotFound(Exception):
    """Raised when the server reports that the called method does not exist.

    The fault detail arrives as a single ``"<method>:<detail>"`` string;
    the constructor splits it into ``value`` and ``detail``.
    """

    def __init__(self, value):
        # Split only on the first colon so detail text that itself
        # contains colons does not raise a ValueError from unpacking.
        (val, detail) = value.split(":", 1)
        self.value = val
        self.detail = detail

    def __str__(self):
        # Bug fix: repr() takes exactly one argument; the original called
        # repr(self.value, self.detail), which raised TypeError.
        return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
    """Raised when the server's authorization method rejects a call."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class MethodFailed(Exception):
    """Raised when the server reports that the called method failed."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
#######
# Convert complex SOAPpy objects to native python equivalents
#######
def simplify(object, level=0):
    """
    Convert the SOAPpy objects and their contents to simple python types.

    This function recursively converts the passed 'container' object,
    and all public subobjects. (Private subobjects have names that
    start with '_'.)

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> array
    - compoundType --> dictionary
    """

    # Depth guard against unbounded recursion (e.g. cyclic structures).
    if level > 10:
        return object

    if isinstance( object, faultType ):
        # Map well-known fault strings onto specific exception types.
        if object.faultstring == "Required Header Misunderstood":
            raise RequiredHeaderMismatch(object.detail)
        elif object.faultstring == "Method Not Found":
            raise MethodNotFound(object.detail)
        elif object.faultstring == "Authorization Failed":
            raise AuthorizationFailed(object.detail)
        elif object.faultstring == "Method Failed":
            raise MethodFailed(object.detail)
        else:
            se = SOAPException(object.faultcode, object.faultstring,
                               object.detail)
            raise se
    elif isinstance( object, arrayType ):
        data = object._aslist()
        for k in range(len(data)):
            data[k] = simplify(data[k], level=level+1)
        return data
    elif isinstance( object, compoundType ) or isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                data[k] = simplify(data[k], level=level+1)
        return data
    elif type(object)==DictType:
        for k in object.keys():
            if isPublic(k):
                # Bug fix: propagate the recursion depth (the original
                # omitted level here, resetting it to 0 and defeating
                # the depth guard for nested dicts).
                object[k] = simplify(object[k], level=level+1)
        return object
    elif type(object)==list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth (see above).
            object[k] = simplify(object[k], level=level+1)
        return object
    else:
        return object
def simplify_contents(object, level=0):
    """
    Convert the contents of SOAPpy objects to simple python types.

    This function recursively converts the sub-objects contained in a
    'container' object to simple python types.

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> array
    - compoundType --> dictionary
    """

    # Depth guard against unbounded recursion (e.g. cyclic structures).
    if level > 10:
        return object

    if isinstance( object, faultType ):
        # Simplify the fault's public members in place, then raise it.
        for k in object._keys():
            if isPublic(k):
                setattr(object, k, simplify(object[k], level=level+1))
        raise object
    elif isinstance( object, arrayType ):
        data = object._aslist()
        for k in range(len(data)):
            object[k] = simplify(data[k], level=level+1)
    elif isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                setattr(object, k, simplify(data[k], level=level+1))
    elif isinstance( object, compoundType ) :
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                object[k] = simplify(data[k], level=level+1)
    elif type(object)==DictType:
        for k in object.keys():
            if isPublic(k):
                # Bug fix: propagate the recursion depth (the original
                # omitted level here, resetting it to 0 and defeating
                # the depth guard for nested dicts).
                object[k] = simplify(object[k], level=level+1)
    elif type(object)==list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth (see above).
            object[k] = simplify(object[k], level=level+1)

    return object
| Python |
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Config.py,v 1.9 2004/01/31 04:20:05 warnes Exp $'
from version import __version__
import copy, socket
from types import *
from NS import NS
################################################################################
# Configuration class
################################################################################
class SOAPConfig:
    """Holds all SOAPpy configuration settings.

    A shared module-level instance, ``Config``, is created below and
    used as the default everywhere.  Some attributes are interdependent:
    assigning ``debug`` or ``namespaceStyle`` updates several related
    attributes (see __setattr__).
    """

    # Capability flags detected at construction time; assignment to
    # these is rejected in __setattr__.
    __readonly = ('SSLserver', 'SSLclient', 'GSIserver', 'GSIclient')

    def __init__(self, config = None, **kw):
        d = self.__dict__

        if config:
            # Copy-construct from another SOAPConfig, skipping private
            # attributes.
            if not isinstance(config, SOAPConfig):
                raise AttributeError, \
                    "initializer must be SOAPConfig instance"

            s = config.__dict__

            for k, v in s.items():
                if k[0] != '_':
                    d[k] = v
        else:
            # Setting debug also sets returnFaultInfo,
            # dumpHeadersIn, dumpHeadersOut, dumpSOAPIn, and dumpSOAPOut
            self.debug = 0
            self.dumpFaultInfo = 1
            # Setting namespaceStyle sets typesNamespace, typesNamespaceURI,
            # schemaNamespace, and schemaNamespaceURI
            self.namespaceStyle = '1999'
            self.strictNamespaces = 0
            self.typed = 1
            self.buildWithNamespacePrefix = 1
            self.returnAllAttrs = 0

            # Strict checking of range for floats and doubles
            self.strict_range = 0

            # Default encoding for dictionary keys
            self.dict_encoding = 'ascii'

            # New argument name handling mechanism.  See
            # README.MethodParameterNaming for details
            self.specialArgs = 1

            # If unwrap_results=1 and there is only element in the struct,
            # SOAPProxy will assume that this element is the result
            # and return it rather than the struct containing it.
            # Otherwise SOAPproxy will return the struct with all the
            # elements as attributes.
            self.unwrap_results = 1

            # Automatically convert SOAP complex types, and
            # (recursively) public contents into the corresponding
            # python types. (Private subobjects have names that start
            # with '_'.)
            #
            # Conversions:
            # - faultType    --> raise python exception
            # - arrayType    --> array
            # - compoundType --> dictionary
            #
            self.simplify_objects = 0

            # Per-class authorization method. If this is set, before
            # calling any class method, the specified authorization
            # method will be called.  If it returns 1, the method call
            # will proceed, otherwise the call will throw with an
            # authorization error.
            self.authMethod = None

            # Globus Support if pyGlobus.io available.
            # NOTE(review): the bare excepts below are deliberate
            # capability probes -- any import failure means "feature
            # unavailable".
            try:
                from pyGlobus import io;
                d['GSIserver'] = 1
                d['GSIclient'] = 1
            except:
                d['GSIserver'] = 0
                d['GSIclient'] = 0

            # Server SSL support if M2Crypto.SSL available
            try:
                from M2Crypto import SSL
                d['SSLserver'] = 1
            except:
                d['SSLserver'] = 0

            # Client SSL support if socket.ssl available
            try:
                from socket import ssl
                d['SSLclient'] = 1
            except:
                d['SSLclient'] = 0

        # Keyword arguments override the defaults / copied values.
        for k, v in kw.items():
            if k[0] != '_':
                setattr(self, k, v)

    def __setattr__(self, name, value):
        # Reject writes to the detected capability flags.
        if name in self.__readonly:
            raise AttributeError, "readonly configuration setting"

        d = self.__dict__

        if name in ('typesNamespace', 'typesNamespaceURI',
            'schemaNamespace', 'schemaNamespaceURI'):

            # Accept a namespace prefix, a URI, or a (prefix, uri) pair,
            # and keep both the prefix and URI attributes in sync.
            if name[-3:] == 'URI':
                base, uri = name[:-3], 1
            else:
                base, uri = name, 0

            if type(value) == StringType:
                if NS.NSMAP.has_key(value):
                    n = (value, NS.NSMAP[value])
                elif NS.NSMAP_R.has_key(value):
                    n = (NS.NSMAP_R[value], value)
                else:
                    raise AttributeError, "unknown namespace"
            elif type(value) in (ListType, TupleType):
                if uri:
                    n = (value[1], value[0])
                else:
                    n = (value[0], value[1])
            else:
                raise AttributeError, "unknown namespace type"

            d[base], d[base + 'URI'] = n

            # Recompute namespaceStyle from the resulting pair, or clear
            # it when the combination is not a known style.
            try:
                d['namespaceStyle'] = \
                    NS.STMAP_R[(d['typesNamespace'], d['schemaNamespace'])]
            except:
                d['namespaceStyle'] = ''
        elif name == 'namespaceStyle':
            value = str(value)

            if not NS.STMAP.has_key(value):
                raise AttributeError, "unknown namespace style"

            # A style sets both namespaces and their URIs.
            d[name] = value
            n = d['typesNamespace'] = NS.STMAP[value][0]
            d['typesNamespaceURI'] = NS.NSMAP[n]
            n = d['schemaNamespace'] = NS.STMAP[value][1]
            d['schemaNamespaceURI'] = NS.NSMAP[n]
        elif name == 'debug':
            # debug fans out to all the dump/return flags.
            d[name] = \
                d['returnFaultInfo'] = \
                d['dumpHeadersIn'] = \
                d['dumpHeadersOut'] = \
                d['dumpSOAPIn'] = \
                d['dumpSOAPOut'] = value
        else:
            d[name] = value
# Shared module-level configuration instance used as the default by
# SOAPpy clients and servers.
Config = SOAPConfig()
| Python |
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: Client.py,v 1.27 2005/02/21 20:27:09 warnes Exp $'
from version import __version__
#import xml.sax
import urllib
from types import *
import re
import base64
# SOAPpy modules
from Errors import *
from Config import Config
from Parser import parseSOAPRPC
from SOAPBuilder import buildSOAP
from Utilities import *
from Types import faultType, simplify
################################################################################
# Client
################################################################################
def SOAPUserAgent():
    """Return the User-Agent header value sent with outgoing SOAP requests."""
    return "SOAPpy %s (pywebsvcs.sf.net)" % __version__
class SOAPAddress:
    """Parsed SOAP endpoint URL: protocol, optional user info, host, path."""

    def __init__(self, url, config = Config):
        proto, uri = urllib.splittype(url)

        # apply some defaults
        if uri[0:2] != '//':
            if proto != None:
                uri = proto + ':' + uri

            uri = '//' + uri
            proto = 'http'

        host, path = urllib.splithost(uri)

        try:
            # A bare port number means localhost:<port>.
            int(host)
            host = 'localhost:' + host
        except:
            # Not a bare number; leave the host untouched.
            pass

        if not path:
            path = '/'

        if proto not in ('http', 'https', 'httpg'):
            raise IOError, "unsupported SOAP protocol"
        if proto == 'httpg' and not config.GSIclient:
            raise AttributeError, \
                "GSI client not supported by this Python installation"
        if proto == 'https' and not config.SSLclient:
            raise AttributeError, \
                "SSL client not supported by this Python installation"

        # Separate optional "user:password@" credentials from the host.
        self.user,host = urllib.splituser(host)
        self.proto = proto
        self.host = host
        self.path = path

    def __str__(self):
        return "%(proto)s://%(host)s%(path)s" % self.__dict__

    __repr__ = __str__
class HTTPTransport:
    """Transport that POSTs SOAP messages over HTTP(S)/HTTPG via httplib."""

    def getNS(self, original_namespace, data):
        """Extract the (possibly extended) namespace from the returned
        SOAP message."""

        if type(original_namespace) == StringType:
            pattern="xmlns:\w+=['\"](" + original_namespace + "[^'\"]*)['\"]"
            match = re.search(pattern, data)
            if match:
                return match.group(1)
            else:
                return original_namespace
        else:
            return original_namespace

    # Need a Timeout someday?
    def call(self, addr, data, namespace, soapaction = None, encoding = None,
        http_proxy = None, config = Config):
        """POST *data* to *addr*; return (response payload, new namespace).

        Raises HTTPError for non-200/500 responses, and for 500 responses
        that do not carry a text/xml fault body.
        """

        import httplib

        if not isinstance(addr, SOAPAddress):
            addr = SOAPAddress(addr, config)

        # Build a request
        if http_proxy:
            # Proxying: connect to the proxy and send an absolute URL.
            real_addr = http_proxy
            real_path = addr.proto + "://" + addr.host + addr.path
        else:
            real_addr = addr.host
            real_path = addr.path

        if addr.proto == 'httpg':
            from pyGlobus.io import GSIHTTP
            r = GSIHTTP(real_addr, tcpAttr = config.tcpAttr)
        elif addr.proto == 'https':
            r = httplib.HTTPS(real_addr)
        else:
            r = httplib.HTTP(real_addr)

        r.putrequest("POST", real_path)

        r.putheader("Host", addr.host)
        r.putheader("User-agent", SOAPUserAgent())
        t = 'text/xml';
        if encoding != None:
            t += '; charset="%s"' % encoding
        r.putheader("Content-type", t)
        r.putheader("Content-length", str(len(data)))

        # if user is not a user:passwd format
        #    we'll receive a failure from the server. . .I guess (??)
        if addr.user != None:
            val = base64.encodestring(addr.user)
            # Strip the trailing newline base64.encodestring appends.
            r.putheader('Authorization','Basic ' + val.replace('\012',''))

        # This fixes sending either "" or "None"
        if soapaction == None or len(soapaction) == 0:
            r.putheader("SOAPAction", "")
        else:
            r.putheader("SOAPAction", '"%s"' % soapaction)

        if config.dumpHeadersOut:
            s = 'Outgoing HTTP headers'
            debugHeader(s)
            print "POST %s %s" % (real_path, r._http_vsn_str)
            print "Host:", addr.host
            print "User-agent: SOAPpy " + __version__ + " (http://pywebsvcs.sf.net)"
            print "Content-type:", t
            print "Content-length:", len(data)
            print 'SOAPAction: "%s"' % soapaction
            debugFooter(s)

        r.endheaders()

        if config.dumpSOAPOut:
            s = 'Outgoing SOAP'
            debugHeader(s)
            print data,
            if data[-1] != '\n':
                print
            debugFooter(s)

        # send the payload
        r.send(data)

        # read response line
        code, msg, headers = r.getreply()

        if headers:
            content_type = headers.get("content-type","text/xml")
            content_length = headers.get("Content-length")
        else:
            content_type=None
            content_length=None

        # work around OC4J bug which does '<len>, <len>' for some reason
        if content_length:
            comma=content_length.find(',')
            if comma>0:
                content_length = content_length[:comma]

        # attempt to extract integer message size
        try:
            message_len = int(content_length)
        except:
            message_len = -1

        if message_len < 0:
            # Content-Length missing or invalid; just read the whole socket
            # This won't work with HTTP/1.1 chunked encoding
            data = r.getfile().read()
            message_len = len(data)
        else:
            data = r.getfile().read(message_len)

        if(config.debug):
            print "code=",code
            print "msg=", msg
            print "headers=", headers
            print "content-type=", content_type
            print "data=", data

        if config.dumpHeadersIn:
            s = 'Incoming HTTP headers'
            debugHeader(s)
            if headers.headers:
                print "HTTP/1.? %d %s" % (code, msg)
                print "\n".join(map (lambda x: x.strip(), headers.headers))
            else:
                print "HTTP/0.9 %d %s" % (code, msg)
            debugFooter(s)

        def startswith(string, val):
            return string[0:len(val)] == val

        # A 500 without a text/xml fault body is a plain HTTP error, not
        # a SOAP fault.
        if code == 500 and not \
               ( startswith(content_type, "text/xml") and message_len > 0 ):
            raise HTTPError(code, msg)

        if config.dumpSOAPIn:
            s = 'Incoming SOAP'
            debugHeader(s)
            print data,
            if (len(data)>0) and (data[-1] != '\n'):
                print
            debugFooter(s)

        # 500 is allowed through here because it may carry a SOAP fault.
        if code not in (200, 500):
            raise HTTPError(code, msg)

        # get the new namespace
        if namespace is None:
            new_ns = None
        else:
            new_ns = self.getNS(namespace, data)

        # return response payload
        return data, new_ns
################################################################################
# SOAP Proxy
################################################################################
class SOAPProxy:
    """Callable client proxy for a remote SOAP endpoint.

    Attribute access produces __Method objects, so remote calls read as
    ``proxy.someMethod(args)``.  Dotted names (``proxy.ns.method``) and
    the per-call directives _ns, _sa, _hd and _ma are supported.
    """

    def __init__(self, proxy, namespace = None, soapaction = None,
                 header = None, methodattrs = None, transport = HTTPTransport,
                 encoding = 'UTF-8', throw_faults = 1, unwrap_results = None,
                 http_proxy=None, config = Config, noroot = 0,
                 simplify_objects=None):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        # get default values for unwrap_results and simplify_objects
        # from config
        if unwrap_results is None:
            self.unwrap_results=config.unwrap_results
        else:
            self.unwrap_results=unwrap_results

        if simplify_objects is None:
            self.simplify_objects=config.simplify_objects
        else:
            self.simplify_objects=simplify_objects

        self.proxy = SOAPAddress(proxy, config)
        self.namespace = namespace
        self.soapaction = soapaction
        self.header = header
        self.methodattrs = methodattrs
        self.transport = transport()
        self.encoding = encoding
        self.throw_faults = throw_faults
        self.http_proxy = http_proxy
        self.config = config
        self.noroot = noroot

        # GSI Additions
        if hasattr(config, "channel_mode") and \
               hasattr(config, "delegation_mode"):
            self.channel_mode = config.channel_mode
            self.delegation_mode = config.delegation_mode
        #end GSI Additions

    def invoke(self, method, args):
        # Explicit positional invocation, bypassing the __getattr__ magic.
        return self.__call(method, args, {})

    def __call(self, name, args, kw, ns = None, sa = None, hd = None,
               ma = None):
        """Serialize and send one RPC call; parse and return the result.

        ns/sa/hd/ma are per-call overrides (namespace, soapaction, header,
        method attributes) collected by the __Method directives.
        """

        ns = ns or self.namespace
        ma = ma or self.methodattrs

        if sa: # Get soapaction
            if type(sa) == TupleType:
                sa = sa[0]
        else:
            if self.soapaction:
                sa = self.soapaction
            else:
                sa = name

        if hd: # Get header
            if type(hd) == TupleType:
                hd = hd[0]
        else:
            hd = self.header

        hd = hd or self.header

        if ma: # Get methodattrs
            if type(ma) == TupleType: ma = ma[0]
        else:
            ma = self.methodattrs
        ma = ma or self.methodattrs

        m = buildSOAP(args = args, kw = kw, method = name, namespace = ns,
                      header = hd, methodattrs = ma, encoding = self.encoding,
                      config = self.config, noroot = self.noroot)

        call_retry = 0
        try:
            r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
                                                    encoding = self.encoding,
                                                    http_proxy = self.http_proxy,
                                                    config = self.config)

        except Exception, ex:
            #
            # Call failed.
            #
            # See if we have a fault handling vector installed in our
            # config.  If we do, invoke it.  If it returns a true value,
            # retry the call.
            #
            # In any circumstance other than the fault handler returning
            # true, reraise the exception.  This keeps the semantics of this
            # code the same as without the faultHandler code.
            #

            if hasattr(self.config, "faultHandler"):
                if callable(self.config.faultHandler):
                    call_retry = self.config.faultHandler(self.proxy, ex)
                    if not call_retry:
                        raise
                else:
                    raise
            else:
                raise

        if call_retry:
            # The fault handler asked for one retry of the same message.
            r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
                                                    encoding = self.encoding,
                                                    http_proxy = self.http_proxy,
                                                    config = self.config)

        p, attrs = parseSOAPRPC(r, attrs = 1)

        try:
            throw_struct = self.throw_faults and \
                isinstance (p, faultType)
        except:
            throw_struct = 0

        if throw_struct:
            if Config.debug:
                print p
            raise p

        # If unwrap_results=1 and there is only element in the struct,
        # SOAPProxy will assume that this element is the result
        # and return it rather than the struct containing it.
        # Otherwise SOAPproxy will return the struct with all the
        # elements as attributes.
        if self.unwrap_results:
            try:
                count = 0
                for i in p.__dict__.keys():
                    if i[0] != "_":  # don't count the private stuff
                        count += 1
                        t = getattr(p, i)
                if count == 1: # Only one piece of data, bubble it up
                    p = t

            except:
                pass

        # Automatically simplfy SOAP complex types into the
        # corresponding python types. (structType --> dict,
        # arrayType --> array, etc.)
        if self.simplify_objects:
            p = simplify(p)

        if self.config.returnAllAttrs:
            return p, attrs
        return p

    def _callWithBody(self, body):
        # Send a prebuilt body object with no method-name wrapper.
        return self.__call(None, body, {})

    def __getattr__(self, name):  # hook to catch method calls
        if name == '__del__':
            raise AttributeError, name
        return self.__Method(self.__call, name, config = self.config)

    # To handle attribute wierdness
    class __Method:
        # Some magic to bind a SOAP method to an RPC server.
        # Supports "nested" methods (e.g. examples.getStateName) -- concept
        # borrowed from xmlrpc/soaplib -- www.pythonware.com
        # Altered (improved?) to let you inline namespaces on a per call
        # basis ala SOAP::LITE -- www.soaplite.com

        def __init__(self, call, name, ns = None, sa = None, hd = None,
                     ma = None, config = Config):
            self.__call = call
            self.__name = name
            self.__ns = ns
            self.__sa = sa
            self.__hd = hd
            self.__ma = ma
            self.__config = config
            return

        def __call__(self, *args, **kw):
            # Names starting with "_" are per-call directives
            # (_ns/_sa/_hd/_ma), except repr/str requests.
            if self.__name[0] == "_":
                if self.__name in ["__repr__","__str__"]:
                    return self.__repr__()
                else:
                    return self.__f_call(*args, **kw)
            else:
                return self.__r_call(*args, **kw)

        def __getattr__(self, name):
            if name == '__del__':
                raise AttributeError, name
            if self.__name[0] == "_":
                # Don't nest method if it is a directive
                return self.__class__(self.__call, name, self.__ns,
                                      self.__sa, self.__hd, self.__ma)

            return self.__class__(self.__call, "%s.%s" % (self.__name, name),
                                  self.__ns, self.__sa, self.__hd, self.__ma)

        def __f_call(self, *args, **kw):
            # Record the directive's arguments for the eventual real call.
            if self.__name == "_ns": self.__ns = args
            elif self.__name == "_sa": self.__sa = args
            elif self.__name == "_hd": self.__hd = args
            elif self.__name == "_ma": self.__ma = args
            return self

        def __r_call(self, *args, **kw):
            # Perform the remote call through the proxy's __call.
            return self.__call(self.__name, args, kw, self.__ns, self.__sa,
                               self.__hd, self.__ma)

        def __repr__(self):
            return "<%s at %d>" % (self.__class__, id(self))
| Python |
# SOAPpy modules
from Config import Config
from Types import *
from NS import NS
from Utilities import *
import string
import fpconst
import xml.sax
from wstools.XMLname import fromXMLname
try: from M2Crypto import SSL
except: pass
ident = '$Id: Parser.py,v 1.16 2005/02/22 04:29:42 warnes Exp $'
from version import __version__
################################################################################
# SOAP Parser
################################################################################
class RefHolder:
    """Placeholder recorded while parsing for an unresolved href reference.

    Remembers where in the parse frame the real value must later be
    placed once the element carrying the matching id has been seen.
    """

    def __init__(self, name, frame):
        self.name = name
        self.parent = frame
        # Position of the pending element within the frame's contents.
        self.pos = len(frame)
        # Number of same-named siblings seen before it.
        self.subpos = frame.namecounts.get(name, 0)

    def __repr__(self):
        return "<%s %s at %d>" % (self.__class__, self.name, id(self))

    __str__ = __repr__
class SOAPParser(xml.sax.handler.ContentHandler):
class Frame:
def __init__(self, name, kind = None, attrs = {}, rules = {}):
self.name = name
self.kind = kind
self.attrs = attrs
self.rules = rules
self.contents = []
self.names = []
self.namecounts = {}
self.subattrs = []
def append(self, name, data, attrs):
self.names.append(name)
self.contents.append(data)
self.subattrs.append(attrs)
if self.namecounts.has_key(name):
self.namecounts[name] += 1
else:
self.namecounts[name] = 1
def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
self.contents[pos] = value
if attrs:
self.attrs.update(attrs)
def __len__(self):
return len(self.contents)
def __repr__(self):
return "<%s %s at %d>" % (self.__class__, self.name, id(self))
    def __init__(self, rules = None):
        """Initialize parser state; `rules` optionally maps element names
        to conversion rules applied while unmarshalling."""
        xml.sax.handler.ContentHandler.__init__(self)
        self.body = None                # set when SOAP-ENV:Body closes
        self.header = None              # set when SOAP-ENV:Header closes
        self.attrs = {}                 # id(value) -> attribute dict
        self._data = None               # character-data accumulator (None = off)
        self._next = "E" # Keeping state for message validity
        self._stack = [self.Frame('SOAP')]

        # Make two dictionaries to store the prefix <-> URI mappings, and
        # initialize them with the default
        self._prem = {NS.XML_T: NS.XML}
        self._prem_r = {NS.XML: NS.XML_T}
        self._ids = {}                  # id attribute -> unmarshalled value
        self._refs = {}                 # href -> [RefHolder, ...] awaiting ids
        self._rules = rules
def startElementNS(self, name, qname, attrs):
# Workaround two sax bugs
if name[0] == None and name[1][0] == ' ':
name = (None, name[1][1:])
else:
name = tuple(name)
# First some checking of the layout of the message
if self._next == "E":
if name[1] != 'Envelope':
raise Error, "expected `SOAP-ENV:Envelope', gto `%s:%s'" % \
(self._prem_r[name[0]], name[1])
if name[0] != NS.ENV:
raise faultType, ("%s:VersionMismatch" % NS.ENV_T,
"Don't understand version `%s' Envelope" % name[0])
else:
self._next = "HorB"
elif self._next == "HorB":
if name[0] == NS.ENV and name[1] in ("Header", "Body"):
self._next = None
else:
raise Error, \
"expected `SOAP-ENV:Header' or `SOAP-ENV:Body', " \
"got `%s'" % self._prem_r[name[0]] + ':' + name[1]
elif self._next == "B":
if name == (NS.ENV, "Body"):
self._next = None
else:
raise Error, "expected `SOAP-ENV:Body', got `%s'" % \
self._prem_r[name[0]] + ':' + name[1]
elif self._next == "":
raise Error, "expected nothing, got `%s'" % \
self._prem_r[name[0]] + ':' + name[1]
if len(self._stack) == 2:
rules = self._rules
else:
try:
rules = self._stack[-1].rules[name[1]]
except:
rules = None
if type(rules) not in (NoneType, DictType):
kind = rules
else:
kind = attrs.get((NS.ENC, 'arrayType'))
if kind != None:
del attrs._attrs[(NS.ENC, 'arrayType')]
i = kind.find(':')
if i >= 0:
kind = (self._prem[kind[:i]], kind[i + 1:])
else:
kind = None
self.pushFrame(self.Frame(name[1], kind, attrs._attrs, rules))
self._data = [] # Start accumulating
    def pushFrame(self, frame):
        """Push a new parse frame for the element just opened."""
        self._stack.append(frame)

    def popFrame(self):
        """Pop and return the frame of the element just closed."""
        return self._stack.pop()
    def endElementNS(self, name, qname):
        """SAX hook: convert the just-closed element into a Python value.

        Resolves id/href multireferences (parking RefHolders for forward
        references), honours nil/null, dispatches on caller-supplied rules,
        arrays, and xsi:type, then attaches the value to the parent frame.
        The ``while 1`` loop is a poor man's goto: each branch computes
        ``data`` and breaks out.
        """
        # Workaround two sax bugs
        if name[0] == None and name[1][0] == ' ':
            ns, name = None, name[1][1:]
        else:
            ns, name = tuple(name)

        name = fromXMLname(name) # convert to SOAP 1.2 XML name encoding

        if self._next == "E":
            raise Error, "didn't get SOAP-ENV:Envelope"
        if self._next in ("HorB", "B"):
            raise Error, "didn't get SOAP-ENV:Body"

        cur = self.popFrame()
        attrs = cur.attrs

        idval = None

        if attrs.has_key((None, 'id')):
            idval = attrs[(None, 'id')]

            if self._ids.has_key(idval):
                raise Error, "duplicate id `%s'" % idval

            del attrs[(None, 'id')]

        root = 1

        if len(self._stack) == 3:
            if attrs.has_key((NS.ENC, 'root')):
                root = int(attrs[(NS.ENC, 'root')])

                # Do some preliminary checks. First, if root="0" is present,
                # the element must have an id. Next, if root="n" is present,
                # n something other than 0 or 1, raise an exception.

                if root == 0:
                    if idval == None:
                        raise Error, "non-root element must have an id"
                elif root != 1:
                    raise Error, "SOAP-ENC:root must be `0' or `1'"

                del attrs[(NS.ENC, 'root')]

        while 1:
            # Case 1: href reference -- use the referenced value if already
            # seen, otherwise park a RefHolder to be patched later.
            href = attrs.get((None, 'href'))
            if href:
                if href[0] != '#':
                    raise Error, "Non-local hrefs are not yet suppported."
                if self._data != None and \
                   string.join(self._data, "").strip() != '':
                    raise Error, "hrefs can't have data"

                href = href[1:]

                if self._ids.has_key(href):
                    data = self._ids[href]
                else:
                    data = RefHolder(name, self._stack[-1])

                    if self._refs.has_key(href):
                        self._refs[href].append(data)
                    else:
                        self._refs[href] = [data]

                del attrs[(None, 'href')]

                break

            kind = None

            # Pull the xsi:type attribute (any known XSI namespace version).
            if attrs:
                for i in NS.XSI_L:
                    if attrs.has_key((i, 'type')):
                        kind = attrs[(i, 'type')]
                        del attrs[(i, 'type')]

                if kind != None:
                    i = kind.find(':')
                    if i >= 0:
                        kind = (self._prem[kind[:i]], kind[i + 1:])
                    else:
                        # XXX What to do here? (None, kind) is just going to fail in convertType
                        #print "Kind with no NS:", kind
                        kind = (None, kind)

            null = 0

            # Pull nil/null markers (XSI 1999/2000/2001 spellings).
            if attrs:
                for i in (NS.XSI, NS.XSI2):
                    if attrs.has_key((i, 'null')):
                        null = attrs[(i, 'null')]
                        del attrs[(i, 'null')]

                if attrs.has_key((NS.XSI3, 'nil')):
                    null = attrs[(NS.XSI3, 'nil')]
                    del attrs[(NS.XSI3, 'nil')]

            ## Check for nil
            # check for nil='true'
            if type(null) in (StringType, UnicodeType):
                if null.lower() == 'true':
                    null = 1

            # check for nil=1, but watch out for string values
            try:
                null = int(null)
            except ValueError, e:
                if not e[0].startswith("invalid literal for int()"):
                    raise e
                null = 0

            if null:
                if len(cur) or \
                   (self._data != None and string.join(self._data, "").strip() != ''):
                    raise Error, "nils can't have data"

                data = None

                break

            # Case 2: the Header/Body/Fault framing elements themselves.
            if len(self._stack) == 2:
                if (ns, name) == (NS.ENV, "Header"):
                    self.header = data = headerType(attrs = attrs)
                    self._next = "B"
                    break
                elif (ns, name) == (NS.ENV, "Body"):
                    self.body = data = bodyType(attrs = attrs)
                    self._next = ""
                    break
            elif len(self._stack) == 3 and self._next == None:
                if (ns, name) == (NS.ENV, "Fault"):
                    data = faultType()
                    self._next = None # allow followons
                    break

            #print "\n"
            #print "data=", self._data
            #print "kind=", kind
            #print "cur.kind=", cur.kind
            #print "cur.rules=", cur.rules
            #print "\n"

            # Case 3: a caller-supplied conversion rule takes precedence.
            if cur.rules != None:
                rule = cur.rules

                if type(rule) in (StringType, UnicodeType):
                    rule = (None, rule) # none flags special handling
                elif type(rule) == ListType:
                    rule = tuple(rule)

                #print "kind=",kind
                #print "rule=",rule

                # XXX What if rule != kind?
                if callable(rule):
                    data = rule(string.join(self._data, ""))
                elif type(rule) == DictType:
                    data = structType(name = (ns, name), attrs = attrs)
                elif rule[1][:9] == 'arrayType':
                    data = self.convertType(cur.contents,
                                            rule, attrs)
                else:
                    data = self.convertType(string.join(self._data, ""),
                                            rule, attrs)

                break

            #print "No rules, using kind or cur.kind..."

            # Case 4: SOAP-ENC arrays (frame kind came from arrayType attr).
            if (kind == None and cur.kind != None) or \
               (kind == (NS.ENC, 'Array')):
                kind = cur.kind

                if kind == None:
                    kind = 'ur-type[%d]' % len(cur)
                else:
                    kind = kind[1]

                if len(cur.namecounts) == 1:
                    elemsname = cur.names[0]
                else:
                    elemsname = None

                data = self.startArray((ns, name), kind, attrs, elemsname)

                break

            # Case 5: an empty, untyped RPC wrapper element -> struct.
            if len(self._stack) == 3 and kind == None and \
               len(cur) == 0 and \
               (self._data == None or string.join(self._data, "").strip() == ''):
                data = structType(name = (ns, name), attrs = attrs)
                break

            # Case 6: no children -> simple type; infer kind if needed.
            if len(cur) == 0 and ns != NS.URN:
                # Nothing's been added to the current frame so it must be a
                # simple type.

                if kind == None:
                    # If the current item's container is an array, it will
                    # have a kind. If so, get the bit before the first [,
                    # which is the type of the array, therefore the type of
                    # the current item.

                    kind = self._stack[-1].kind

                    if kind != None:
                        i = kind[1].find('[')
                        if i >= 0:
                            kind = (kind[0], kind[1][:i])
                    elif ns != None:
                        kind = (ns, name)

                if kind != None:
                    try:
                        data = self.convertType(string.join(self._data, ""),
                                                kind, attrs)
                    except UnknownTypeError:
                        data = None
                else:
                    data = None

                if data == None:
                    if self._data == None:
                        data = ''
                    else:
                        data = string.join(self._data, "")

                    if len(attrs) == 0:
                        try: data = str(data)
                        except: pass

                break

            # Default: element with children and no better idea -> struct.
            data = structType(name = (ns, name), attrs = attrs)

            break

        # Move accumulated children into the new compound value, rebinding
        # any parked RefHolders to their final parent.
        if isinstance(data, compoundType):
            for i in range(len(cur)):
                v = cur.contents[i]
                data._addItem(cur.names[i], v, cur.subattrs[i])

                if isinstance(v, RefHolder):
                    v.parent = data

        if root:
            self._stack[-1].append(name, data, attrs)

        # If this element had an id, resolve every reference waiting on it.
        if idval != None:
            self._ids[idval] = data

            if self._refs.has_key(idval):
                for i in self._refs[idval]:
                    i.parent._placeItem(i.name, data, i.pos, i.subpos, attrs)

                del self._refs[idval]

        self.attrs[id(data)] = attrs
        if isinstance(data, anyType):
            data._setAttrs(attrs)

        self._data = None # Stop accumulating
def endDocument(self):
if len(self._refs) == 1:
raise Error, \
"unresolved reference " + self._refs.keys()[0]
elif len(self._refs) > 1:
raise Error, \
"unresolved references " + ', '.join(self._refs.keys())
def startPrefixMapping(self, prefix, uri):
self._prem[prefix] = uri
self._prem_r[uri] = prefix
def endPrefixMapping(self, prefix):
try:
del self._prem_r[self._prem[prefix]]
del self._prem[prefix]
except:
pass
    def characters(self, c):
        """SAX hook: buffer character data; dropped unless an element is
        currently accumulating (self._data is a list between start/end)."""
        if self._data != None:
            self._data.append(c)
    # Grammar of SOAP-ENC arrayType values, e.g. "xsd:int[4]" or
    # "x:t[,][2,3]"; stored as a string and compiled lazily in startArray.
    arrayre = '^(?:(?P<ns>[^:]*):)?' \
              '(?P<type>[^[]+)' \
              '(?:\[(?P<rank>,*)\])?' \
              '(?:\[(?P<asize>\d+(?:,\d+)*)?\])$'

    def startArray(self, name, kind, attrs, elemsname):
        """Build the arrayType/typedArrayType instance described by a
        SOAP-ENC arrayType string, honouring SOAP-ENC:offset for
        partially-transmitted arrays."""
        if type(self.arrayre) == StringType:
            self.arrayre = re.compile (self.arrayre)

        offset = attrs.get((NS.ENC, "offset"))

        if offset != None:
            del attrs[(NS.ENC, "offset")]

            try:
                # offset is written as "[n]"; reject anything else.
                if offset[0] == '[' and offset[-1] == ']':
                    offset = int(offset[1:-1])
                    if offset < 0:
                        raise Exception
                else:
                    raise Exception
            except:
                raise AttributeError, "invalid Array offset"
        else:
            offset = 0

        try:
            m = self.arrayre.search(kind)

            if m == None:
                raise Exception

            t = m.group('type')

            if t == 'ur-type':
                # Untyped array: elements keep their individual types.
                return arrayType(None, name, attrs, offset, m.group('rank'),
                                 m.group('asize'), elemsname)
            elif m.group('ns') != None:
                return typedArrayType(None, name,
                    (self._prem[m.group('ns')], t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
            else:
                return typedArrayType(None, name, (None, t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
        except:
            raise AttributeError, "invalid Array type `%s'" % kind
# Conversion
    class DATETIMECONSTS:
        """Regular expressions for the XML Schema date/time lexical forms.

        Each attribute starts life as a pattern string; convertDateTime
        compiles it on first use and stores the compiled pattern back on
        this class (lazy compilation cache).
        """
        SIGNre = '(?P<sign>-?)'
        CENTURYre = '(?P<century>\d{2,})'
        YEARre = '(?P<year>\d{2})'
        MONTHre = '(?P<month>\d{2})'
        DAYre = '(?P<day>\d{2})'
        HOURre = '(?P<hour>\d{2})'
        MINUTEre = '(?P<minute>\d{2})'
        SECONDre = '(?P<second>\d{2}(?:\.\d*)?)'
        TIMEZONEre = '(?P<zulu>Z)|(?P<tzsign>[-+])(?P<tzhour>\d{2}):' \
            '(?P<tzminute>\d{2})'
        BOSre = '^\s*'
        EOSre = '\s*$'

        # Building blocks substituted into the full patterns below.
        __allres = {'sign': SIGNre, 'century': CENTURYre, 'year': YEARre,
                    'month': MONTHre, 'day': DAYre, 'hour': HOURre,
                    'minute': MINUTEre, 'second': SECONDre, 'timezone': TIMEZONEre,
                    'b': BOSre, 'e': EOSre}

        dateTime = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)sT' \
            '%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % __allres
        timeInstant = dateTime
        timePeriod = dateTime
        time = '%(b)s%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % \
            __allres
        date = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)s' \
            '(%(timezone)s)?%(e)s' % __allres
        century = '%(b)s%(sign)s%(century)s(%(timezone)s)?%(e)s' % __allres
        gYearMonth = '%(b)s%(sign)s%(century)s%(year)s-%(month)s' \
            '(%(timezone)s)?%(e)s' % __allres
        gYear = '%(b)s%(sign)s%(century)s%(year)s(%(timezone)s)?%(e)s' % \
            __allres
        year = gYear
        gMonthDay = '%(b)s--%(month)s-%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDate = gMonthDay
        gDay = '%(b)s---%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDay = gDay
        gMonth = '%(b)s--%(month)s--(%(timezone)s)?%(e)s' % __allres
        month = gMonth
        recurringInstant = '%(b)s%(sign)s(%(century)s|-)(%(year)s|-)-' \
            '(%(month)s|-)-(%(day)s|-)T' \
            '(%(hour)s|-):(%(minute)s|-):(%(second)s|-)' \
            '(%(timezone)s)?%(e)s' % __allres

        duration = '%(b)s%(sign)sP' \
            '((?P<year>\d+)Y)?' \
            '((?P<month>\d+)M)?' \
            '((?P<day>\d+)D)?' \
            '((?P<sep>T)' \
            '((?P<hour>\d+)H)?' \
            '((?P<minute>\d+)M)?' \
            '((?P<second>\d*(?:\.\d*)?)S)?)?%(e)s' % \
            __allres
        timeDuration = duration

        # The extra 31 on the front is:
        # - so the tuple is 1-based
        # - so months[month-1] is December's days if month is 1
        months = (31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    def convertDateTime(self, value, kind):
        """Parse an XML Schema date/time lexical value into a tuple of
        numeric fields (or a single number for one-field kinds).

        `kind` names one of the DATETIMECONSTS patterns; returns None for
        an unknown kind and raises Error for a malformed value.
        """
        def getZoneOffset(d):
            # Timezone offset in minutes, negated so *adding* it converts
            # the parsed local time to UTC; TypeError means no tz groups.
            zoffs = 0

            try:
                if d['zulu'] == None:
                    zoffs = 60 * int(d['tzhour']) + int(d['tzminute'])
                    if d['tzsign'] != '-':
                        zoffs = -zoffs
            except TypeError:
                pass

            return zoffs

        def applyZoneOffset(months, zoffs, date, minfield, posday = 1):
            # Normalize `date` ([y, m, d, h, min, s]) after adding zoffs
            # minutes, carrying into higher fields; fields below
            # `minfield` are not part of this kind and are zeroed/skipped.
            if zoffs == 0 and (minfield > 4 or 0 <= date[5] < 60):
                return date

            if minfield > 5: date[5] = 0
            if minfield > 4: date[4] = 0

            if date[5] < 0:
                date[4] += int(date[5]) / 60
                date[5] %= 60

            date[4] += zoffs

            if minfield > 3 or 0 <= date[4] < 60: return date
            date[3] += date[4] / 60
            date[4] %= 60

            if minfield > 2 or 0 <= date[3] < 24: return date
            date[2] += date[3] / 24
            date[3] %= 24

            if minfield > 1:
                if posday and date[2] <= 0:
                    date[2] += 31       # zoffs is at most 99:59, so the
                                        # day will never be less than -3
                return date

            while 1:
                # The date[1] == 3 (instead of == 2) is because we're
                # going back a month, so we need to know if the previous
                # month is February, so we test if this month is March.

                leap = minfield == 0 and date[1] == 3 and \
                    date[0] % 4 == 0 and \
                    (date[0] % 100 != 0 or date[0] % 400 == 0)

                if 0 < date[2] <= months[date[1]] + leap: break

                date[2] += months[date[1] - 1] + leap
                date[1] -= 1

                if date[1] > 0: break
                date[1] = 12

                if minfield > 0: break

                date[0] -= 1

            return date

        try:
            exp = getattr(self.DATETIMECONSTS, kind)
        except AttributeError:
            return None

        # Lazily compile the pattern and cache it back on the class.
        if type(exp) == StringType:
            exp = re.compile(exp)
            setattr (self.DATETIMECONSTS, kind, exp)

        m = exp.search(value)

        try:
            if m == None:
                raise Exception

            d = m.groupdict()
            f = ('century', 'year', 'month', 'day',
                 'hour', 'minute', 'second')
            fn = len(f)         # Index of first non-None value
            r = []

            if kind in ('duration', 'timeDuration'):
                # A 'T' separator with no time fields is malformed.
                if d['sep'] != None and d['hour'] == None and \
                   d['minute'] == None and d['second'] == None:
                    raise Exception

                f = f[1:]

                for i in range(len(f)):
                    s = d[f[i]]

                    if s != None:
                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try: s = int(s)
                            except ValueError: s = long(s)

                        if i < fn: fn = i

                    r.append(s)

                if fn > len(r):  # Any non-Nones?
                    raise Exception

                if d['sign'] == '-':
                    r[fn] = -r[fn]

                return tuple(r)

            if kind == 'recurringInstant':
                # Fields may be '-' (omitted) only at the most-significant
                # end; once a field is present, the rest must be too.
                for i in range(len(f)):
                    s = d[f[i]]

                    if s == None or s == '-':
                        if i > fn:
                            raise Exception
                        s = None
                    else:
                        if i < fn:
                            fn = i

                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try:
                                s = int(s)
                            except ValueError:
                                s = long(s)

                    r.append(s)

                # Fold century and year into a single year value.
                s = r.pop(0)

                if fn == 0:
                    r[0] += s * 100
                else:
                    fn -= 1

                if fn < len(r) and d['sign'] == '-':
                    r[fn] = -r[fn]

                cleanDate(r, fn)

                return tuple(applyZoneOffset(self.DATETIMECONSTS.months,
                                             getZoneOffset(d), r, fn, 0))

            # All remaining kinds: defaults then overwrite parsed fields.
            r = [0, 0, 1, 1, 0, 0, 0]

            for i in range(len(f)):
                field = f[i]

                s = d.get(field)

                if s != None:
                    if field == 'second':
                        s = float(s)
                    else:
                        try:
                            s = int(s)
                        except ValueError:
                            s = long(s)

                    if i < fn:
                        fn = i

                    r[i] = s

            if fn > len(r):      # Any non-Nones?
                raise Exception

            # Fold century and year into a single year value.
            s = r.pop(0)

            if fn == 0:
                r[0] += s * 100
            else:
                fn -= 1

            if d.get('sign') == '-':
                r[fn] = -r[fn]

            cleanDate(r, fn)

            zoffs = getZoneOffset(d)

            if zoffs:
                r = applyZoneOffset(self.DATETIMECONSTS.months, zoffs, r, fn)

            if kind == 'century':
                return r[0] / 100

            # Return only the fields this kind's pattern actually defines.
            s = []

            for i in range(1, len(f)):
                if d.has_key(f[i]):
                    s.append(r[i - 1])

            if len(s) == 1:
                return s[0]
            return tuple(s)
        except Exception, e:
            raise Error, "invalid %s value `%s' - %s" % (kind, value, e)
    # Integer-type limits as (force-long, min, max); None means unbounded.
    # The first element forces the Python long type even when the value
    # would fit in an int (XSD long / unsignedLong).
    intlimits = \
    {
        'nonPositiveInteger': (0, None, 0),
        'non-positive-integer': (0, None, 0),
        'negativeInteger': (0, None, -1),
        'negative-integer': (0, None, -1),
        'long': (1, -9223372036854775808L,
            9223372036854775807L),
        'int': (0, -2147483648L, 2147483647),
        'short': (0, -32768, 32767),
        'byte': (0, -128, 127),
        'nonNegativeInteger': (0, 0, None),
        'non-negative-integer': (0, 0, None),
        'positiveInteger': (0, 1, None),
        'positive-integer': (0, 1, None),
        'unsignedLong': (1, 0, 18446744073709551615L),
        'unsignedInt': (0, 0, 4294967295L),
        'unsignedShort': (0, 0, 65535),
        'unsignedByte': (0, 0, 255),
    }
    # Float-type limits as (smallest positive magnitude, min, max) used by
    # convertToBasicTypes for XSD float/double range checks.
    floatlimits = \
    {
        'float': (7.0064923216240861E-46, -3.4028234663852886E+38,
                  3.4028234663852886E+38),
        'double': (2.4703282292062327E-324, -1.7976931348623158E+308,
                   1.7976931348623157E+308),
    }
    # Matches any nonzero digit in the source text; used to detect a value
    # that underflowed to 0.0 during float conversion (compiled lazily).
    zerofloatre = '[1-9]'
    def convertType(self, d, t, attrs, config=Config):
        """Convert data `d` to the type named by tuple `t` (namespace, name).

        A namespace-less 'arrayType=...' rule converts each element of a
        list; other namespace-less rules are assumed to be XSD type names.
        """
        if t[0] is None and t[1] is not None:
            # NOTE: shadows the `type` builtin for the rest of this branch.
            type = t[1].strip()
            if type[:9] == 'arrayType':
                # Rule looks like "arrayType=elemtype[n]".
                index_eq = type.find('=')
                index_obr = type.find('[')
                index_cbr = type.find(']')
                elemtype = type[index_eq+1:index_obr]
                elemnum  = type[index_obr+1:index_cbr]
                if elemtype=="ur-type":
                    # Untyped array: pass the elements through unchanged.
                    return(d)
                else:
                    # Python 2 tuple-parameter lambda; converts each element
                    # to the declared XSD element type.
                    newarr = map( lambda(di):
                                  self.convertToBasicTypes(d=di,
                                                           t = ( NS.XSD, elemtype),
                                                           attrs=attrs,
                                                           config=config),
                                  d)
                    return newarr
            else:
                t = (NS.XSD, t[1])

        return self.convertToBasicTypes(d, t, attrs, config)
    def convertToSOAPpyTypes(self, d, t, attrs, config=Config):
        # NOTE(review): unimplemented stub -- convertType dispatches
        # straight to convertToBasicTypes instead; returns None if called.
        pass
    def convertToBasicTypes(self, d, t, attrs, config=Config):
        """Convert string data `d` to a basic Python value for XSD type
        tuple `t` = (namespace URI, type name).

        Dispatches across the XSD/SOAP-ENC namespace generations; raises
        UnknownTypeError when no branch recognizes the type.
        """
        dnn = d or ''

        #if Config.debug:
        #print "convertToBasicTypes:"
        #print " requested_type=", t
        #print " data=", d

        # Extended-XSD namespaces: the bulk of the simple types.
        if t[0] in NS.EXSD_L:
            if t[1] == "integer":
                try:
                    d = int(d)
                    if len(attrs):
                        d = long(d)
                except:
                    d = long(d)
                return d

            if self.intlimits.has_key (t[1]): # integer types
                l = self.intlimits[t[1]]
                try: d = int(d)
                except: d = long(d)

                if l[1] != None and d < l[1]:
                    raise UnderflowError, "%s too small" % d
                if l[2] != None and d > l[2]:
                    raise OverflowError, "%s too large" % d

                if l[0] or len(attrs):
                    return long(d)
                return d

            if t[1] == "string":
                if len(attrs):
                    return unicode(dnn)
                try:
                    return str(dnn)
                except:
                    return dnn

            if t[1] == "boolean":
                d = d.strip().lower()
                if d in ('0', 'false'):
                    return 0
                if d in ('1', 'true'):
                    return 1
                raise AttributeError, "invalid boolean value"

            if t[1] in ('double','float'):
                l = self.floatlimits[t[1]]
                s = d.strip().lower()

                d = float(s)

                if config.strict_range:
                    if d < l[1]: raise UnderflowError
                    if d > l[2]: raise OverflowError
                else:
                    # some older SOAP impementations (notably SOAP4J,
                    # Apache SOAP) return "infinity" instead of "INF"
                    # so check the first 3 characters for a match.
                    if s == "nan":
                        return fpconst.NaN
                    elif s[0:3] in ("inf", "+inf"):
                        return fpconst.PosInf
                    elif s[0:3] == "-inf":
                        return fpconst.NegInf

                if fpconst.isNaN(d):
                    if s != 'nan':
                        raise ValueError, "invalid %s: %s" % (t[1], s)
                elif fpconst.isNegInf(d):
                    if s != '-inf':
                        raise UnderflowError, "%s too small: %s" % (t[1], s)
                elif fpconst.isPosInf(d):
                    if s != 'inf':
                        raise OverflowError, "%s too large: %s" % (t[1], s)
                elif d < 0 and d < l[1]:
                    raise UnderflowError, "%s too small: %s" % (t[1], s)
                elif d > 0 and ( d < l[0] or d > l[2] ):
                    raise OverflowError, "%s too large: %s" % (t[1], s)
                elif d == 0:
                    # 0.0 from a string containing a nonzero digit means
                    # the value underflowed the representable range.
                    if type(self.zerofloatre) == StringType:
                        self.zerofloatre = re.compile(self.zerofloatre)

                    if self.zerofloatre.search(s):
                        raise UnderflowError, "invalid %s: %s" % (t[1], s)

                return d

            if t[1] in ("dateTime", "date", "timeInstant", "time"):
                return self.convertDateTime(d, t[1])
            if t[1] == "decimal":
                return float(d)
            if t[1] in ("language", "QName", "NOTATION", "NMTOKEN", "Name",
                        "NCName", "ID", "IDREF", "ENTITY"):
                return collapseWhiteSpace(d)
            if t[1] in ("IDREFS", "ENTITIES", "NMTOKENS"):
                d = collapseWhiteSpace(d)
                return d.split()

        if t[0] in NS.XSD_L:
            if t[1] in ("base64", "base64Binary"):
                if d:
                    return base64.decodestring(d)
                else:
                    return ''
            if t[1] == "hexBinary":
                if d:
                    return decodeHexString(d)
                else:
                    return
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("normalizedString", "token"):
                return collapseWhiteSpace(d)

        if t[0] == NS.ENC:
            if t[1] == "base64":
                if d:
                    return base64.decodestring(d)
                else:
                    return ''

        if t[0] == NS.XSD:
            if t[1] == "binary":
                try:
                    e = attrs[(None, 'encoding')]

                    if d:
                        if e == 'hex':
                            return decodeHexString(d)
                        elif e == 'base64':
                            return base64.decodestring(d)
                    else:
                        return ''
                except:
                    pass

                raise Error, "unknown or missing binary encoding"
            if t[1] == "uri":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "recurringInstant":
                return self.convertDateTime(d, t[1])

        if t[0] in (NS.XSD2, NS.ENC):
            if t[1] == "uriReference":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "timePeriod":
                return self.convertDateTime(d, t[1])
            if t[1] in ("century", "year"):
                return self.convertDateTime(d, t[1])

        if t[0] in (NS.XSD, NS.XSD2, NS.ENC):
            if t[1] == "timeDuration":
                return self.convertDateTime(d, t[1])

        if t[0] == NS.XSD3:
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("gYearMonth", "gMonthDay"):
                return self.convertDateTime(d, t[1])
            if t[1] == "gYear":
                return self.convertDateTime(d, t[1])
            if t[1] == "gMonth":
                return self.convertDateTime(d, t[1])
            if t[1] == "gDay":
                return self.convertDateTime(d, t[1])
            if t[1] == "duration":
                return self.convertDateTime(d, t[1])

        if t[0] in (NS.XSD2, NS.XSD3):
            if t[1] == "token":
                return collapseWhiteSpace(d)
            if t[1] == "recurringDate":
                return self.convertDateTime(d, t[1])
            if t[1] == "month":
                return self.convertDateTime(d, t[1])
            if t[1] == "recurringDay":
                return self.convertDateTime(d, t[1])

        if t[0] == NS.XSD2:
            if t[1] == "CDATA":
                return collapseWhiteSpace(d)

        raise UnknownTypeError, "unknown type `%s'" % (str(t[0]) + ':' + t[1])
################################################################################
# call to SOAPParser that keeps all of the info
################################################################################
def _parseSOAP(xml_str, rules = None):
    """Parse a SOAP message string and return the SOAPParser instance,
    which exposes the parsed header, body and attribute map."""
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO

    parser = xml.sax.make_parser()
    t = SOAPParser(rules = rules)
    parser.setContentHandler(t)
    e = xml.sax.handler.ErrorHandler()
    parser.setErrorHandler(e)

    inpsrc = xml.sax.xmlreader.InputSource()
    inpsrc.setByteStream(StringIO(xml_str))

    # turn on namespace processing
    parser.setFeature(xml.sax.handler.feature_namespaces,1)

    try:
        parser.parse(inpsrc)
    except xml.sax.SAXParseException, e:
        # Drop the expat parser to break reference cycles before re-raising.
        parser._parser = None
        raise e

    return t
################################################################################
# SOAPParser's more public interface
################################################################################
def parseSOAP(xml_str, attrs = 0):
    """Parse a SOAP message and return its body; when `attrs` is true,
    also return the attribute map as a second element."""
    parsed = _parseSOAP(xml_str)
    if attrs:
        return parsed.body, parsed.attrs
    return parsed.body
def parseSOAPRPC(xml_str, header = 0, body = 0, attrs = 0, rules = None):
    """Parse a SOAP-RPC message and return the first body entry (the call
    or response struct), optionally followed by header/body/attrs
    depending on which flags are set."""
    t = _parseSOAP(xml_str, rules = rules)
    p = t.body[0]

    # Empty string, for RPC this translates into a void
    if type(p) in (type(''), type(u'')) and p in ('', u''):
        name = "Response"
        for k in t.body.__dict__.keys():
            if k[0] != "_":
                name = k
        p = structType(name)

    if header or body or attrs:
        ret = (p,)
        if header : ret += (t.header,)
        if body: ret += (t.body,)
        if attrs: ret += (t.attrs,)
        return ret
    else:
        return p
| Python |
"""This file is here for backward compatibility with versions <= 0.9.9
Delete when 1.0.0 is released!
"""
ident = '$Id: SOAP.py,v 1.38 2004/01/31 04:20:06 warnes Exp $'
from version import __version__
from Client import *
from Config import *
from Errors import *
from NS import *
from Parser import *
from SOAPBuilder import *
from Server import *
from Types import *
from Utilities import *
import wstools
import WSDL
from warnings import warn
warn("""
The sub-module SOAPpy.SOAP is deprecated and is only
provided for short-term backward compatibility. Objects are now
available directly within the SOAPpy module. Thus, instead of
from SOAPpy import SOAP
...
SOAP.SOAPProxy(...)
use
from SOAPpy import SOAPProxy
...
SOAPProxy(...)
instead.
""", DeprecationWarning)
| Python |
__version__="0.12.0"
| Python |
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Utilities.py,v 1.4 2004/01/31 04:20:06 warnes Exp $'
from version import __version__
import exceptions
import copy
import re
import string
import sys
from types import *
# SOAPpy modules
from Errors import *
################################################################################
# Utility infielders
################################################################################
def collapseWhiteSpace(s):
    """Collapse each run of whitespace in `s` to a single space and strip
    leading/trailing whitespace (XSD whiteSpace="collapse" semantics)."""
    collapsed = re.sub('\s+', ' ', s)
    return collapsed.strip()
def decodeHexString(data):
    """Decode a hex-encoded string into its raw byte string.

    Leading and trailing whitespace is allowed; whitespace between digit
    pairs is not.  Raises ValueError for non-hex characters or an odd
    number of digits.

    Fix: the trailing-garbage error previously reported the stale loop
    variable `c` -- by that point an integer nibble from the conversion
    table -- instead of the offending character.
    """
    conv = {
             '0': 0x0, '1': 0x1, '2': 0x2, '3': 0x3, '4': 0x4,
             '5': 0x5, '6': 0x6, '7': 0x7, '8': 0x8, '9': 0x9,

             'a': 0xa, 'b': 0xb, 'c': 0xc, 'd': 0xd, 'e': 0xe,
             'f': 0xf,

             'A': 0xa, 'B': 0xb, 'C': 0xc, 'D': 0xd, 'E': 0xe,
             'F': 0xf,
           }

    ws = string.whitespace

    bin = ''

    i = 0

    # Skip leading whitespace.
    while i < len(data):
        if data[i] not in ws:
            break
        i += 1

    low = 0

    # Consume hex digits, pairing a high and a low nibble per output byte,
    # until whitespace or end of input.
    while i < len(data):
        c = data[i]

        if c in ws:
            break

        try:
            c = conv[c]
        except KeyError:
            raise ValueError("invalid hex string character `%s'" % c)

        if low:
            bin += chr(high * 16 + c)
            low = 0
        else:
            high = c
            low = 1

        i += 1

    if low:
        raise ValueError("invalid hex string length")

    # Anything left must be whitespace only.
    while i < len(data):
        if data[i] not in ws:
            # Report the actual offending character (bug fix).
            raise ValueError("invalid hex string character `%s'" % data[i])
        i += 1

    return bin
def encodeHexString(data):
    """Return the uppercase two-digit hex encoding of a byte string."""
    return ''.join(["%02X" % ord(octet) for octet in data])
def leapMonth(year, month):
    """Return true iff (year, month) is February of a Gregorian leap year."""
    if month != 2:
        return False
    if year % 4 != 0:
        return False
    # Century years are leap only when divisible by 400.
    return year % 100 != 0 or year % 400 == 0
def cleanDate(d, first = 0):
    """Validate and normalize, in place, a 6-element date list
    [year, month, day, hours, minutes, seconds], checking only fields
    from index `first` onward.

    Floats must be integral (except seconds); longs are downcast to int
    when possible.  Raises ValueError for out-of-range values and
    TypeError for unsupported field types.
    """
    ranges = (None, (1, 12), (1, 31), (0, 23), (0, 59), (0, 61))
    months = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    names = ('year', 'month', 'day', 'hours', 'minutes', 'seconds')

    if len(d) != 6:
        raise ValueError, "date must have 6 elements"

    for i in range(first, 6):
        s = d[i]

        if type(s) == FloatType:
            if i < 5:
                try:
                    s = int(s)
                except OverflowError:
                    # Only the year may exceed the int range.
                    if i > 0:
                        raise
                    s = long(s)

                if s != d[i]:
                    raise ValueError, "%s must be integral" % names[i]

                d[i] = s
        elif type(s) == LongType:
            try: s = int(s)
            except: pass
        elif type(s) != IntType:
            raise TypeError, "%s isn't a valid type" % names[i]

        # The most-significant checked field may be negative (BC years,
        # negated durations); skip its range check.
        if i == first and s < 0:
            continue

        if ranges[i] != None and \
           (s < ranges[i][0] or ranges[i][1] < s):
            raise ValueError, "%s out of range" % names[i]

    # Upper bound 61 above allows leap seconds; recheck the exact limit.
    if first < 6 and d[5] >= 61:
        raise ValueError, "seconds out of range"

    if first < 2:
        leap = first < 1 and leapMonth(d[0], d[1])

        if d[2] > months[d[1]] + leap:
            raise ValueError, "day out of range"
def debugHeader(title):
    """Print a 72-column banner opening a titled debug section."""
    s = '*** ' + title + ' '
    print s + ('*' * (72 - len(s)))

def debugFooter(title):
    """Print the closing rule of a debug section and flush stdout."""
    print '*' * 72
    sys.stdout.flush()
| Python |
"""Provide a class for loading data from URL's that handles basic
authentication"""
ident = '$Id: URLopener.py,v 1.2 2004/01/31 04:20:06 warnes Exp $'
from version import __version__
from Config import Config
from urllib import FancyURLopener
class URLopener(FancyURLopener):
    """A FancyURLopener that answers basic-authentication challenges with
    the username/password supplied at construction time instead of
    prompting interactively."""
    # Class-level defaults when no credentials are supplied.
    username = None
    passwd = None

    def __init__(self, username=None, passwd=None, *args, **kw):
        FancyURLopener.__init__( self, *args, **kw)
        self.username = username
        self.passwd = passwd

    def prompt_user_passwd(self, host, realm):
        # Called by FancyURLopener on a 401/407; return stored credentials.
        return self.username, self.passwd
| Python |
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: Server.py,v 1.21 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
#import xml.sax
import re
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
import thread
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import Config
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
ident = '$Id: Server.py,v 1.21 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
################################################################################
# Call context dictionary
################################################################################
_contexts = dict()
def GetSOAPContext():
    """Return the SOAPContext registered for the calling thread.

    Raises KeyError when called from a thread that is not currently
    serving a SOAP request.
    """
    global _contexts
    return _contexts[thread.get_ident()]
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
    """Wrapper attaching call-signature metadata to a registered function.

    keywords -- call the function with the SOAP parameters as keyword
                arguments instead of ordered parameters
    context  -- the function expects a SOAPContext
    """
    def __init__(self, func, keywords=0, context=0):
        self.func = func
        self.keywords = keywords
        self.context = context
        self.__name__ = func.__name__

    def __call__(self, *args, **kw):
        # apply() is deprecated (and absent in Python 3); the */** call
        # syntax is exactly equivalent on every supported interpreter.
        return self.func(*args, **kw)
class SOAPContext:
    """Everything known about the request currently being served; made
    available to handlers via GetSOAPContext()."""
    def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
                 soapaction):
        self.header = header            # parsed SOAP-ENV:Header (or None)
        self.body = body                # parsed SOAP-ENV:Body
        self.attrs = attrs              # id(value) -> attribute dict
        self.xmldata = xmldata          # raw request XML
        self.connection = connection    # client socket/connection object
        self.httpheaders= httpheaders   # HTTP request headers
        self.soapaction = soapaction    # SOAPAction HTTP header value
# A class to describe how header messages are handled
# A class to describe how header messages are handled
class HeaderHandler:
    # Initially fail out if there are any problems.
    def __init__(self, header, attrs):
        """Fault the request if any header entry carries
        mustUnderstand="1" -- this server understands none of them."""
        for i in header.__dict__.keys():
            if i[0] == "_":
                continue

            d = getattr(header, i)

            try:
                fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
            except:
                # Missing or non-numeric attribute: not mandatory.
                fault = 0

            if fault:
                raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
                                  "Required Header Misunderstood",
                                  "%s" % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServerBase:
    """Mix-in providing registration of SOAP-callable functions and
    objects, shared by the concrete server classes below.

    Expects the concrete class to set: namespace, objmap, funcmap,
    ssl_context (see SOAPServer.__init__ and friends).
    """

    def get_request(self):
        """Accept one connection, wrapping it in SSL when configured."""
        sock, addr = SocketServer.TCPServer.get_request(self)

        if self.ssl_context:
            sock = SSL.Connection(self.ssl_context, sock)
            sock._setup_ssl(addr)
            if sock.accept_ssl() != 1:
                # Call form of raise is valid on Python 2 and 3 alike
                # (the old ``raise E, msg`` form is Python-2-only).
                raise socket.error("Couldn't accept SSL connection")

        return sock, addr

    def _resolve_namespace(self, namespace, path):
        # Shared precedence rule: explicit namespace > namespace
        # derived from the URL path > the server's default namespace.
        if namespace == '' and path == '':
            namespace = self.namespace
        if namespace == '' and path != '':
            namespace = path.replace("/", ":")
            if namespace[0] == ":":
                namespace = namespace[1:]
        return namespace

    def registerObject(self, object, namespace = '', path = ''):
        """Expose all public methods of ``object`` under a namespace."""
        namespace = self._resolve_namespace(namespace, path)
        self.objmap[namespace] = object

    def registerFunction(self, function, namespace = '', funcName = None,
                         path = ''):
        """Expose a single callable, optionally under another name."""
        if not funcName:
            funcName = function.__name__
        namespace = self._resolve_namespace(namespace, path)
        # ``dict.has_key`` is deprecated; ``in`` is the equivalent.
        if namespace in self.funcmap:
            self.funcmap[namespace][funcName] = function
        else:
            self.funcmap[namespace] = {funcName: function}

    def registerKWObject(self, object, namespace = '', path = ''):
        """Like registerObject, but methods are called with keywords."""
        namespace = self._resolve_namespace(namespace, path)
        for i in dir(object.__class__):
            if i[0] != "_" and callable(getattr(object, i)):
                self.registerKWFunction(getattr(object, i), namespace)

    # convenience - wraps your func for you.
    def registerKWFunction(self, function, namespace = '', funcName = None,
                           path = ''):
        """Like registerFunction, but the call uses keyword arguments."""
        namespace = self._resolve_namespace(namespace, path)
        self.registerFunction(MethodSig(function, keywords=1), namespace,
                              funcName)

    def unregisterObject(self, object, namespace = '', path = ''):
        """Remove a previously registered object's namespace entry."""
        namespace = self._resolve_namespace(namespace, path)
        del self.objmap[namespace]
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP request handler implementing the SOAP server side.

    POST requests are parsed as SOAP RPC calls and dispatched to the
    functions/objects registered on the owning server (SOAPServerBase);
    GET requests are honored only for WSDL retrieval.
    """

    def version_string(self):
        # Value advertised in the HTTP "Server:" response header.
        return '<a href="http://pywebsvcs.sf.net">' + \
            'SOAPpy ' + __version__ + '</a> (Python ' + \
            sys.version.split()[0] + ')'

    def date_time_string(self):
        # Remember the generated value so the header-dump debug output
        # in do_POST can reprint the exact "Date:" header that was sent.
        self.__last_date_time_string = \
            BaseHTTPServer.BaseHTTPRequestHandler.\
            date_time_string(self)
        return self.__last_date_time_string

    def do_POST(self):
        """Parse and dispatch one SOAP call, then write the response.

        status 200 carries a SOAP result; status 500 carries either a
        serialized SOAP fault or, for internal errors, a bare HTTP
        error response.
        """
        global _contexts

        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args = r._aslist()
            kw = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.
            #
            # This is a non-standard extension to the SOAP protocol,
            # but is supported by Apache AXIS.
            #
            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.
            if Config.specialArgs:
                ordered_args = {}
                named_args = {}

                for (k,v) in kw.items():
                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v
                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]

            # authorization method
            a = None

            # NOTE(review): ordered_args/named_args are only bound when
            # Config.specialArgs is true; with specialArgs disabled the
            # next line raises NameError -- confirm intended.
            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)

            resp = ""

            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                            self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]

                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                # Lookup failed: answer with a Method Not Found fault.
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    # del breaks the traceback reference cycle.
                    del info
                status = 500
            else:
                try:
                    # Raises a MustUnderstand fault for unhandled
                    # mustUnderstand header entries.
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context book keeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one, someday, we'll set that on the client
                    # and it won't be necessary here
                    # for now we're doing both
                    #
                    # NOTE(review): the membership test lowercases only
                    # the literal, not the header keys -- confirm this
                    # matches how self.headers normalizes key case.
                    if "SOAPAction".lower() not in self.headers.keys() or \
                        self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method

                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)

                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None

                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            strkw = {}

                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})
                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    # A voidType result is serialized bare; anything
                    # else is wrapped in a {'Result': ...} struct.
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                                         encoding = self.server.encoding,
                                         config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                                         {'%sResponse' % method: {'Result': fr}},
                                         encoding = self.server.encoding,
                                         config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]
                except Exception, e:
                    # Method raised: serialize a fault (reusing the
                    # raised faultType when there is one).
                    import traceback
                    info = sys.exc_info()
                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                                     config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            # Fault raised outside method dispatch (e.g. by
            # HeaderHandler): return it as the SOAP response.
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                                              info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                        info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                             config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error
            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info
                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml';
            if self.server.encoding != None:
                t += '; charset="%s"' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connections drops the output, so this work-around.
            # This should be investigated more someday.
            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                                             SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):
        """Serve WSDL on GET; otherwise send an explanatory HTML page.

        A URL path ending in "wsdl" is mapped to a registered "wsdl"
        function or object method (in the default namespace), whose
        string result is returned as text/plain.
        """
        #print 'command ', self.command
        #print 'path ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print ' type ', self.headers.type
        #print ' maintype', self.headers.maintype
        #print ' subtype ', self.headers.subtype
        #print ' params ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
                and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<title>
<head>Error!</head>
</title>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested an URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>

</body>''')

    def log_message(self, format, *args):
        # Honor the server's ``log`` flag; silent when logging is off.
        if self.server.log:
            BaseHTTPServer.BaseHTTPRequestHandler.\
            log_message (self, format, *args)
class SOAPServer(SOAPServerBase, SocketServer.TCPServer):
    """Single-threaded SOAP-over-HTTP(S) server.

    addr        -- (host, port) to bind
    encoding    -- response encoding; validated eagerly below
    ssl_context -- M2Crypto SSL context; requires config.SSLserver
    """
    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
        config = Config, namespace = None, ssl_context = None):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        if ssl_context != None and not config.SSLserver:
            # Call form of raise is valid on Python 2 and 3 alike
            # (the old ``raise E, msg`` statement is Python-2-only).
            raise AttributeError(
                "SSL server not supported by this Python installation")

        self.namespace   = namespace
        self.objmap      = {}
        self.funcmap     = {}
        self.ssl_context = ssl_context
        self.encoding    = encoding
        self.config      = config
        self.log         = log

        self.allow_reuse_address = 1

        SocketServer.TCPServer.__init__(self, addr, RequestHandler)
class ThreadingSOAPServer(SOAPServerBase, SocketServer.ThreadingTCPServer):
    """Thread-per-request variant of SOAPServer; same configuration."""
    def __init__(self, addr = ('localhost', 8000),
        RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
        config = Config, namespace = None, ssl_context = None):

        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        if ssl_context != None and not config.SSLserver:
            # Call form of raise is valid on Python 2 and 3 alike.
            raise AttributeError(
                "SSL server not supported by this Python installation")

        self.namespace   = namespace
        self.objmap      = {}
        self.funcmap     = {}
        self.ssl_context = ssl_context
        self.encoding    = encoding
        self.config      = config
        self.log         = log

        self.allow_reuse_address = 1

        SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandler)
# only define class if Unix domain sockets are available
if hasattr(socket, "AF_UNIX"):
    class SOAPUnixSocketServer(SOAPServerBase, SocketServer.UnixStreamServer):
        """SOAP server listening on a Unix domain socket; ``addr`` is
        the socket path (stringified before binding)."""
        def __init__(self, addr = 8000,
            RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
            config = Config, namespace = None, ssl_context = None):

            # Test the encoding, raising an exception if it's not known
            if encoding != None:
                ''.encode(encoding)

            if ssl_context != None and not config.SSLserver:
                # Call form of raise is valid on Python 2 and 3 alike.
                raise AttributeError(
                    "SSL server not supported by this Python installation")

            self.namespace   = namespace
            self.objmap      = {}
            self.funcmap     = {}
            self.ssl_context = ssl_context
            self.encoding    = encoding
            self.config      = config
            self.log         = log

            self.allow_reuse_address = 1

            SocketServer.UnixStreamServer.__init__(self, str(addr), RequestHandler)
| Python |
ident = '$Id: __init__.py,v 1.9 2004/01/31 04:20:06 warnes Exp $'
from version import __version__
from Client import *
from Config import *
from Errors import *
from NS import *
from Parser import *
from SOAPBuilder import *
from Server import *
from Types import *
from Utilities import *
import wstools
import WSDL
| Python |
#! /usr/bin/env python
"""Logging"""
import sys
class ILogger:
    """Logger interface.

    This default implementation turns every logging call into a no-op,
    so client code can log unconditionally without any configuration.
    """

    level = 0  # shared verbosity threshold for all logger classes

    def __init__(self, msg):
        pass

    def warning(self, *args):
        pass

    def debug(self, *args):
        pass

    def error(self, *args):
        pass

    def setLevel(cls, level):
        cls.level = level
    setLevel = classmethod(setLevel)
# Factory used by getLogger(); replaced via setLoggerClass().
_LoggerClass = ILogger
class BasicLogger(ILogger):
    """Logger that writes level-gated messages to a stream (stdout by
    default).  Output goes through Python 2 ``print >>`` redirection,
    which calls self.write() below."""

    def __init__(self, msg, out=sys.stdout):
        # msg is a fixed prefix (e.g. a component name) printed with
        # every message; out is any object with a write() method.
        self.msg, self.out = msg, out

    def warning(self, msg, *args):
        # Emitted only at level >= 1.
        if self.level < 1: return
        print >>self, self.WARN, self.msg,
        print >>self, msg %args
    WARN = 'WARN'
    def debug(self, msg, *args):
        # Emitted only at level >= 2.
        if self.level < 2: return
        print >>self, self.DEBUG, self.msg,
        print >>self, msg %args
    DEBUG = 'DEBUG'
    def error(self, msg, *args):
        # Errors are always emitted, regardless of level.
        print >>self, self.ERROR, self.msg,
        print >>self, msg %args
    ERROR = 'ERROR'

    def write(self, *args):
        '''Write convenience function; writes strings.
        '''
        for s in args: self.out.write(s)
def setBasicLogger():
    '''Install BasicLogger as the global logger class, silenced
    (level 0).
    '''
    BasicLogger.setLevel(0)
    setLoggerClass(BasicLogger)
def setBasicLoggerWARN():
    '''Install BasicLogger as the global logger class, emitting
    warnings and errors (level 1).
    '''
    BasicLogger.setLevel(1)
    setLoggerClass(BasicLogger)
def setBasicLoggerDEBUG():
    '''Install BasicLogger as the global logger class, emitting
    debug output, warnings and errors (level 2).
    '''
    BasicLogger.setLevel(2)
    setLoggerClass(BasicLogger)
def setLoggerClass(loggingClass):
    '''Set the global logging class used by getLogger().

    Raises TypeError if loggingClass does not subclass ILogger.
    (Previously enforced with ``assert``, which is silently stripped
    when Python runs with -O.)
    '''
    if not issubclass(loggingClass, ILogger):
        raise TypeError('loggingClass must subclass ILogger')
    global _LoggerClass
    _LoggerClass = loggingClass
def setLevel(level=0):
    '''Set Global Logging Level.

    The level is stored on ILogger, so it is shared by every logger
    class that does not override it.
    '''
    ILogger.level = level
def getLogger(msg):
    '''Return an instance of the currently configured logging class
    (see setLoggerClass), initialized with *msg*.
    '''
    return _LoggerClass(msg)
| Python |
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id: WSDLTools.py,v 1.32 2005/02/07 17:07:31 irjudson Exp $"
import urllib, weakref
from cStringIO import StringIO
from Namespaces import OASIS, XMLNS, WSA200408, WSA200403, WSA200303
from Utility import Collection, CollectionNS, DOM, ElementProxy
from XMLSchema import XMLSchema, SchemaReader, WSDLToolsAdapter
class WSDLReader:
    """A WSDLReader creates WSDL instances from urls and xml data."""

    # Custom subclasses of WSDLReader may wish to implement a caching
    # strategy or other optimizations. Because application needs vary
    # so widely, we don't try to provide any caching by default.

    def loadFromStream(self, stream, name=None):
        """Return a WSDL instance loaded from a stream object."""
        document = DOM.loadDocument(stream)
        wsdl = WSDL()
        # Record a location (explicit name, or the stream's own name)
        # before load(), since import resolution uses it.
        wsdl.location = name or getattr(stream, 'name', None)
        wsdl.load(document)
        return wsdl

    def loadFromURL(self, url):
        """Return a WSDL instance loaded from the given url."""
        wsdl = WSDL()
        wsdl.location = url
        wsdl.load(DOM.loadFromURL(url))
        return wsdl

    def loadFromString(self, data):
        """Return a WSDL instance loaded from an xml string."""
        return self.loadFromStream(StringIO(data))

    def loadFromFile(self, filename):
        """Return a WSDL instance loaded from the given file."""
        fp = open(filename, 'rb')
        try:
            return self.loadFromStream(fp)
        finally:
            fp.close()
class WSDL:
    """A WSDL object models a WSDL service description. WSDL objects
    may be created manually or loaded from an xml representation
    using a WSDLReader instance."""

    def __init__(self, targetNamespace=None, strict=1):
        self.targetNamespace = targetNamespace or 'urn:this-document.wsdl'
        self.documentation = ''
        self.location = None
        self.document = None  # DOM document; kept for namespace context
        self.name = None
        self.services = CollectionNS(self)
        self.messages = CollectionNS(self)
        self.portTypes = CollectionNS(self)
        self.bindings = CollectionNS(self)
        self.imports = Collection(self)
        self.types = Types(self)
        self.extensions = []
        self.strict = strict

    def __del__(self):
        if self.document is not None:
            self.document.unlink()

    version = '1.1'

    def addService(self, name, documentation='', targetNamespace=None):
        """Create, register and return a Service; raises WSDLError on
        a duplicate name."""
        if self.services.has_key(name):
            raise WSDLError(
                'Duplicate service element: %s' % name
                )
        item = Service(name, documentation)
        if targetNamespace:
            item.targetNamespace = targetNamespace
        self.services[name] = item
        return item

    def addMessage(self, name, documentation='', targetNamespace=None):
        """Create, register and return a Message; raises WSDLError on
        a duplicate name."""
        if self.messages.has_key(name):
            raise WSDLError(
                'Duplicate message element: %s.' % name
                )
        item = Message(name, documentation)
        if targetNamespace:
            item.targetNamespace = targetNamespace
        self.messages[name] = item
        return item

    def addPortType(self, name, documentation='', targetNamespace=None):
        """Create, register and return a PortType; raises WSDLError on
        a duplicate name."""
        if self.portTypes.has_key(name):
            # BUG FIX: the message previously was the literal string
            # 'Duplicate portType element: name' (no interpolation).
            raise WSDLError(
                'Duplicate portType element: %s' % name
                )
        item = PortType(name, documentation)
        if targetNamespace:
            item.targetNamespace = targetNamespace
        self.portTypes[name] = item
        return item

    def addBinding(self, name, type, documentation='', targetNamespace=None):
        """Create, register and return a Binding; raises WSDLError on
        a duplicate name."""
        if self.bindings.has_key(name):
            raise WSDLError(
                'Duplicate binding element: %s' % name
                )
        item = Binding(name, type, documentation)
        if targetNamespace:
            item.targetNamespace = targetNamespace
        self.bindings[name] = item
        return item

    def addImport(self, namespace, location):
        """Register and return an ImportElement for namespace/location."""
        item = ImportElement(namespace, location)
        self.imports[namespace] = item
        return item

    def toDom(self):
        """ Generate a DOM representation of the WSDL instance.
        Not dealing with generating XML Schema, thus the targetNamespace
        of all XML Schema elements or types used by WSDL message parts
        needs to be specified via import information items.
        """
        namespaceURI = DOM.GetWSDLUri(self.version)
        self.document = DOM.createDocument(namespaceURI ,'wsdl:definitions')

        # Set up a couple prefixes for easy reading.
        child = DOM.getElement(self.document, None)
        child.setAttributeNS(None, 'targetNamespace', self.targetNamespace)
        child.setAttributeNS(XMLNS.BASE, 'xmlns:wsdl', namespaceURI)
        child.setAttributeNS(XMLNS.BASE, 'xmlns:xsd', 'http://www.w3.org/1999/XMLSchema')
        child.setAttributeNS(XMLNS.BASE, 'xmlns:soap', 'http://schemas.xmlsoap.org/wsdl/soap/')
        child.setAttributeNS(XMLNS.BASE, 'xmlns:tns', self.targetNamespace)

        # wsdl:import
        for item in self.imports:
            item.toDom()
        # wsdl:message
        for item in self.messages:
            item.toDom()
        # wsdl:portType
        for item in self.portTypes:
            item.toDom()
        # wsdl:binding
        for item in self.bindings:
            item.toDom()
        # wsdl:service
        for item in self.services:
            item.toDom()

    def load(self, document):
        """Populate this instance from a parsed WSDL DOM document."""
        # We save a reference to the DOM document to ensure that elements
        # saved as "extensions" will continue to have a meaningful context
        # for things like namespace references. The lifetime of the DOM
        # document is bound to the lifetime of the WSDL instance.
        self.document = document

        definitions = DOM.getElement(document, 'definitions', None, None)
        if definitions is None:
            raise WSDLError(
                'Missing <definitions> element.'
                )
        self.version = DOM.WSDLUriToVersion(definitions.namespaceURI)
        NS_WSDL = DOM.GetWSDLUri(self.version)

        self.targetNamespace = DOM.getAttr(definitions, 'targetNamespace',
                                           None, None)
        self.name = DOM.getAttr(definitions, 'name', None, None)
        self.documentation = GetDocumentation(definitions)

        # Resolve (recursively) any import elements in the document.
        # _import() splices the imported children into `definitions`,
        # which may introduce new <import> elements -- hence the while.
        imported = {}
        base_location = self.location
        while len(DOM.getElements(definitions, 'import', NS_WSDL)):
            for element in DOM.getElements(definitions, 'import', NS_WSDL):
                location = DOM.getAttr(element, 'location')
                location = urllib.basejoin(base_location, location)
                self._import(self.document, element, base_location)

        #reader = SchemaReader(base_url=self.location)
        for element in DOM.getElements(definitions, None, None):
            targetNamespace = DOM.getAttr(element, 'targetNamespace')
            localName = element.localName

            if not DOM.nsUriMatch(element.namespaceURI, NS_WSDL):
                # Non-WSDL elements: inline schemas are parsed, anything
                # else is kept verbatim as an extension.
                if localName == 'schema':
                    reader = SchemaReader(base_url=self.location)
                    schema = reader.loadFromNode(WSDLToolsAdapter(self), element)
                    schema.setBaseUrl(self.location)
                    self.types.addSchema(schema)
                else:
                    self.extensions.append(element)
                continue
            elif localName == 'message':
                name = DOM.getAttr(element, 'name')
                docs = GetDocumentation(element)
                message = self.addMessage(name, docs, targetNamespace)
                parts = DOM.getElements(element, 'part', NS_WSDL)
                message.load(parts)
                continue
            elif localName == 'portType':
                name = DOM.getAttr(element, 'name')
                docs = GetDocumentation(element)
                ptype = self.addPortType(name, docs, targetNamespace)
                #operations = DOM.getElements(element, 'operation', NS_WSDL)
                #ptype.load(operations)
                ptype.load(element)
                continue
            elif localName == 'binding':
                name = DOM.getAttr(element, 'name')
                type = DOM.getAttr(element, 'type', default=None)
                if type is None:
                    raise WSDLError(
                        'Missing type attribute for binding %s.' % name
                        )
                type = ParseQName(type, element)
                docs = GetDocumentation(element)
                binding = self.addBinding(name, type, docs, targetNamespace)
                operations = DOM.getElements(element, 'operation', NS_WSDL)
                binding.load(operations)
                binding.load_ex(GetExtensions(element))
                continue
            elif localName == 'service':
                name = DOM.getAttr(element, 'name')
                docs = GetDocumentation(element)
                service = self.addService(name, docs, targetNamespace)
                ports = DOM.getElements(element, 'port', NS_WSDL)
                service.load(ports)
                service.load_ex(GetExtensions(element))
                continue
            elif localName == 'types':
                self.types.documentation = GetDocumentation(element)
                # base-location is a temporary attribute stamped by
                # _import() so relative schema refs resolve correctly.
                base_location = DOM.getAttr(element, 'base-location')
                if base_location:
                    element.removeAttribute('base-location')
                base_location = base_location or self.location
                reader = SchemaReader(base_url=base_location)
                for item in DOM.getElements(element, None, None):
                    if item.localName == 'schema':
                        schema = reader.loadFromNode(WSDLToolsAdapter(self), item)
                        # XXX <types> could have been imported
                        #schema.setBaseUrl(self.location)
                        schema.setBaseUrl(base_location)
                        self.types.addSchema(schema)
                    else:
                        self.types.addExtension(item)
                # XXX remove the attribute
                # element.removeAttribute('base-location')
                continue

    def _import(self, document, element, base_location=None):
        '''Algo take <import> element's children, clone them,
        and add them to the main document.  Support for relative
        locations is a bit complicated.  The orig document context
        is lost, so we need to store base location in DOM elements
        representing <types>, by creating a special temporary
        "base-location" attribute, and <import>, by resolving
        the relative "location" and storing it as "location".

        document -- document we are loading
        element -- DOM Element representing <import>
        base_location -- location of document from which this
            <import> was gleaned.
        '''
        namespace = DOM.getAttr(element, 'namespace', default=None)
        location = DOM.getAttr(element, 'location', default=None)
        if namespace is None or location is None:
            raise WSDLError(
                'Invalid import element (missing namespace or location).'
                )
        if base_location:
            location = urllib.basejoin(base_location, location)
            element.setAttributeNS(None, 'location', location)

        obimport = self.addImport(namespace, location)
        obimport._loaded = 1

        importdoc = DOM.loadFromURL(location)
        try:
            if location.find('#') > -1:
                # Fragment identifier: import a specific element by id.
                idref = location.split('#')[-1]
                imported = DOM.getElementById(importdoc, idref)
            else:
                imported = importdoc.documentElement
            if imported is None:
                raise WSDLError(
                    'Import target element not found for: %s' % location
                    )

            imported_tns = DOM.findTargetNS(imported)
            if imported_tns != namespace:
                # Namespace mismatch: silently skip the import.
                return

            if imported.localName == 'definitions':
                imported_nodes = imported.childNodes
            else:
                imported_nodes = [imported]
            parent = element.parentNode
            parent.removeChild(element)

            for node in imported_nodes:
                if node.nodeType != node.ELEMENT_NODE:
                    continue
                child = DOM.importNode(document, node, 1)
                parent.appendChild(child)
                child.setAttribute('targetNamespace', namespace)
                # Carry over xmlns declarations from the imported root.
                attrsNS = imported._attrsNS
                for attrkey in attrsNS.keys():
                    if attrkey[0] == DOM.NS_XMLNS:
                        attr = attrsNS[attrkey].cloneNode(1)
                        child.setAttributeNode(attr)

                #XXX Quick Hack, should be in WSDL Namespace.
                if child.localName == 'import':
                    rlocation = child.getAttributeNS(None, 'location')
                    alocation = urllib.basejoin(location, rlocation)
                    child.setAttribute('location', alocation)
                elif child.localName == 'types':
                    child.setAttribute('base-location', location)
        finally:
            importdoc.unlink()
        return location
class Element:
    """Common base for WSDL element classes: carries a name, a
    documentation string and a list of extension elements."""

    def __init__(self, name=None, documentation=''):
        self.extensions = []
        self.name = name
        self.documentation = documentation

    def addExtension(self, item):
        # The extension keeps only a weak back-reference to its parent,
        # avoiding a strong reference cycle.
        item.parent = weakref.ref(self)
        self.extensions.append(item)
class ImportElement(Element):
    """Models a <wsdl:import> information item."""

    _loaded = None  # becomes truthy once the import target is merged in

    def __init__(self, namespace, location):
        self.namespace = namespace
        self.location = location

    def getWSDL(self):
        """Return the WSDL object that contains this import."""
        return self.parent().parent()

    def toDom(self):
        # Append a wsdl:import child to the document's root element.
        owner = self.getWSDL()
        proxy = ElementProxy(None, DOM.getElement(owner.document, None))
        node = proxy.createAppendElement(DOM.GetWSDLUri(owner.version),
                                         'import')
        node.setAttributeNS(None, 'namespace', self.namespace)
        node.setAttributeNS(None, 'location', self.location)
class Types(Collection):
    """Collection of schemas found under <wsdl:types>, keyed by
    targetNamespace, plus any non-schema extension elements."""

    default = lambda self, k: k.targetNamespace

    def __init__(self, parent):
        Collection.__init__(self, parent)
        self.documentation = ''
        self.extensions = []

    def addSchema(self, schema):
        key = schema.targetNamespace
        self[key] = schema
        return schema

    def addExtension(self, item):
        self.extensions.append(item)
class Message(Element):
    """A WSDL <message>: a named collection of <part> elements."""

    def __init__(self, name, documentation=''):
        Element.__init__(self, name, documentation)
        self.parts = Collection(self)

    def addPart(self, name, type=None, element=None):
        """Create and return a MessagePart; raises WSDLError on a
        duplicate name or when neither type nor element is given."""
        if self.parts.has_key(name):
            raise WSDLError(
                'Duplicate message part element: %s' % name
                )
        if type is None and element is None:
            raise WSDLError(
                'Missing type or element attribute for part: %s' % name
                )
        item = MessagePart(name)
        item.element = element
        item.type = type
        self.parts[name] = item
        return item

    def load(self, elements):
        """Populate self.parts from a list of <part> DOM elements."""
        for element in elements:
            name = DOM.getAttr(element, 'name')
            part = MessagePart(name)
            self.parts[name] = part
            elemref = DOM.getAttr(element, 'element', default=None)
            typeref = DOM.getAttr(element, 'type', default=None)
            if typeref is None and elemref is None:
                raise WSDLError(
                    'No type or element attribute for part: %s' % name
                    )
            if typeref is not None:
                part.type = ParseTypeRef(typeref, element)
            if elemref is not None:
                part.element = ParseTypeRef(elemref, element)

    # NOTE(review): Message itself never assigns self.element or
    # self.type (only MessagePart does); the two helpers below appear
    # to rely on those attributes being set externally -- confirm.
    def getElementDeclaration(self):
        """Return the XMLSchema.ElementDeclaration instance or None"""
        element = None
        if self.element:
            nsuri,name = self.element
            wsdl = self.getWSDL()
            if wsdl.types.has_key(nsuri) and wsdl.types[nsuri].elements.has_key(name):
                element = wsdl.types[nsuri].elements[name]
        return element

    def getTypeDefinition(self):
        """Return the XMLSchema.TypeDefinition instance or None"""
        type = None
        if self.type:
            nsuri,name = self.type
            wsdl = self.getWSDL()
            if wsdl.types.has_key(nsuri) and wsdl.types[nsuri].types.has_key(name):
                type = wsdl.types[nsuri].types[name]
        return type

    def getWSDL(self):
        """Return the WSDL object that contains this Message."""
        return self.parent().parent()

    def toDom(self):
        # Append a wsdl:message element (with its parts) to the
        # document's root element.
        wsdl = self.getWSDL()
        ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'message')
        epc.setAttributeNS(None, 'name', self.name)

        for part in self.parts:
            part.toDom(epc._getNode())
class MessagePart(Element):
    """A single <part> of a WSDL message.  References either a global
    element declaration or a type definition (as a (namespaceURI, name)
    pair), not both."""

    def __init__(self, name):
        Element.__init__(self, name, '')
        self.element = None  # (nsuri, name) of referenced element, or None
        self.type = None     # (nsuri, name) of referenced type, or None

    def getWSDL(self):
        """Return the WSDL object that contains this Message Part."""
        return self.parent().parent().parent().parent()

    def getTypeDefinition(self):
        # Resolve the referenced type against the loaded schemas;
        # returns None when the schema or the name is absent.
        wsdl = self.getWSDL()
        nsuri,name = self.type
        schema = wsdl.types.get(nsuri, {})
        return schema.get(name)

    def getElementDeclaration(self):
        # Same lookup as getTypeDefinition, but for the element ref.
        wsdl = self.getWSDL()
        nsuri,name = self.element
        schema = wsdl.types.get(nsuri, {})
        return schema.get(name)

    def toDom(self, node):
        """node -- node representing message"""
        wsdl = self.getWSDL()
        ep = ElementProxy(None, node)
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'part')
        epc.setAttributeNS(None, 'name', self.name)

        if self.element is not None:
            ns,name = self.element
            prefix = epc.getPrefix(ns)
            epc.setAttributeNS(None, 'element', '%s:%s'%(prefix,name))
        elif self.type is not None:
            ns,name = self.type
            prefix = epc.getPrefix(ns)
            epc.setAttributeNS(None, 'type', '%s:%s'%(prefix,name))
class PortType(Element):
    '''A wsdl:portType -- a named set of abstract operations.

    PortType has an anyAttribute, thus must provide for an extensible
    mechanism for supporting such attributes.  ResourceProperties is
    specified in WS-ResourceProperties.  wsa:Action is specified in
    WS-Address.

    Instance Data:
        name -- name attribute
        resourceProperties -- optional.  wsr:ResourceProperties attribute,
            value is a QName that is Parsed into a (namespaceURI, name)
            that represents a Global Element Declaration.
        operations -- Collection of Operation objects, keyed by name
    '''
    def __init__(self, name, documentation=''):
        Element.__init__(self, name, documentation)
        self.operations = Collection(self)
        self.resourceProperties = None
    def getWSDL(self):
        # Owning WSDL: self -> portTypes Collection holder -> WSDL.
        return self.parent().parent()
    def getTargetNamespace(self):
        # Fall back to the document-level targetNamespace when this
        # portType carries none of its own.
        return self.targetNamespace or self.getWSDL().targetNamespace
    def getResourceProperties(self):
        # Parsed (namespaceURI, name) of wsr:ResourceProperties, or None.
        return self.resourceProperties
    def addOperation(self, name, documentation='', parameterOrder=None):
        # Create an Operation, register it by name, and return it.
        item = Operation(name, documentation, parameterOrder)
        self.operations[name] = item
        return item
    def load(self, element):
        # Populate this portType from its <portType> DOM element.
        self.name = DOM.getAttr(element, 'name')
        self.documentation = GetDocumentation(element)
        self.targetNamespace = DOM.getAttr(element, 'targetNamespace')
        if DOM.hasAttr(element, 'ResourceProperties', OASIS.PROPERTIES):
            rpref = DOM.getAttr(element, 'ResourceProperties', OASIS.PROPERTIES)
            self.resourceProperties = ParseQName(rpref, element)
        # wsa:Action may come from any of several WS-Addressing versions;
        # the first version that supplies a value wins.
        lookfor = (WSA200408, WSA200403, WSA200303,)
        NS_WSDL = DOM.GetWSDLUri(self.getWSDL().version)
        elements = DOM.getElements(element, 'operation', NS_WSDL)
        # NOTE: the loop variable shadows the 'element' parameter; safe
        # here because the parameter is not used past this point.
        for element in elements:
            name = DOM.getAttr(element, 'name')
            docs = GetDocumentation(element)
            param_order = DOM.getAttr(element, 'parameterOrder', default=None)
            if param_order is not None:
                param_order = param_order.split(' ')
            operation = self.addOperation(name, docs, param_order)
            # <input> -- at most one per operation.
            item = DOM.getElement(element, 'input', None, None)
            if item is not None:
                name = DOM.getAttr(item, 'name')
                docs = GetDocumentation(item)
                msgref = DOM.getAttr(item, 'message')
                message = ParseQName(msgref, item)
                for WSA in lookfor:
                    action = DOM.getAttr(item, 'Action', WSA.ADDRESS, None)
                    if action: break
                operation.setInput(message, name, docs, action)
            # <output> -- at most one per operation.
            item = DOM.getElement(element, 'output', None, None)
            if item is not None:
                name = DOM.getAttr(item, 'name')
                docs = GetDocumentation(item)
                msgref = DOM.getAttr(item, 'message')
                message = ParseQName(msgref, item)
                for WSA in lookfor:
                    action = DOM.getAttr(item, 'Action', WSA.ADDRESS, None)
                    if action: break
                operation.setOutput(message, name, docs, action)
            # <fault> -- zero or more per operation.
            for item in DOM.getElements(element, 'fault', None):
                name = DOM.getAttr(item, 'name')
                docs = GetDocumentation(item)
                msgref = DOM.getAttr(item, 'message')
                message = ParseQName(msgref, item)
                for WSA in lookfor:
                    action = DOM.getAttr(item, 'Action', WSA.ADDRESS, None)
                    if action: break
                operation.addFault(message, name, docs, action)
    def toDom(self):
        # Serialize this portType (and its operations) into the document.
        wsdl = self.getWSDL()
        ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'portType')
        epc.setAttributeNS(None, 'name', self.name)
        if self.resourceProperties:
            ns,name = self.resourceProperties
            prefix = epc.getPrefix(ns)
            epc.setAttributeNS(OASIS.PROPERTIES, 'ResourceProperties', '%s:%s'%(prefix,name))
        for op in self.operations:
            op.toDom(epc._getNode())
class Operation(Element):
    """An abstract operation within a wsdl:portType.

    Holds the input/output MessageRole objects, a Collection of fault
    MessageRoles, and the optional parameterOrder list.
    """
    def __init__(self, name, documentation='', parameterOrder=None):
        Element.__init__(self, name, documentation)
        self.parameterOrder = parameterOrder
        self.faults = Collection(self)
        self.input = None
        self.output = None
    def getWSDL(self):
        """Return the WSDL object that contains this Operation."""
        return self.parent().parent().parent().parent()
    def getPortType(self):
        # Owning PortType: self -> operations Collection holder -> PortType.
        return self.parent().parent()
    def getInputAction(self):
        """wsa:Action attribute"""
        return GetWSAActionInput(self)
    def getInputMessage(self):
        # Resolve the input message QName against the WSDL, or None.
        if self.input is None:
            return None
        wsdl = self.getPortType().getWSDL()
        return wsdl.messages[self.input.message]
    def getOutputAction(self):
        """wsa:Action attribute"""
        return GetWSAActionOutput(self)
    def getOutputMessage(self):
        # Resolve the output message QName against the WSDL, or None.
        if self.output is None:
            return None
        wsdl = self.getPortType().getWSDL()
        return wsdl.messages[self.output.message]
    def getFaultAction(self, name):
        """wsa:Action attribute"""
        return GetWSAActionFault(self, name)
    def getFaultMessage(self, name):
        # Resolve the named fault's message QName against the WSDL.
        wsdl = self.getPortType().getWSDL()
        return wsdl.messages[self.faults[name].message]
    def addFault(self, message, name, documentation='', action=None):
        # Fault names must be unique within an operation.
        if self.faults.has_key(name):
            raise WSDLError(
                'Duplicate fault element: %s' % name
                )
        item = MessageRole('fault', message, name, documentation, action)
        self.faults[name] = item
        return item
    def setInput(self, message, name='', documentation='', action=None):
        # Replace the input role; the weakref lets the role walk back up.
        self.input = MessageRole('input', message, name, documentation, action)
        self.input.parent = weakref.ref(self)
        return self.input
    def setOutput(self, message, name='', documentation='', action=None):
        # Replace the output role; the weakref lets the role walk back up.
        self.output = MessageRole('output', message, name, documentation, action)
        self.output.parent = weakref.ref(self)
        return self.output
    def toDom(self, node):
        # Serialize this operation and its message roles beneath node.
        wsdl = self.getWSDL()
        ep = ElementProxy(None, node)
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'operation')
        epc.setAttributeNS(None, 'name', self.name)
        node = epc._getNode()
        if self.input:
            self.input.toDom(node)
        if self.output:
            self.output.toDom(node)
        for fault in self.faults:
            fault.toDom(node)
class MessageRole(Element):
    """An input/output/fault message reference within an Operation.

    Instance data:
        type -- role kind: 'input', 'output', or 'fault'
        message -- (namespaceURI, name) QName of the referenced Message
        action -- optional wsa:Action attribute value
    """
    def __init__(self, type, message, name='', documentation='', action=None):
        Element.__init__(self, name, documentation)
        self.message = message
        self.type = type
        self.action = action
    def getWSDL(self):
        """Return the WSDL object that contains this MessageRole."""
        # BUG FIX: the original tested self.parent().getWSDL() == 'fault',
        # comparing a WSDL object against a string -- never true, and it
        # broke WSDL lookup for fault roles.  Test the role type instead,
        # mirroring MessageRoleBinding.getWSDL: fault roles live inside a
        # Collection, which adds one extra parent hop.
        if self.type == 'fault':
            return self.parent().parent().getWSDL()
        return self.parent().getWSDL()
    def getMessage(self):
        """Return the WSDL object that represents the attribute message
        (namespaceURI, name) tuple
        """
        wsdl = self.getWSDL()
        return wsdl.messages[self.message]
    def toDom(self, node):
        """Serialize this role as an <input>/<output>/<fault> element."""
        wsdl = self.getWSDL()
        ep = ElementProxy(None, node)
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), self.type)
        epc.setAttributeNS(None, 'message', self.message)
        if self.action:
            epc.setAttributeNS(WSA200408.ADDRESS, 'Action', self.action)
class Binding(Element):
    """A wsdl:binding -- concrete protocol details for a portType.

    Instance data:
        type -- (namespaceURI, name) QName of the bound portType
        operations -- Collection of OperationBinding objects
    """
    def __init__(self, name, type, documentation=''):
        Element.__init__(self, name, documentation)
        self.operations = Collection(self)
        self.type = type
    def getWSDL(self):
        """Return the WSDL object that contains this binding."""
        return self.parent().parent()
    def getPortType(self):
        """Return the PortType object associated with this binding."""
        return self.getWSDL().portTypes[self.type]
    def findBinding(self, kind):
        # Return the first extension that is an instance of kind, or None.
        for item in self.extensions:
            if isinstance(item, kind):
                return item
        return None
    def findBindings(self, kind):
        # Return every extension that is an instance of kind.
        return [ item for item in self.extensions if isinstance(item, kind) ]
    def addOperationBinding(self, name, documentation=''):
        # Create an OperationBinding, register it by name, and return it.
        item = OperationBinding(name, documentation)
        self.operations[name] = item
        return item
    def load(self, elements):
        # Populate operation bindings from <operation> DOM elements.
        for element in elements:
            name = DOM.getAttr(element, 'name')
            docs = GetDocumentation(element)
            opbinding = self.addOperationBinding(name, docs)
            opbinding.load_ex(GetExtensions(element))
            # Per-role bindings for input / output / each fault.
            item = DOM.getElement(element, 'input', None, None)
            if item is not None:
                mbinding = MessageRoleBinding('input')
                mbinding.documentation = GetDocumentation(item)
                opbinding.input = mbinding
                mbinding.load_ex(GetExtensions(item))
            item = DOM.getElement(element, 'output', None, None)
            if item is not None:
                mbinding = MessageRoleBinding('output')
                mbinding.documentation = GetDocumentation(item)
                opbinding.output = mbinding
                mbinding.load_ex(GetExtensions(item))
            for item in DOM.getElements(element, 'fault', None):
                name = DOM.getAttr(item, 'name')
                mbinding = MessageRoleBinding('fault', name)
                mbinding.documentation = GetDocumentation(item)
                opbinding.faults[name] = mbinding
                mbinding.load_ex(GetExtensions(item))
    def load_ex(self, elements):
        # Parse binding-level extensibility elements (soap:/http: binding).
        for e in elements:
            ns, name = e.namespaceURI, e.localName
            if ns in DOM.NS_SOAP_BINDING_ALL and name == 'binding':
                transport = DOM.getAttr(e, 'transport', default=None)
                style = DOM.getAttr(e, 'style', default='document')
                ob = SoapBinding(transport, style)
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'binding':
                verb = DOM.getAttr(e, 'verb')
                ob = HttpBinding(verb)
                self.addExtension(ob)
                continue
            else:
                # Unknown extension: keep the raw DOM node.
                self.addExtension(e)
    def toDom(self):
        # Serialize this binding: extensions first, then operations.
        wsdl = self.getWSDL()
        ep = ElementProxy(None, DOM.getElement(wsdl.document, None))
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'binding')
        epc.setAttributeNS(None, 'name', self.name)
        ns,name = self.type
        prefix = epc.getPrefix(ns)
        epc.setAttributeNS(None, 'type', '%s:%s' %(prefix,name))
        node = epc._getNode()
        for ext in self.extensions:
            ext.toDom(node)
        for op_binding in self.operations:
            op_binding.toDom(node)
class OperationBinding(Element):
    """Protocol binding details for a single abstract operation.

    Instance data:
        input/output -- MessageRoleBinding or None
        faults -- Collection of fault MessageRoleBinding objects
    """
    def __init__(self, name, documentation=''):
        Element.__init__(self, name, documentation)
        self.input = None
        self.output = None
        self.faults = Collection(self)
    def getWSDL(self):
        """Return the WSDL object that contains this binding."""
        return self.parent().parent().parent().parent()
    def getBinding(self):
        """Return the parent Binding object of the operation binding."""
        return self.parent().parent()
    def getOperation(self):
        """Return the abstract Operation associated with this binding."""
        return self.getBinding().getPortType().operations[self.name]
    def findBinding(self, kind):
        """Return the first extension that is an instance of kind, or None."""
        for item in self.extensions:
            if isinstance(item, kind):
                return item
        return None
    def findBindings(self, kind):
        """Return every extension that is an instance of kind."""
        return [ item for item in self.extensions if isinstance(item, kind) ]
    def addInputBinding(self, binding):
        """Attach an extension to the input role, creating it if needed."""
        if self.input is None:
            self.input = MessageRoleBinding('input')
            self.input.parent = weakref.ref(self)
        self.input.addExtension(binding)
        return binding
    def addOutputBinding(self, binding):
        """Attach an extension to the output role, creating it if needed."""
        if self.output is None:
            self.output = MessageRoleBinding('output')
            self.output.parent = weakref.ref(self)
        self.output.addExtension(binding)
        return binding
    def addFaultBinding(self, name, binding):
        """Attach an extension to the named fault role, creating it if needed.

        BUG FIX: the original called self.get(name, None), but Element has
        no get() method, so this always raised AttributeError.  Look the
        role up in the faults Collection instead, and register a newly
        created role there so later lookups find it.
        """
        fault = self.faults.get(name, None)
        if fault is None:
            fault = MessageRoleBinding('fault', name)
            self.faults[name] = fault
        fault.addExtension(binding)
        return binding
    def load_ex(self, elements):
        """Parse operation-level extensibility elements."""
        for e in elements:
            ns, name = e.namespaceURI, e.localName
            if ns in DOM.NS_SOAP_BINDING_ALL and name == 'operation':
                soapaction = DOM.getAttr(e, 'soapAction', default=None)
                style = DOM.getAttr(e, 'style', default=None)
                ob = SoapOperationBinding(soapaction, style)
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'operation':
                location = DOM.getAttr(e, 'location')
                ob = HttpOperationBinding(location)
                self.addExtension(ob)
                continue
            else:
                # Unknown extension: keep the raw DOM node.
                self.addExtension(e)
    def toDom(self, node):
        """Serialize this operation binding beneath node."""
        wsdl = self.getWSDL()
        ep = ElementProxy(None, node)
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), 'operation')
        epc.setAttributeNS(None, 'name', self.name)
        node = epc._getNode()
        for ext in self.extensions:
            ext.toDom(node)
        if self.input:
            self.input.toDom(node)
        if self.output:
            self.output.toDom(node)
        for fault in self.faults:
            fault.toDom(node)
class MessageRoleBinding(Element):
    """Binding details for one message role.

    type is 'input', 'output', or 'fault'.
    """
    def __init__(self, type, name='', documentation=''):
        Element.__init__(self, name, documentation)
        self.type = type
    def getWSDL(self):
        """Return the WSDL object that contains this MessageRole."""
        # Fault roles live inside a Collection, adding one extra parent hop.
        if self.type == 'fault':
            return self.parent().parent().getWSDL()
        return self.parent().getWSDL()
    def findBinding(self, kind):
        # Return the first extension that is an instance of kind, or None.
        for item in self.extensions:
            if isinstance(item, kind):
                return item
        return None
    def findBindings(self, kind):
        # Return every extension that is an instance of kind.
        return [ item for item in self.extensions if isinstance(item, kind) ]
    def load_ex(self, elements):
        # Parse soap:/http:/mime: extensibility elements for this role.
        for e in elements:
            ns, name = e.namespaceURI, e.localName
            if ns in DOM.NS_SOAP_BINDING_ALL and name == 'body':
                encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
                namespace = DOM.getAttr(e, 'namespace', default=None)
                parts = DOM.getAttr(e, 'parts', default=None)
                use = DOM.getAttr(e, 'use', default=None)
                # 'use' is required by the WSDL SOAP binding schema.
                if use is None:
                    raise WSDLError(
                        'Invalid soap:body binding element.'
                        )
                ob = SoapBodyBinding(use, namespace, encstyle, parts)
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_SOAP_BINDING_ALL and name == 'fault':
                encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
                namespace = DOM.getAttr(e, 'namespace', default=None)
                # NOTE: rebinding 'name' shadows the localName read above;
                # safe because this branch ends with 'continue'.
                name = DOM.getAttr(e, 'name', default=None)
                use = DOM.getAttr(e, 'use', default=None)
                if use is None or name is None:
                    raise WSDLError(
                        'Invalid soap:fault binding element.'
                        )
                ob = SoapFaultBinding(name, use, namespace, encstyle)
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_SOAP_BINDING_ALL and name in (
                'header', 'headerfault'
                ):
                encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
                namespace = DOM.getAttr(e, 'namespace', default=None)
                message = DOM.getAttr(e, 'message')
                part = DOM.getAttr(e, 'part')
                use = DOM.getAttr(e, 'use')
                # header and headerfault share the same attribute set;
                # only the wrapper class (and its tagname) differs.
                if name == 'header':
                    _class = SoapHeaderBinding
                else:
                    _class = SoapHeaderFaultBinding
                message = ParseQName(message, e)
                ob = _class(message, part, use, namespace, encstyle)
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'urlReplacement':
                ob = HttpUrlReplacementBinding()
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'urlEncoded':
                ob = HttpUrlEncodedBinding()
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_MIME_BINDING_ALL and name == 'multipartRelated':
                ob = MimeMultipartRelatedBinding()
                self.addExtension(ob)
                # multipartRelated is the only extension with children of
                # its own; recurse into them.
                ob.load_ex(GetExtensions(e))
                continue
            elif ns in DOM.NS_MIME_BINDING_ALL and name == 'content':
                part = DOM.getAttr(e, 'part', default=None)
                type = DOM.getAttr(e, 'type', default=None)
                ob = MimeContentBinding(part, type)
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_MIME_BINDING_ALL and name == 'mimeXml':
                part = DOM.getAttr(e, 'part', default=None)
                ob = MimeXmlBinding(part)
                self.addExtension(ob)
                continue
            else:
                # Unknown extension: keep the raw DOM node.
                self.addExtension(e)
    def toDom(self, node):
        # Serialize this role; each extension renders itself beneath it.
        wsdl = self.getWSDL()
        ep = ElementProxy(None, node)
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), self.type)
        node = epc._getNode()
        for item in self.extensions:
            if item: item.toDom(node)
class Service(Element):
    """A wsdl:service -- a named collection of ports."""
    def __init__(self, name, documentation=''):
        Element.__init__(self, name, documentation)
        self.ports = Collection(self)
    def getWSDL(self):
        """Return the enclosing WSDL object."""
        return self.parent().parent()
    def addPort(self, name, binding, documentation=''):
        """Create a Port, register it under name, and return it."""
        port = Port(name, binding, documentation)
        self.ports[name] = port
        return port
    def load(self, elements):
        """Populate this service from a sequence of <port> DOM elements."""
        for node in elements:
            port_name = DOM.getAttr(node, 'name', default=None)
            docs = GetDocumentation(node)
            binding_attr = DOM.getAttr(node, 'binding', default=None)
            # Both name= and binding= are required on a port.
            if port_name is None or binding_attr is None:
                raise WSDLError(
                    'Invalid port element.'
                    )
            binding_ref = ParseQName(binding_attr, node)
            port = self.addPort(port_name, binding_ref, docs)
            port.load_ex(GetExtensions(node))
    def load_ex(self, elements):
        """Record extensibility elements appearing under <service>."""
        for node in elements:
            self.addExtension(node)
    def toDom(self):
        """Serialize this service (and its ports) into the WSDL document."""
        wsdl = self.getWSDL()
        proxy = ElementProxy(None, DOM.getElement(wsdl.document, None))
        svc = proxy.createAppendElement(DOM.GetWSDLUri(wsdl.version), "service")
        svc.setAttributeNS(None, "name", self.name)
        parent_node = svc._getNode()
        for port in self.ports:
            port.toDom(parent_node)
class Port(Element):
    """A wsdl:port -- one endpoint: a binding reference plus an address."""
    def __init__(self, name, binding, documentation=''):
        Element.__init__(self, name, documentation)
        # binding is a (namespaceURI, name) QName referencing a Binding.
        self.binding = binding
    def getWSDL(self):
        # Owning WSDL: self -> ports Collection -> Service -> WSDL.
        return self.parent().parent().getWSDL()
    def getService(self):
        """Return the Service object associated with this port."""
        return self.parent().parent()
    def getBinding(self):
        """Return the Binding object that is referenced by this port."""
        wsdl = self.getService().getWSDL()
        return wsdl.bindings[self.binding]
    def getPortType(self):
        """Return the PortType object that is referenced by this port."""
        wsdl = self.getService().getWSDL()
        binding = wsdl.bindings[self.binding]
        return wsdl.portTypes[binding.type]
    def getAddressBinding(self):
        """A convenience method to obtain the extension element used
        as the address binding for the port."""
        for item in self.extensions:
            if isinstance(item, SoapAddressBinding) or \
               isinstance(item, HttpAddressBinding):
                return item
        raise WSDLError(
            'No address binding found in port.'
            )
    def load_ex(self, elements):
        # Parse soap:/http: address extensibility elements.
        for e in elements:
            ns, name = e.namespaceURI, e.localName
            if ns in DOM.NS_SOAP_BINDING_ALL and name == 'address':
                location = DOM.getAttr(e, 'location', default=None)
                ob = SoapAddressBinding(location)
                self.addExtension(ob)
                continue
            elif ns in DOM.NS_HTTP_BINDING_ALL and name == 'address':
                location = DOM.getAttr(e, 'location', default=None)
                ob = HttpAddressBinding(location)
                self.addExtension(ob)
                continue
            else:
                # Unknown extension: keep the raw DOM node.
                self.addExtension(e)
    def toDom(self, node):
        # Serialize this port beneath node.
        wsdl = self.getWSDL()
        ep = ElementProxy(None, node)
        epc = ep.createAppendElement(DOM.GetWSDLUri(wsdl.version), "port")
        epc.setAttributeNS(None, "name", self.name)
        ns,name = self.binding
        prefix = epc.getPrefix(ns)
        epc.setAttributeNS(None, "binding", "%s:%s" %(prefix,name))
        node = epc._getNode()
        for ext in self.extensions:
            ext.toDom(node)
class SoapBinding:
    """Holds soap:binding extension data (transport URI and default style)."""
    def __init__(self, transport, style='rpc'):
        self.transport = transport
        self.style = style
    def getWSDL(self):
        """Return the WSDL object via the owning element."""
        return self.parent().getWSDL()
    def toDom(self, node):
        """Append a soap:binding element beneath node."""
        wsdl = self.getWSDL()
        proxy = ElementProxy(None, node)
        child = proxy.createAppendElement(
            DOM.GetWSDLSoapBindingUri(wsdl.version), 'binding')
        if self.transport:
            child.setAttributeNS(None, "transport", self.transport)
        if self.style:
            child.setAttributeNS(None, "style", self.style)
class SoapAddressBinding:
    """Holds soap:address extension data (the endpoint location URI)."""
    def __init__(self, location):
        self.location = location
    def getWSDL(self):
        """Return the WSDL object via the owning element."""
        return self.parent().getWSDL()
    def toDom(self, node):
        """Append a soap:address element beneath node."""
        wsdl = self.getWSDL()
        proxy = ElementProxy(None, node)
        child = proxy.createAppendElement(
            DOM.GetWSDLSoapBindingUri(wsdl.version), 'address')
        child.setAttributeNS(None, "location", self.location)
class SoapOperationBinding:
    """Holds soap:operation extension data (soapAction URI and style)."""
    def __init__(self, soapAction=None, style=None):
        self.soapAction = soapAction
        self.style = style
    def getWSDL(self):
        """Return the WSDL object via the owning element."""
        return self.parent().getWSDL()
    def toDom(self, node):
        """Append a soap:operation element beneath node."""
        wsdl = self.getWSDL()
        proxy = ElementProxy(None, node)
        child = proxy.createAppendElement(
            DOM.GetWSDLSoapBindingUri(wsdl.version), 'operation')
        if self.soapAction:
            child.setAttributeNS(None, 'soapAction', self.soapAction)
        if self.style:
            child.setAttributeNS(None, 'style', self.style)
class SoapBodyBinding:
    """Holds soap:body extension data for a message role binding."""
    def __init__(self, use, namespace=None, encodingStyle=None, parts=None):
        # The WSDL SOAP binding schema constrains 'use'.
        if use not in ('literal', 'encoded'):
            raise WSDLError(
                'Invalid use attribute value: %s' % use
                )
        self.encodingStyle = encodingStyle
        self.namespace = namespace
        # A whitespace-separated string of part names becomes a list.
        if isinstance(parts, (type(''), type(u''))):
            parts = parts.split()
        self.parts = parts
        self.use = use
    def getWSDL(self):
        """Return the WSDL object via the owning message role."""
        return self.parent().getWSDL()
    def toDom(self, node):
        """Append a soap:body element beneath node."""
        wsdl = self.getWSDL()
        proxy = ElementProxy(None, node)
        child = proxy.createAppendElement(
            DOM.GetWSDLSoapBindingUri(wsdl.version), 'body')
        child.setAttributeNS(None, "use", self.use)
        child.setAttributeNS(None, "namespace", self.namespace)
class SoapFaultBinding:
    """Holds soap:fault extension data for a fault message role."""
    def __init__(self, name, use, namespace=None, encodingStyle=None):
        # The WSDL SOAP binding schema constrains 'use'.
        if use not in ('literal', 'encoded'):
            raise WSDLError(
                'Invalid use attribute value: %s' % use
                )
        self.encodingStyle = encodingStyle
        self.namespace = namespace
        self.name = name
        self.use = use
class SoapHeaderBinding:
    """Holds soap:header extension data (message QName, part, use)."""
    # Element name emitted on serialization; overridden by the
    # headerfault subclass.
    tagname = 'header'
    def __init__(self, message, part, use, namespace=None, encodingStyle=None):
        # The WSDL SOAP binding schema constrains 'use'.
        if use not in ('literal', 'encoded'):
            raise WSDLError(
                'Invalid use attribute value: %s' % use
                )
        self.encodingStyle = encodingStyle
        self.namespace = namespace
        self.message = message
        self.part = part
        self.use = use
class SoapHeaderFaultBinding(SoapHeaderBinding):
    # Same data as SoapHeaderBinding; only the emitted tag name differs.
    tagname = 'headerfault'
class HttpBinding:
    """Holds http:binding extension data (the HTTP verb, e.g. GET/POST)."""
    def __init__(self, verb):
        self.verb = verb
class HttpAddressBinding:
    """Holds http:address extension data (the endpoint location URI)."""
    def __init__(self, location):
        self.location = location
class HttpOperationBinding:
    """Holds http:operation extension data (the relative location URI)."""
    def __init__(self, location):
        self.location = location
class HttpUrlReplacementBinding:
    """Marker for the http:urlReplacement extension element (no data)."""
class HttpUrlEncodedBinding:
    """Marker for the http:urlEncoded extension element (no data)."""
class MimeContentBinding:
    """Holds mime:content extension data (part name and MIME type)."""
    def __init__(self, part=None, type=None):
        self.part = part
        self.type = type
class MimeXmlBinding:
    """Holds mime:mimeXml extension data (the bound part name)."""
    def __init__(self, part=None):
        self.part = part
class MimeMultipartRelatedBinding:
    """Holds mime:multipartRelated extension data (a list of parts)."""
    def __init__(self):
        self.parts = []
    def load_ex(self, elements):
        """Collect mime:part children as MimePartBinding instances.

        NOTE(review): the child element's own content is never parsed
        here (MimePartBinding.load_ex is not invoked) -- confirm intended.
        """
        for child in elements:
            ns, localname = child.namespaceURI, child.localName
            if ns in DOM.NS_MIME_BINDING_ALL and localname == 'part':
                self.parts.append(MimePartBinding())
                continue
class MimePartBinding:
    """One mime:part within a mime:multipartRelated binding."""
    def __init__(self):
        # Parsed child bindings (MimeContentBinding / MimeXmlBinding /
        # SoapBodyBinding), in document order.
        self.items = []
    def load_ex(self, elements):
        # Parse the mime:/soap: children of a mime:part element.
        # NOTE: unrecognized children are silently ignored here, unlike
        # the other load_ex methods which keep the raw DOM node.
        for e in elements:
            ns, name = e.namespaceURI, e.localName
            if ns in DOM.NS_MIME_BINDING_ALL and name == 'content':
                part = DOM.getAttr(e, 'part', default=None)
                type = DOM.getAttr(e, 'type', default=None)
                ob = MimeContentBinding(part, type)
                self.items.append(ob)
                continue
            elif ns in DOM.NS_MIME_BINDING_ALL and name == 'mimeXml':
                part = DOM.getAttr(e, 'part', default=None)
                ob = MimeXmlBinding(part)
                self.items.append(ob)
                continue
            elif ns in DOM.NS_SOAP_BINDING_ALL and name == 'body':
                encstyle = DOM.getAttr(e, 'encodingStyle', default=None)
                namespace = DOM.getAttr(e, 'namespace', default=None)
                parts = DOM.getAttr(e, 'parts', default=None)
                use = DOM.getAttr(e, 'use', default=None)
                # 'use' is required by the WSDL SOAP binding schema.
                if use is None:
                    raise WSDLError(
                        'Invalid soap:body binding element.'
                        )
                ob = SoapBodyBinding(use, namespace, encstyle, parts)
                self.items.append(ob)
                continue
class WSDLError(Exception):
    """Raised for structural errors found while processing a WSDL document."""
def DeclareNSPrefix(writer, prefix, nsuri):
    """Declare prefix -> nsuri on writer unless the URI is already mapped."""
    if not writer.hasNSPrefix(nsuri):
        writer.declareNSPrefix(prefix, nsuri)
def ParseTypeRef(value, element):
    """Resolve a possibly prefixed type reference into (namespaceURI, name)."""
    prefix, sep, localname = value.partition(':')
    if not sep:
        # Unprefixed: the name lives in the target namespace in scope.
        return (DOM.findTargetNS(element), value)
    return (DOM.findNamespaceURI(prefix, element), localname)
def ParseQName(value, element):
    """Resolve a possibly prefixed QName string into (namespaceURI, name)."""
    prefix, sep, localname = value.partition(':')
    if sep:
        return DOM.findNamespaceURI(prefix, element), localname
    # Unprefixed: the name lives in the target namespace in scope.
    return DOM.findTargetNS(element), value
def GetDocumentation(element):
    """Return the text of element's <documentation> child, or ''."""
    docnode = DOM.getElement(element, 'documentation', None, None)
    if docnode is None:
        return ''
    return DOM.getElementText(docnode)
def GetExtensions(element):
    """Return the child elements that are not in the WSDL namespace."""
    extensions = []
    for child in DOM.getElements(element, None, None):
        if child.namespaceURI != DOM.NS_WSDL:
            extensions.append(child)
    return extensions
def GetWSAActionFault(operation, name):
    """Find wsa:Action attribute, and return value or WSA.FAULT
    for the default.
    """
    action = operation.faults[name].action
    if action is None:
        return WSA.FAULT
    return action
def GetWSAActionInput(operation):
    """Find wsa:Action attribute, and return value or the default.

    The default follows WS-Addressing: targetNamespace/portType/message,
    where the message name falls back to '<operation>Request'.
    """
    explicit = operation.input.action
    if explicit is not None:
        return explicit
    port_type = operation.getPortType()
    tns = port_type.getTargetNamespace()
    msg_name = operation.input.name or (operation.name + 'Request')
    # Avoid a doubled '/' when the namespace already ends with one.
    sep = '' if tns.endswith('/') else '/'
    return '%s%s%s/%s' % (tns, sep, port_type.name, msg_name)
def GetWSAActionOutput(operation):
    """Find wsa:Action attribute, and return value or the default.

    The default follows WS-Addressing: targetNamespace/portType/message,
    where the message name falls back to '<operation>Response'.
    """
    explicit = operation.output.action
    if explicit is not None:
        return explicit
    port_type = operation.getPortType()
    tns = port_type.getTargetNamespace()
    msg_name = operation.output.name or (operation.name + 'Response')
    # Avoid a doubled '/' when the namespace already ends with one.
    sep = '' if tns.endswith('/') else '/'
    return '%s%s%s/%s' % (tns, sep, port_type.name, msg_name)
def FindExtensions(object, kind, t_type=type(())):
    """Return all extensions of object matching kind.

    kind is either a class (matched with isinstance) or a
    (namespaceURI, name) tuple matched against raw DOM extension nodes.
    """
    # FIX: removed a dead local ('result = []') that was never used.
    if isinstance(kind, t_type):
        # Tuple form: match unparsed DOM nodes by namespace and name.
        namespaceURI, name = kind
        return [ item for item in object.extensions
                 if hasattr(item, 'nodeType') \
                 and DOM.nsUriMatch(namespaceURI, item.namespaceURI) \
                 and item.name == name ]
    return [ item for item in object.extensions if isinstance(item, kind) ]
def FindExtension(object, kind, t_type=type(())):
    """Return the first extension of object matching kind, or None.

    kind is either a class (matched with isinstance) or a
    (namespaceURI, name) tuple matched against raw DOM extension nodes.
    """
    if isinstance(kind, t_type):
        # Tuple form: match unparsed DOM nodes by namespace and name.
        namespaceURI, name = kind
        for item in object.extensions:
            if hasattr(item, 'nodeType') \
               and DOM.nsUriMatch(namespaceURI, item.namespaceURI) \
               and item.name == name:
                return item
        return None
    for item in object.extensions:
        if isinstance(item, kind):
            return item
    return None
class SOAPCallInfo:
    """SOAPCallInfo captures the important binding information about a
    SOAP operation, in a structure that is easier to work with than
    raw WSDL structures."""
    def __init__(self, methodName):
        self.methodName = methodName
        self.inheaders = []
        self.outheaders = []
        self.inparams = []
        self.outparams = []
        self.retval = None
    # Class-level defaults; callInfoFromWSDL overwrites these per instance.
    encodingStyle = DOM.NS_SOAP_ENC
    documentation = ''
    soapAction = None
    transport = None
    namespace = None
    location = None
    use = 'encoded'
    style = 'rpc'
    def addInParameter(self, name, type, namespace=None, element_type=0):
        """Add an input parameter description to the call info."""
        parameter = ParameterInfo(name, type, namespace, element_type)
        self.inparams.append(parameter)
        return parameter
    def addOutParameter(self, name, type, namespace=None, element_type=0):
        """Add an output parameter description to the call info."""
        parameter = ParameterInfo(name, type, namespace, element_type)
        self.outparams.append(parameter)
        return parameter
    def setReturnParameter(self, name, type, namespace=None, element_type=0):
        """Set the return parameter description for the call info."""
        parameter = ParameterInfo(name, type, namespace, element_type)
        self.retval = parameter
        return parameter
    def addInHeaderInfo(self, name, type, namespace, element_type=0,
                        mustUnderstand=0):
        """Add an input SOAP header description to the call info."""
        headerinfo = HeaderInfo(name, type, namespace, element_type)
        if mustUnderstand:
            headerinfo.mustUnderstand = 1
        self.inheaders.append(headerinfo)
        return headerinfo
    def addOutHeaderInfo(self, name, type, namespace, element_type=0,
                         mustUnderstand=0):
        """Add an output SOAP header description to the call info."""
        headerinfo = HeaderInfo(name, type, namespace, element_type)
        if mustUnderstand:
            headerinfo.mustUnderstand = 1
        self.outheaders.append(headerinfo)
        return headerinfo
    def getInParameters(self):
        """Return a sequence of the in parameters of the method."""
        return self.inparams
    def getOutParameters(self):
        """Return a sequence of the out parameters of the method."""
        return self.outparams
    def getReturnParameter(self):
        """Return param info about the return value of the method."""
        return self.retval
    def getInHeaders(self):
        """Return a sequence of the in headers of the method."""
        return self.inheaders
    def getOutHeaders(self):
        """Return a sequence of the out headers of the method."""
        return self.outheaders
class ParameterInfo:
    """A ParameterInfo object captures parameter binding information."""
    # Class-level defaults; instances shadow them only when values are set.
    element_type = 0
    namespace = None
    default = None
    def __init__(self, name, type, namespace=None, element_type=0):
        if element_type:
            self.element_type = 1
        if namespace is not None:
            self.namespace = namespace
        self.name = name
        self.type = type
class HeaderInfo(ParameterInfo):
    """A HeaderInfo object captures SOAP header binding information."""
    # Header-specific class-level defaults.
    mustUnderstand = 0
    actor = None
    def __init__(self, name, type, namespace, element_type=None):
        ParameterInfo.__init__(self, name, type, namespace, element_type)
def callInfoFromWSDL(port, name):
"""Return a SOAPCallInfo given a WSDL port and operation name."""
wsdl = port.getService().getWSDL()
binding = port.getBinding()
portType = binding.getPortType()
operation = portType.operations[name]
opbinding = binding.operations[name]
messages = wsdl.messages
callinfo = SOAPCallInfo(name)
addrbinding = port.getAddressBinding()
if not isinstance(addrbinding, SoapAddressBinding):
raise ValueError, 'Unsupported binding type.'
callinfo.location = addrbinding.location
soapbinding = binding.findBinding(SoapBinding)
if soapbinding is None:
raise ValueError, 'Missing soap:binding element.'
callinfo.transport = soapbinding.transport
callinfo.style = soapbinding.style or 'document'
soap_op_binding = opbinding.findBinding(SoapOperationBinding)
if soap_op_binding is not None:
callinfo.soapAction = soap_op_binding.soapAction
callinfo.style = soap_op_binding.style or callinfo.style
parameterOrder = operation.parameterOrder
if operation.input is not None:
message = messages[operation.input.message]
msgrole = opbinding.input
mime = msgrole.findBinding(MimeMultipartRelatedBinding)
if mime is not None:
raise ValueError, 'Mime bindings are not supported.'
else:
for item in msgrole.findBindings(SoapHeaderBinding):
part = messages[item.message].parts[item.part]
header = callinfo.addInHeaderInfo(
part.name,
part.element or part.type,
item.namespace,
element_type = part.element and 1 or 0
)
header.encodingStyle = item.encodingStyle
body = msgrole.findBinding(SoapBodyBinding)
if body is None:
raise ValueError, 'Missing soap:body binding.'
callinfo.encodingStyle = body.encodingStyle
callinfo.namespace = body.namespace
callinfo.use = body.use
if body.parts is not None:
parts = []
for name in body.parts:
parts.append(message.parts[name])
else:
parts = message.parts.values()
for part in parts:
callinfo.addInParameter(
part.name,
part.element or part.type,
element_type = part.element and 1 or 0
)
if operation.output is not None:
try:
message = messages[operation.output.message]
except KeyError:
if self.strict:
raise RuntimeError(
"Recieved message not defined in the WSDL schema: %s" %
operation.output.message)
else:
message = wsdl.addMessage(operation.output.message)
print "Warning:", \
"Recieved message not defined in the WSDL schema.", \
"Adding it."
print "Message:", operation.output.message
msgrole = opbinding.output
mime = msgrole.findBinding(MimeMultipartRelatedBinding)
if mime is not None:
raise ValueError, 'Mime bindings are not supported.'
else:
for item in msgrole.findBindings(SoapHeaderBinding):
part = messages[item.message].parts[item.part]
header = callinfo.addOutHeaderInfo(
part.name,
part.element or part.type,
item.namespace,
element_type = part.element and 1 or 0
)
header.encodingStyle = item.encodingStyle
body = msgrole.findBinding(SoapBodyBinding)
if body is None:
raise ValueError, 'Missing soap:body binding.'
callinfo.encodingStyle = body.encodingStyle
callinfo.namespace = body.namespace
callinfo.use = body.use
if body.parts is not None:
parts = []
for name in body.parts:
parts.append(message.parts[name])
else:
parts = message.parts.values()
if parts:
for part in parts:
callinfo.addOutParameter(
part.name,
part.element or part.type,
element_type = part.element and 1 or 0
)
return callinfo
| Python |
"""Based on code from timeout_socket.py, with some tweaks for compatibility.
These tweaks should really be rolled back into timeout_socket, but it's
not totally clear who is maintaining it at this point. In the meantime,
we'll use a different module name for our tweaked version to avoid any
confusion.
The original timeout_socket is by:
Scott Cotton <scott@chronis.pobox.com>
Lloyd Zusman <ljz@asfast.com>
Phil Mayes <pmayes@olivebr.com>
Piers Lauder <piers@cs.su.oz.au>
Radovan Garabik <garabik@melkor.dnp.fmph.uniba.sk>
"""
ident = "$Id: TimeoutSocket.py,v 1.2 2003/05/20 21:10:12 warnes Exp $"
import string, socket, select, errno
# connect() retries on Windows can fail with WSAEINVAL (10022); use the
# numeric value when the platform's errno module lacks the constant.
WSAEINVAL = getattr(errno, 'WSAEINVAL', 10022)
class TimeoutSocket:
"""A socket imposter that supports timeout limits."""
def __init__(self, timeout=20, sock=None):
self.timeout = float(timeout)
self.inbuf = ''
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock = sock
self.sock.setblocking(0)
self._rbuf = ''
self._wbuf = ''
def __getattr__(self, name):
# Delegate to real socket attributes.
return getattr(self.sock, name)
def connect(self, *addr):
timeout = self.timeout
sock = self.sock
try:
# Non-blocking mode
sock.setblocking(0)
apply(sock.connect, addr)
sock.setblocking(timeout != 0)
return 1
except socket.error,why:
if not timeout:
raise
sock.setblocking(1)
if len(why.args) == 1:
code = 0
else:
code, why = why
if code not in (
errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK
):
raise
r,w,e = select.select([],[sock],[],timeout)
if w:
try:
apply(sock.connect, addr)
return 1
except socket.error,why:
if len(why.args) == 1:
code = 0
else:
code, why = why
if code in (errno.EISCONN, WSAEINVAL):
return 1
raise
raise TimeoutError('socket connect() timeout.')
def send(self, data, flags=0):
total = len(data)
next = 0
while 1:
r, w, e = select.select([],[self.sock], [], self.timeout)
if w:
buff = data[next:next + 8192]
sent = self.sock.send(buff, flags)
next = next + sent
if next == total:
return total
continue
raise TimeoutError('socket send() timeout.')
def recv(self, amt, flags=0):
if select.select([self.sock], [], [], self.timeout)[0]:
return self.sock.recv(amt, flags)
raise TimeoutError('socket recv() timeout.')
buffsize = 4096
handles = 1
def makefile(self, mode="r", buffsize=-1):
self.handles = self.handles + 1
self.mode = mode
return self
def close(self):
self.handles = self.handles - 1
if self.handles == 0 and self.sock.fileno() >= 0:
self.sock.close()
def read(self, n=-1):
if not isinstance(n, type(1)):
n = -1
if n >= 0:
k = len(self._rbuf)
if n <= k:
data = self._rbuf[:n]
self._rbuf = self._rbuf[n:]
return data
n = n - k
L = [self._rbuf]
self._rbuf = ""
while n > 0:
new = self.recv(max(n, self.buffsize))
if not new: break
k = len(new)
if k > n:
L.append(new[:n])
self._rbuf = new[n:]
break
L.append(new)
n = n - k
return "".join(L)
k = max(4096, self.buffsize)
L = [self._rbuf]
self._rbuf = ""
while 1:
new = self.recv(k)
if not new: break
L.append(new)
k = min(k*2, 1024**2)
return "".join(L)
    def readline(self, limit=-1):
        """Read one line, up to and including the newline, or at most
        *limit* bytes when limit > 0.  Excess data stays in self._rbuf."""
        data = ""
        # Receive until a newline is buffered or the limit is satisfied.
        i = self._rbuf.find('\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self.recv(self.buffsize)
            if not new: break
            i = new.find('\n')
            if i >= 0: i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        # i becomes the split point: past the newline, or the whole buffer
        # at EOF, clamped to the limit when one was given.
        if i < 0: i = len(self._rbuf)
        else: i = i+1
        if 0 <= limit < len(self._rbuf): i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data
def readlines(self, sizehint = 0):
total = 0
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
    def writelines(self, list):
        """Write every string in *list* (parameter name shadows the
        builtin but is kept for file-object API compatibility)."""
        self.send(''.join(list))
    def write(self, data):
        """Write *data* to the socket, honoring the send() timeout."""
        self.send(data)
    def flush(self):
        """No-op: send() pushes data out immediately."""
        pass
class TimeoutError(Exception):
    """Raised when a socket operation exceeds its configured timeout."""
| Python |
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id: Utility.py,v 1.20 2005/02/09 18:33:05 boverhof Exp $"
import types
import string, httplib, smtplib, urllib, socket, weakref
from os.path import isfile
from string import join, strip, split
from UserDict import UserDict
from cStringIO import StringIO
from TimeoutSocket import TimeoutSocket, TimeoutError
from urlparse import urlparse
from httplib import HTTPConnection, HTTPSConnection
from exceptions import Exception
import xml.dom.minidom
from xml.dom import Node
import logging
from c14n import Canonicalize
from Namespaces import SCHEMA, SOAP, XMLNS, ZSI_SCHEMA_URI
try:
    from xml.dom.ext import SplitQName
except ImportError:
    # Fixed: was a bare "except:" that would also hide unrelated errors
    # raised while importing xml.dom.ext (PyXML).  When PyXML is absent,
    # fall back to a local implementation with the same contract.
    def SplitQName(qname):
        '''SplitQName(qname) -> (string, string)

           Split Qualified Name into a tuple of len 2, consisting
           of the prefix and the local name.

           (prefix, localName)

           Special Cases:
               xmlns -- (localName, 'xmlns')
               None -- (None, localName)

           Returns None for strings containing more than one colon.
        '''
        l = qname.split(':')
        if len(l) == 1:
            # No prefix present.
            l.insert(0, None)
        elif len(l) == 2:
            if l[0] == 'xmlns':
                # Namespace declaration: report 'xmlns' as the local part.
                l.reverse()
        else:
            # More than one colon: not a valid QName.
            return
        return tuple(l)
class NamespaceError(Exception):
    """Raised when an XML namespace problem is detected."""
class RecursionError(Exception):
    """Raised when an HTTP redirect chain loops back on itself."""
class ParseError(Exception):
    """Raised when an XML document cannot be parsed."""
class DOMException(Exception):
    """Raised when DOM processing fails (e.g. unresolved prefixes)."""
class Base:
    """Mix-in that gives every instance its own logger.

    The logger name encodes the module, the concrete class and the
    instance id, so log lines can be traced to a specific object."""
    def __init__(self, module=__name__):
        logger_name = '%s-%s(%x)' % (module, self.__class__, id(self))
        self.logger = logging.getLogger(logger_name)
class HTTPResponse:
    """Snapshot of an HTTP response message.

    Copies status, reason, headers and the complete body out of an
    httplib-style response object and then closes it.  An empty body is
    normalized to None."""
    def __init__(self, response):
        self.status = response.status
        self.reason = response.reason
        self.headers = response.msg
        body = response.read()
        self.body = body or None
        response.close()
class TimeoutHTTP(HTTPConnection):
    """HTTPConnection whose connect() goes through a TimeoutSocket so
    that establishing the TCP connection cannot block indefinitely."""
    def __init__(self, host, port=None, timeout=20):
        HTTPConnection.__init__(self, host, port)
        # Seconds allowed for the TCP connect (consumed in connect()).
        self.timeout = timeout
    def connect(self):
        wrapped = TimeoutSocket(self.timeout)
        wrapped.connect((self.host, self.port))
        self.sock = wrapped
class TimeoutHTTPS(HTTPSConnection):
    """A custom https object that supports socket timeout. Note that this
       is not really complete. The builtin SSL support in the Python socket
       module requires a real socket (type) to be passed in to be hooked to
       SSL. That means our fake socket won't work and our timeout hacks are
       bypassed for send and recv calls. Since our hack _is_ in place at
       connect() time, it should at least provide some timeout protection."""
    def __init__(self, host, port=None, timeout=20, **kwargs):
        # timeout -- seconds allowed for establishing the TCP connection.
        HTTPSConnection.__init__(self, str(host), port, **kwargs)
        self.timeout = timeout
    def connect(self):
        # Connect via the timeout-aware wrapper first...
        sock = TimeoutSocket(self.timeout)
        sock.connect((self.host, self.port))
        # ...then hand the *real* underlying socket to the SSL layer
        # (unwrap a possible _socketobject), which is why send/recv
        # timeouts are bypassed as noted in the class docstring.
        realsock = getattr(sock.sock, '_sock', sock.sock)
        ssl = socket.ssl(realsock, self.key_file, self.cert_file)
        self.sock = httplib.FakeSocket(sock, ssl)
def urlopen(url, timeout=20, redirects=None):
    """A minimal urlopen replacement hack that supports timeouts for http.
       Note that this supports GET only.

       Keyword arguments:
           url -- URL to retrieve; non-http(s) schemes are delegated to
                  urllib.urlopen
           timeout -- connection timeout in seconds
           redirects -- (internal) map of already-visited redirect targets,
                        used to detect circular redirect chains

       Returns a StringIO holding the response body.  Raises an
       HTTPResponse instance for non-2xx results (an old-style class,
       which Python 2 allows to be raised) and RecursionError when a
       redirect loop is detected."""
    scheme, host, path, params, query, frag = urlparse(url)
    if not scheme in ('http', 'https'):
        return urllib.urlopen(url)
    if params: path = '%s;%s' % (path, params)
    if query: path = '%s?%s' % (path, query)
    if frag: path = '%s#%s' % (path, frag)
    if scheme == 'https':
        # If ssl is not compiled into Python, you will not get an exception
        # until a conn.endheaders() call.   We need to know sooner, so use
        # getattr.
        if hasattr(socket, 'ssl'):
            conn = TimeoutHTTPS(host, None, timeout)
        else:
            import M2Crypto
            ctx = M2Crypto.SSL.Context()
            ctx.set_session_timeout(timeout)
            conn = M2Crypto.httpslib.HTTPSConnection(host, ssl_context=ctx)
        #conn.set_debuglevel(1)
    else:
        conn = TimeoutHTTP(host, None, timeout)
    conn.putrequest('GET', path)
    conn.putheader('Connection', 'close')
    conn.endheaders()
    response = None
    # Skip any "100 Continue" interim responses, resetting httplib's
    # private state machine so getresponse() can be called again.
    while 1:
        response = conn.getresponse()
        if response.status != 100:
            break
        conn._HTTPConnection__state = httplib._CS_REQ_SENT
        conn._HTTPConnection__response = None
    status = response.status
    # If we get an HTTP redirect, we will follow it automatically.
    if status >= 300 and status < 400:
        location = response.msg.getheader('location')
        if location is not None:
            response.close()
            # Track visited locations so a redirect cycle raises instead
            # of recursing forever.
            if redirects is not None and redirects.has_key(location):
                raise RecursionError(
                    'Circular HTTP redirection detected.'
                    )
            if redirects is None:
                redirects = {}
            redirects[location] = 1
            return urlopen(location, timeout, redirects)
        raise HTTPResponse(response)
    if not (status >= 200 and status < 300):
        raise HTTPResponse(response)
    body = StringIO(response.read())
    response.close()
    return body
class DOM:
"""The DOM singleton defines a number of XML related constants and
provides a number of utility methods for DOM related tasks. It
also provides some basic abstractions so that the rest of the
package need not care about actual DOM implementation in use."""
# Namespace stuff related to the SOAP specification.
NS_SOAP_ENV_1_1 = 'http://schemas.xmlsoap.org/soap/envelope/'
NS_SOAP_ENC_1_1 = 'http://schemas.xmlsoap.org/soap/encoding/'
NS_SOAP_ENV_1_2 = 'http://www.w3.org/2001/06/soap-envelope'
NS_SOAP_ENC_1_2 = 'http://www.w3.org/2001/06/soap-encoding'
NS_SOAP_ENV_ALL = (NS_SOAP_ENV_1_1, NS_SOAP_ENV_1_2)
NS_SOAP_ENC_ALL = (NS_SOAP_ENC_1_1, NS_SOAP_ENC_1_2)
NS_SOAP_ENV = NS_SOAP_ENV_1_1
NS_SOAP_ENC = NS_SOAP_ENC_1_1
_soap_uri_mapping = {
NS_SOAP_ENV_1_1 : '1.1',
NS_SOAP_ENV_1_2 : '1.2',
}
SOAP_ACTOR_NEXT_1_1 = 'http://schemas.xmlsoap.org/soap/actor/next'
SOAP_ACTOR_NEXT_1_2 = 'http://www.w3.org/2001/06/soap-envelope/actor/next'
SOAP_ACTOR_NEXT_ALL = (SOAP_ACTOR_NEXT_1_1, SOAP_ACTOR_NEXT_1_2)
def SOAPUriToVersion(self, uri):
"""Return the SOAP version related to an envelope uri."""
value = self._soap_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP envelope uri: %s' % uri
)
def GetSOAPEnvUri(self, version):
"""Return the appropriate SOAP envelope uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENV_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPEncUri(self, version):
"""Return the appropriate SOAP encoding uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'NS_SOAP_ENC_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
def GetSOAPActorNextUri(self, version):
"""Return the right special next-actor uri for a given
human-friendly SOAP version string (e.g. '1.1')."""
attrname = 'SOAP_ACTOR_NEXT_%s' % join(split(version, '.'), '_')
value = getattr(self, attrname, None)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP version: %s' % version
)
# Namespace stuff related to XML Schema.
NS_XSD_99 = 'http://www.w3.org/1999/XMLSchema'
NS_XSI_99 = 'http://www.w3.org/1999/XMLSchema-instance'
NS_XSD_00 = 'http://www.w3.org/2000/10/XMLSchema'
NS_XSI_00 = 'http://www.w3.org/2000/10/XMLSchema-instance'
NS_XSD_01 = 'http://www.w3.org/2001/XMLSchema'
NS_XSI_01 = 'http://www.w3.org/2001/XMLSchema-instance'
NS_XSD_ALL = (NS_XSD_99, NS_XSD_00, NS_XSD_01)
NS_XSI_ALL = (NS_XSI_99, NS_XSI_00, NS_XSI_01)
NS_XSD = NS_XSD_01
NS_XSI = NS_XSI_01
_xsd_uri_mapping = {
NS_XSD_99 : NS_XSI_99,
NS_XSD_00 : NS_XSI_00,
NS_XSD_01 : NS_XSI_01,
}
for key, value in _xsd_uri_mapping.items():
_xsd_uri_mapping[value] = key
def InstanceUriForSchemaUri(self, uri):
"""Return the appropriate matching XML Schema instance uri for
the given XML Schema namespace uri."""
return self._xsd_uri_mapping.get(uri)
def SchemaUriForInstanceUri(self, uri):
"""Return the appropriate matching XML Schema namespace uri for
the given XML Schema instance namespace uri."""
return self._xsd_uri_mapping.get(uri)
# Namespace stuff related to WSDL.
NS_WSDL_1_1 = 'http://schemas.xmlsoap.org/wsdl/'
NS_WSDL_ALL = (NS_WSDL_1_1,)
NS_WSDL = NS_WSDL_1_1
NS_SOAP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/soap/'
NS_HTTP_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/http/'
NS_MIME_BINDING_1_1 = 'http://schemas.xmlsoap.org/wsdl/mime/'
NS_SOAP_BINDING_ALL = (NS_SOAP_BINDING_1_1,)
NS_HTTP_BINDING_ALL = (NS_HTTP_BINDING_1_1,)
NS_MIME_BINDING_ALL = (NS_MIME_BINDING_1_1,)
NS_SOAP_BINDING = NS_SOAP_BINDING_1_1
NS_HTTP_BINDING = NS_HTTP_BINDING_1_1
NS_MIME_BINDING = NS_MIME_BINDING_1_1
NS_SOAP_HTTP_1_1 = 'http://schemas.xmlsoap.org/soap/http'
NS_SOAP_HTTP_ALL = (NS_SOAP_HTTP_1_1,)
NS_SOAP_HTTP = NS_SOAP_HTTP_1_1
_wsdl_uri_mapping = {
NS_WSDL_1_1 : '1.1',
}
def WSDLUriToVersion(self, uri):
"""Return the WSDL version related to a WSDL namespace uri."""
value = self._wsdl_uri_mapping.get(uri)
if value is not None:
return value
raise ValueError(
'Unsupported SOAP envelope uri: %s' % uri
)
def GetWSDLUri(self, version):
attr = 'NS_WSDL_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLSoapBindingUri(self, version):
attr = 'NS_SOAP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpBindingUri(self, version):
attr = 'NS_HTTP_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLMimeBindingUri(self, version):
attr = 'NS_MIME_BINDING_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
def GetWSDLHttpTransportUri(self, version):
attr = 'NS_SOAP_HTTP_%s' % join(split(version, '.'), '_')
value = getattr(self, attr, None)
if value is not None:
return value
raise ValueError(
'Unsupported WSDL version: %s' % version
)
# Other xml namespace constants.
NS_XMLNS = 'http://www.w3.org/2000/xmlns/'
def isElement(self, node, name, nsuri=None):
"""Return true if the given node is an element with the given
name and optional namespace uri."""
if node.nodeType != node.ELEMENT_NODE:
return 0
return node.localName == name and \
(nsuri is None or self.nsUriMatch(node.namespaceURI, nsuri))
def getElement(self, node, name, nsuri=None, default=join):
"""Return the first child of node with a matching name and
namespace uri, or the default if one is provided."""
nsmatch = self.nsUriMatch
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and
(nsuri is None or nsmatch(child.namespaceURI, nsuri))
):
return child
if default is not join:
return default
raise KeyError, name
def getElementById(self, node, id, default=join):
"""Return the first child of node matching an id reference."""
attrget = self.getAttr
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if attrget(child, 'id') == id:
return child
if default is not join:
return default
raise KeyError, name
def getMappingById(self, document, depth=None, element=None,
mapping=None, level=1):
"""Create an id -> element mapping of those elements within a
document that define an id attribute. The depth of the search
may be controlled by using the (1-based) depth argument."""
if document is not None:
element = document.documentElement
mapping = {}
attr = element._attrs.get('id', None)
if attr is not None:
mapping[attr.value] = element
if depth is None or depth > level:
level = level + 1
ELEMENT_NODE = element.ELEMENT_NODE
for child in element.childNodes:
if child.nodeType == ELEMENT_NODE:
self.getMappingById(None, depth, child, mapping, level)
return mapping
def getElements(self, node, name, nsuri=None):
"""Return a sequence of the child elements of the given node that
match the given name and optional namespace uri."""
nsmatch = self.nsUriMatch
result = []
ELEMENT_NODE = node.ELEMENT_NODE
for child in node.childNodes:
if child.nodeType == ELEMENT_NODE:
if ((child.localName == name or name is None) and (
(nsuri is None) or nsmatch(child.namespaceURI, nsuri))):
result.append(child)
return result
def hasAttr(self, node, name, nsuri=None):
"""Return true if element has attribute with the given name and
optional nsuri. If nsuri is not specified, returns true if an
attribute exists with the given name with any namespace."""
if nsuri is None:
if node.hasAttribute(name):
return True
return False
return node.hasAttributeNS(nsuri, name)
def getAttr(self, node, name, nsuri=None, default=join):
"""Return the value of the attribute named 'name' with the
optional nsuri, or the default if one is specified. If
nsuri is not specified, an attribute that matches the
given name will be returned regardless of namespace."""
if nsuri is None:
result = node._attrs.get(name, None)
if result is None:
for item in node._attrsNS.keys():
if item[1] == name:
result = node._attrsNS[item]
break
else:
result = node._attrsNS.get((nsuri, name), None)
if result is not None:
return result.value
if default is not join:
return default
return ''
def getAttrs(self, node):
"""Return a Collection of all attributes
"""
attrs = {}
for k,v in node._attrs.items():
attrs[k] = v.value
return attrs
def getElementText(self, node, preserve_ws=None):
"""Return the text value of an xml element node. Leading and trailing
whitespace is stripped from the value unless the preserve_ws flag
is passed with a true value."""
result = []
for child in node.childNodes:
nodetype = child.nodeType
if nodetype == child.TEXT_NODE or \
nodetype == child.CDATA_SECTION_NODE:
result.append(child.nodeValue)
value = join(result, '')
if preserve_ws is None:
value = strip(value)
return value
def findNamespaceURI(self, prefix, node):
"""Find a namespace uri given a prefix and a context node."""
attrkey = (self.NS_XMLNS, prefix)
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node is None:
raise DOMException('Value for prefix %s not found.' % prefix)
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Value for prefix %s not found.' % prefix)
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Value for prefix %s not found.' % prefix)
def findDefaultNS(self, node):
"""Return the current default namespace uri for the given node."""
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = node._attrsNS.get(attrkey, None)
if result is not None:
return result.value
if hasattr(node, '__imported__'):
raise DOMException('Cannot determine default namespace.')
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine default namespace.')
def findTargetNS(self, node):
"""Return the defined target namespace uri for the given node."""
attrget = self.getAttr
attrkey = (self.NS_XMLNS, 'xmlns')
DOCUMENT_NODE = node.DOCUMENT_NODE
ELEMENT_NODE = node.ELEMENT_NODE
while 1:
if node.nodeType != ELEMENT_NODE:
node = node.parentNode
continue
result = attrget(node, 'targetNamespace', default=None)
if result is not None:
return result
node = node.parentNode
if node.nodeType == DOCUMENT_NODE:
raise DOMException('Cannot determine target namespace.')
def getTypeRef(self, element):
"""Return (namespaceURI, name) for a type attribue of the given
element, or None if the element does not have a type attribute."""
typeattr = self.getAttr(element, 'type', default=None)
if typeattr is None:
return None
parts = typeattr.split(':', 1)
if len(parts) == 2:
nsuri = self.findNamespaceURI(parts[0], element)
else:
nsuri = self.findDefaultNS(element)
return (nsuri, parts[1])
def importNode(self, document, node, deep=0):
"""Implements (well enough for our purposes) DOM node import."""
nodetype = node.nodeType
if nodetype in (node.DOCUMENT_NODE, node.DOCUMENT_TYPE_NODE):
raise DOMException('Illegal node type for importNode')
if nodetype == node.ENTITY_REFERENCE_NODE:
deep = 0
clone = node.cloneNode(deep)
self._setOwnerDoc(document, clone)
clone.__imported__ = 1
return clone
def _setOwnerDoc(self, document, node):
node.ownerDocument = document
for child in node.childNodes:
self._setOwnerDoc(document, child)
def nsUriMatch(self, value, wanted, strict=0, tt=type(())):
"""Return a true value if two namespace uri values match."""
if value == wanted or (type(wanted) is tt) and value in wanted:
return 1
if not strict:
wanted = type(wanted) is tt and wanted or (wanted,)
value = value[-1:] != '/' and value or value[:-1]
for item in wanted:
if item == value or item[:-1] == value:
return 1
return 0
def createDocument(self, nsuri, qname, doctype=None):
"""Create a new writable DOM document object."""
impl = xml.dom.minidom.getDOMImplementation()
return impl.createDocument(nsuri, qname, doctype)
def loadDocument(self, data):
"""Load an xml file from a file-like object and return a DOM
document instance."""
return xml.dom.minidom.parse(data)
def loadFromURL(self, url):
"""Load an xml file from a URL and return a DOM document."""
if isfile(url) is True:
file = open(url, 'r')
else:
file = urlopen(url)
try:
result = self.loadDocument(file)
except Exception, ex:
file.close()
raise ParseError(('Failed to load document %s' %url,) + ex.args)
else:
file.close()
return result
# Rebind the class name to a shared singleton instance; all callers use
# this module-level DOM object rather than instantiating the class.
DOM = DOM()
class MessageInterface:
    '''Higher Level Interface, delegates to DOM singleton, must
    be subclassed and implement all methods that throw NotImplementedError.
    '''
    def __init__(self, sw):
        '''Constructor, May be extended, do not override.
           sw -- soapWriter instance
        '''
        self.sw = None
        # Hold the writer weakly (unless we were already given a weakref
        # or None) so this interface does not keep the writer alive.
        if type(sw) != weakref.ReferenceType and sw is not None:
            self.sw = weakref.ref(sw)
        else:
            self.sw = sw
    def AddCallback(self, func, *arglist):
        # Delegate to the SoapWriter; note self.sw() dereferences the
        # weakref and assumes the writer is still alive.
        self.sw().AddCallback(func, *arglist)
    def Known(self, obj):
        # Delegate object-identity tracking to the SoapWriter.
        return self.sw().Known(obj)
    def Forget(self, obj):
        # Delegate object-identity tracking to the SoapWriter.
        return self.sw().Forget(obj)
    def canonicalize(self):
        '''canonicalize the underlying DOM, and return as string.
        '''
        raise NotImplementedError, ''
    def createDocument(self, namespaceURI=SOAP.ENV, localName='Envelope'):
        '''create Document
        '''
        raise NotImplementedError, ''
    def createAppendElement(self, namespaceURI, localName):
        '''create and append element(namespaceURI,localName), and return
        the node.
        '''
        raise NotImplementedError, ''
    def findNamespaceURI(self, qualifiedName):
        # Resolve a qualified name to its namespace URI (subclass hook).
        raise NotImplementedError, ''
    def resolvePrefix(self, prefix):
        # Resolve a namespace prefix to its URI (subclass hook).
        raise NotImplementedError, ''
    def setAttributeNS(self, namespaceURI, localName, value):
        '''set attribute (namespaceURI, localName)=value
        '''
        raise NotImplementedError, ''
    def setAttributeType(self, namespaceURI, localName):
        '''set attribute xsi:type=(namespaceURI, localName)
        '''
        raise NotImplementedError, ''
    def setNamespaceAttribute(self, namespaceURI, prefix):
        '''set namespace attribute xmlns:prefix=namespaceURI
        '''
        raise NotImplementedError, ''
class ElementProxy(Base, MessageInterface):
'''
'''
_soap_env_prefix = 'SOAP-ENV'
_soap_enc_prefix = 'SOAP-ENC'
_zsi_prefix = 'ZSI'
_xsd_prefix = 'xsd'
_xsi_prefix = 'xsi'
_xml_prefix = 'xml'
_xmlns_prefix = 'xmlns'
_soap_env_nsuri = SOAP.ENV
_soap_enc_nsuri = SOAP.ENC
_zsi_nsuri = ZSI_SCHEMA_URI
_xsd_nsuri = SCHEMA.XSD3
_xsi_nsuri = SCHEMA.XSI3
_xml_nsuri = XMLNS.XML
_xmlns_nsuri = XMLNS.BASE
standard_ns = {\
_xml_prefix:_xml_nsuri,
_xmlns_prefix:_xmlns_nsuri
}
reserved_ns = {\
_soap_env_prefix:_soap_env_nsuri,
_soap_enc_prefix:_soap_enc_nsuri,
_zsi_prefix:_zsi_nsuri,
_xsd_prefix:_xsd_nsuri,
_xsi_prefix:_xsi_nsuri,
}
name = None
namespaceURI = None
def __init__(self, sw, message=None):
'''Initialize.
sw -- SoapWriter
'''
self._indx = 0
MessageInterface.__init__(self, sw)
Base.__init__(self)
self._dom = DOM
self.node = None
if type(message) in (types.StringType,types.UnicodeType):
self.loadFromString(message)
elif isinstance(message, ElementProxy):
self.node = message._getNode()
else:
self.node = message
self.processorNss = self.standard_ns.copy()
self.processorNss.update(self.reserved_ns)
def __str__(self):
return self.toString()
def evaluate(self, expression, processorNss=None):
'''expression -- XPath compiled expression
'''
from Ft.Xml import XPath
if not processorNss:
context = XPath.Context.Context(self.node, processorNss=self.processorNss)
else:
context = XPath.Context.Context(self.node, processorNss=processorNss)
nodes = expression.evaluate(context)
return map(lambda node: ElementProxy(self.sw,node), nodes)
#############################################
# Methods for checking/setting the
# classes (namespaceURI,name) node.
#############################################
def checkNode(self, namespaceURI=None, localName=None):
'''
namespaceURI -- namespace of element
localName -- local name of element
'''
namespaceURI = namespaceURI or self.namespaceURI
localName = localName or self.name
check = False
if localName and self.node:
check = self._dom.isElement(self.node, localName, namespaceURI)
if not check:
raise NamespaceError, 'unexpected node type %s, expecting %s' %(self.node, localName)
def setNode(self, node=None):
if node:
if isinstance(node, ElementProxy):
self.node = node._getNode()
else:
self.node = node
elif self.node:
node = self._dom.getElement(self.node, self.name, self.namespaceURI, default=None)
if not node:
raise NamespaceError, 'cant find element (%s,%s)' %(self.namespaceURI,self.name)
self.node = node
else:
#self.node = self._dom.create(self.node, self.name, self.namespaceURI, default=None)
self.createDocument(self.namespaceURI, localName=self.name, doctype=None)
self.checkNode()
#############################################
# Wrapper Methods for direct DOM Element Node access
#############################################
def _getNode(self):
return self.node
def _getElements(self):
return self._dom.getElements(self.node, name=None)
def _getOwnerDocument(self):
return self.node.ownerDocument or self.node
def _getUniquePrefix(self):
'''I guess we need to resolve all potential prefixes
because when the current node is attached it copies the
namespaces into the parent node.
'''
while 1:
self._indx += 1
prefix = 'ns%d' %self._indx
try:
self._dom.findNamespaceURI(prefix, self._getNode())
except DOMException, ex:
break
return prefix
def _getPrefix(self, node, nsuri):
'''
Keyword arguments:
node -- DOM Element Node
nsuri -- namespace of attribute value
'''
try:
if node and (node.nodeType == node.ELEMENT_NODE) and \
(nsuri == self._dom.findDefaultNS(node)):
return None
except DOMException, ex:
pass
if nsuri == XMLNS.XML:
return self._xml_prefix
if node.nodeType == Node.ELEMENT_NODE:
for attr in node.attributes.values():
if attr.namespaceURI == XMLNS.BASE \
and nsuri == attr.value:
return attr.localName
else:
if node.parentNode:
return self._getPrefix(node.parentNode, nsuri)
raise NamespaceError, 'namespaceURI "%s" is not defined' %nsuri
def _appendChild(self, node):
'''
Keyword arguments:
node -- DOM Element Node
'''
if node is None:
raise TypeError, 'node is None'
self.node.appendChild(node)
def _insertBefore(self, newChild, refChild):
'''
Keyword arguments:
child -- DOM Element Node to insert
refChild -- DOM Element Node
'''
self.node.insertBefore(newChild, refChild)
def _setAttributeNS(self, namespaceURI, qualifiedName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
qualifiedName -- qualified name of new attribute value
value -- value of attribute
'''
self.node.setAttributeNS(namespaceURI, qualifiedName, value)
#############################################
#General Methods
#############################################
def isFault(self):
'''check to see if this is a soap:fault message.
'''
return False
def getPrefix(self, namespaceURI):
try:
prefix = self._getPrefix(node=self.node, nsuri=namespaceURI)
except NamespaceError, ex:
prefix = self._getUniquePrefix()
self.setNamespaceAttribute(prefix, namespaceURI)
return prefix
def getDocument(self):
return self._getOwnerDocument()
def setDocument(self, document):
self.node = document
def importFromString(self, xmlString):
doc = self._dom.loadDocument(StringIO(xmlString))
node = self._dom.getElement(doc, name=None)
clone = self.importNode(node)
self._appendChild(clone)
def importNode(self, node):
if isinstance(node, ElementProxy):
node = node._getNode()
return self._dom.importNode(self._getOwnerDocument(), node, deep=1)
def loadFromString(self, data):
self.node = self._dom.loadDocument(StringIO(data))
def canonicalize(self):
return Canonicalize(self.node)
def toString(self):
return self.canonicalize()
def createDocument(self, namespaceURI, localName, doctype=None):
'''If specified must be a SOAP envelope, else may contruct an empty document.
'''
prefix = self._soap_env_prefix
if namespaceURI == self.reserved_ns[prefix]:
qualifiedName = '%s:%s' %(prefix,localName)
elif namespaceURI is localName is None:
self.node = self._dom.createDocument(None,None,None)
return
else:
raise KeyError, 'only support creation of document in %s' %self.reserved_ns[prefix]
document = self._dom.createDocument(nsuri=namespaceURI, qname=qualifiedName, doctype=doctype)
self.node = document.childNodes[0]
#set up reserved namespace attributes
for prefix,nsuri in self.reserved_ns.items():
self._setAttributeNS(namespaceURI=self._xmlns_nsuri,
qualifiedName='%s:%s' %(self._xmlns_prefix,prefix),
value=nsuri)
#############################################
#Methods for attributes
#############################################
def hasAttribute(self, namespaceURI, localName):
return self._dom.hasAttr(self._getNode(), name=localName, nsuri=namespaceURI)
def setAttributeType(self, namespaceURI, localName):
'''set xsi:type
Keyword arguments:
namespaceURI -- namespace of attribute value
localName -- name of new attribute value
'''
self.logger.debug('setAttributeType: (%s,%s)', namespaceURI, localName)
value = localName
if namespaceURI:
value = '%s:%s' %(self.getPrefix(namespaceURI),localName)
xsi_prefix = self.getPrefix(self._xsi_nsuri)
self._setAttributeNS(self._xsi_nsuri, '%s:type' %xsi_prefix, value)
def createAttributeNS(self, namespace, name, value):
document = self._getOwnerDocument()
attrNode = document.createAttributeNS(namespace, name, value)
def setAttributeNS(self, namespaceURI, localName, value):
'''
Keyword arguments:
namespaceURI -- namespace of attribute to create, None is for
attributes in no namespace.
localName -- local name of new attribute
value -- value of new attribute
'''
prefix = None
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except KeyError, ex:
prefix = 'ns2'
self.setNamespaceAttribute(prefix, namespaceURI)
qualifiedName = localName
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
self._setAttributeNS(namespaceURI, qualifiedName, value)
def setNamespaceAttribute(self, prefix, namespaceURI):
'''
Keyword arguments:
prefix -- xmlns prefix
namespaceURI -- value of prefix
'''
self._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
#############################################
#Methods for elements
#############################################
def createElementNS(self, namespace, qname):
'''
Keyword arguments:
namespace -- namespace of element to create
qname -- qualified name of new element
'''
document = self._getOwnerDocument()
node = document.createElementNS(namespace, qname)
return ElementProxy(self.sw, node)
def createAppendSetElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, then set it to be the current node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
node = self.createAppendElement(namespaceURI, localName, prefix=None)
node=node._getNode()
self._setNode(node._getNode())
def createAppendElement(self, namespaceURI, localName, prefix=None):
'''Create a new element (namespaceURI,name), append it
to current node, and return the newly created node.
Keyword arguments:
namespaceURI -- namespace of element to create
localName -- local name of new element
prefix -- if namespaceURI is not defined, declare prefix. defaults
to 'ns1' if left unspecified.
'''
declare = False
qualifiedName = localName
if namespaceURI:
try:
prefix = self.getPrefix(namespaceURI)
except:
declare = True
prefix = prefix or self._getUniquePrefix()
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
if declare:
node._setAttributeNS(XMLNS.BASE, 'xmlns:%s' %prefix, namespaceURI)
self._appendChild(node=node._getNode())
return node
def createInsertBefore(self, namespaceURI, localName, refChild):
qualifiedName = localName
prefix = self.getPrefix(namespaceURI)
if prefix:
qualifiedName = '%s:%s' %(prefix, localName)
node = self.createElementNS(namespaceURI, qualifiedName)
self._insertBefore(newChild=node._getNode(), refChild=refChild._getNode())
return node
def getElement(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of element
localName -- local name of element
'''
node = self._dom.getElement(self.node, localName, namespaceURI, default=None)
if node:
return ElementProxy(self.sw, node)
return None
def getAttributeValue(self, namespaceURI, localName):
'''
Keyword arguments:
namespaceURI -- namespace of attribute
localName -- local name of attribute
'''
if self.hasAttribute(namespaceURI, localName):
attr = self.node.getAttributeNodeNS(namespaceURI,localName)
return attr.value
return None
def getValue(self):
return self._dom.getElementText(self.node, preserve_ws=True)
#############################################
#Methods for text nodes
#############################################
def createAppendTextNode(self, pyobj):
node = self.createTextNode(pyobj)
self._appendChild(node=node._getNode())
return node
def createTextNode(self, pyobj):
document = self._getOwnerDocument()
node = document.createTextNode(pyobj)
return ElementProxy(self.sw, node)
#############################################
#Methods for retrieving namespaceURI's
#############################################
def findNamespaceURI(self, qualifiedName):
parts = SplitQName(qualifiedName)
element = self._getNode()
if len(parts) == 1:
return (self._dom.findTargetNS(element), value)
return self._dom.findNamespaceURI(parts[0], element)
def resolvePrefix(self, prefix):
element = self._getNode()
return self._dom.findNamespaceURI(prefix, element)
def getSOAPEnvURI(self):
return self._soap_env_nsuri
def isEmpty(self):
return not self.node
class Collection(UserDict):
    """Ordered, name-keyed collection helper.

    Items live both in the underlying dict (keyed by name) and in a list
    preserving insertion order, so integer subscripts index positionally
    while other keys look up by name."""
    default = lambda self,k: k.name

    def __init__(self, parent, key=None):
        UserDict.__init__(self)
        # Parent is held weakly to avoid reference cycles.
        self.parent = weakref.ref(parent)
        self.list = []
        self._func = key or self.default

    def __getitem__(self, key):
        # Integers address by insertion order, anything else by name.
        if type(key) is type(1):
            return self.list[key]
        return self.data[key]

    def __setitem__(self, key, item):
        item.parent = weakref.ref(self)
        self.list.append(item)
        self.data[key] = item

    def keys(self):
        return map(self._func, self.list)

    def items(self):
        return map(lambda i: (self._func(i), i), self.list)

    def values(self):
        return self.list
class CollectionNS(UserDict):
    """Helper class for maintaining ordered named collections.

    Like Collection, but items are additionally partitioned by their
    targetNamespace: self.data maps namespace -> {name: item}.
    """
    # default key function: index items by their name attribute
    default = lambda self,k: k.name
    def __init__(self, parent, key=None):
        UserDict.__init__(self)
        # weak reference avoids an ownership cycle with the parent
        self.parent = weakref.ref(parent)
        self.targetNamespace = None
        self.list = []
        self._func = key or self.default
    def __getitem__(self, key):
        # refresh the default namespace from the parent on every lookup
        self.targetNamespace = self.parent().targetNamespace
        if type(key) is types.IntType:
            # integer subscripts index by insertion order
            return self.list[key]
        elif self.__isSequence(key):
            # (namespaceURI, name) pair
            nsuri,name = key
            return self.data[nsuri][name]
        # bare name: resolved inside the parent's targetNamespace
        return self.data[self.parent().targetNamespace][key]
    def __setitem__(self, key, item):
        item.parent = weakref.ref(self)
        self.list.append(item)
        # file the item under its own targetNamespace when it declares one,
        # otherwise under the parent's
        targetNamespace = getattr(item, 'targetNamespace', self.parent().targetNamespace)
        if not self.data.has_key(targetNamespace):
            self.data[targetNamespace] = {}
        self.data[targetNamespace][key] = item
    def __isSequence(self, key):
        # a namespace-qualified key is a 2-item tuple or list
        return (type(key) in (types.TupleType,types.ListType) and len(key) == 2)
    def keys(self):
        # NOTE(review): returns a list of per-namespace lists of
        # (targetNamespace, name) pairs, not a flat list of keys
        keys = []
        for tns in self.data.keys():
            keys.append(map(lambda i: (tns,self._func(i)), self.data[tns].values()))
        return keys
    def items(self):
        return map(lambda i: (self._func(i), i), self.list)
    def values(self):
        return self.list
# This is a runtime guerilla patch for pulldom (used by minidom) so
# that xml namespace declaration attributes are not lost in parsing.
# We need them to do correct QName linking for XML Schema and WSDL.
# The patch has been submitted to SF for the next Python version.
from xml.dom.pulldom import PullDOM, START_ELEMENT
if 1:
    # Patch PullDOM so that xmlns declaration attributes seen by
    # startPrefixMapping are stashed and re-attached to the element in
    # startElementNS, instead of being dropped.
    def startPrefixMapping(self, prefix, uri):
        # accumulate (prefix, uri) pairs until the next startElementNS
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or ''
    PullDOM.startPrefixMapping = startPrefixMapping

    def startElementNS(self, name, tagName , attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            # merge the buffered xmlns declarations into this element's
            # attribute set, then reset the buffer for the next element
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)
        # re-create every attribute on the new DOM node, choosing a
        # qualified name appropriate to its namespace
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                # namespace declaration: xmlns or xmlns:prefix
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                # namespaced attribute: qualify with the declared prefix
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                # plain, unprefixed attribute
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    PullDOM.startElementNS = startElementNS
#
# This is a runtime guerilla patch for minidom so
# that xmlns prefixed attributes don't raise AttributeErrors
# during cloning.
#
# Namespace declarations can appear in any start-tag, must look for xmlns
# prefixed attribute names during cloning.
#
# key (attr.namespaceURI, tag)
# ('http://www.w3.org/2000/xmlns/', u'xsd') <xml.dom.minidom.Attr instance at 0x82227c4>
# ('http://www.w3.org/2000/xmlns/', 'xmlns') <xml.dom.minidom.Attr instance at 0x8414b3c>
#
# xml.dom.minidom.Attr.nodeName = xmlns:xsd
# xml.dom.minidom.Attr.value = = http://www.w3.org/2001/XMLSchema
if 1:
    def _clone_node(node, deep, newOwnerDocument):
        """
        Clone a node and give it the new owner document.
        Called by Node.cloneNode and Document.importNode

        Replacement for xml.dom.minidom._clone_node that also preserves
        xmlns-prefixed attributes (namespace declarations) when cloning.
        """
        # cloning within the same document vs. importing into another one
        if node.ownerDocument.isSameNode(newOwnerDocument):
            operation = xml.dom.UserDataHandler.NODE_CLONED
        else:
            operation = xml.dom.UserDataHandler.NODE_IMPORTED
        if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
            clone = newOwnerDocument.createElementNS(node.namespaceURI,
                                                     node.nodeName)
            for attr in node.attributes.values():
                clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
                # Namespace declarations can appear in any start-tag, so look
                # for xmlns-prefixed attribute names when fetching the clone.
                prefix, tag = xml.dom.minidom._nssplit(attr.nodeName)
                if prefix == 'xmlns':
                    a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
                elif prefix:
                    a = clone.getAttributeNodeNS(attr.namespaceURI, tag)
                else:
                    a = clone.getAttributeNodeNS(attr.namespaceURI, attr.nodeName)
                a.specified = attr.specified
            if deep:
                for child in node.childNodes:
                    c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
                    clone.appendChild(c)
        elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_FRAGMENT_NODE:
            clone = newOwnerDocument.createDocumentFragment()
            if deep:
                for child in node.childNodes:
                    c = xml.dom.minidom._clone_node(child, deep, newOwnerDocument)
                    clone.appendChild(c)
        elif node.nodeType == xml.dom.minidom.Node.TEXT_NODE:
            clone = newOwnerDocument.createTextNode(node.data)
        elif node.nodeType == xml.dom.minidom.Node.CDATA_SECTION_NODE:
            clone = newOwnerDocument.createCDATASection(node.data)
        elif node.nodeType == xml.dom.minidom.Node.PROCESSING_INSTRUCTION_NODE:
            clone = newOwnerDocument.createProcessingInstruction(node.target,
                                                                 node.data)
        elif node.nodeType == xml.dom.minidom.Node.COMMENT_NODE:
            clone = newOwnerDocument.createComment(node.data)
        elif node.nodeType == xml.dom.minidom.Node.ATTRIBUTE_NODE:
            clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
                                                       node.nodeName)
            clone.specified = True
            clone.value = node.value
        elif node.nodeType == xml.dom.minidom.Node.DOCUMENT_TYPE_NODE:
            assert node.ownerDocument is not newOwnerDocument
            operation = xml.dom.UserDataHandler.NODE_IMPORTED
            clone = newOwnerDocument.implementation.createDocumentType(
                node.name, node.publicId, node.systemId)
            clone.ownerDocument = newOwnerDocument
            if deep:
                clone.entities._seq = []
                clone.notations._seq = []
                for n in node.notations._seq:
                    notation = xml.dom.minidom.Notation(n.nodeName, n.publicId, n.systemId)
                    notation.ownerDocument = newOwnerDocument
                    clone.notations._seq.append(notation)
                    if hasattr(n, '_call_user_data_handler'):
                        n._call_user_data_handler(operation, n, notation)
                for e in node.entities._seq:
                    entity = xml.dom.minidom.Entity(e.nodeName, e.publicId, e.systemId,
                                                    e.notationName)
                    entity.actualEncoding = e.actualEncoding
                    entity.encoding = e.encoding
                    entity.version = e.version
                    entity.ownerDocument = newOwnerDocument
                    clone.entities._seq.append(entity)
                    if hasattr(e, '_call_user_data_handler'):
                        # BUG FIX: the original passed `n` (the loop variable
                        # of the *notations* loop above) as the source node;
                        # the entity `e` is the node being cloned here.  This
                        # matches the corrected CPython minidom.
                        e._call_user_data_handler(operation, e, entity)
        else:
            # Note the cloning of Document and DocumentType nodes is
            # implementation specific. minidom handles those cases
            # directly in the cloneNode() methods.
            raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
        # Check for _call_user_data_handler() since this could conceivably
        # be used with other DOM implementations (one of the FourThought
        # DOMs, perhaps?).
        if hasattr(node, '_call_user_data_handler'):
            node._call_user_data_handler(operation, node, clone)
        return clone
    xml.dom.minidom._clone_node = _clone_node
| Python |
# Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id: XMLSchema.py,v 1.53 2005/02/18 13:50:14 warnes Exp $"
import types, weakref, urllib, sys
from threading import RLock
from Namespaces import XMLNS
from Utility import DOM, DOMException, Collection, SplitQName
from StringIO import StringIO
def GetSchema(component):
    """Convenience function for finding the parent XMLSchema instance.

    Walks _parent weakrefs upward from component until an XMLSchema is hit.
    """
    node = component
    while not isinstance(node, XMLSchema):
        node = node._parent()
    return node
class SchemaReader:
    """A SchemaReader creates XMLSchema objects from urls and xml data.
    """

    def __init__(self, domReader=None, base_url=None):
        """domReader -- class must implement DOMAdapterInterface
           base_url -- base url string
        """
        self.__base_url = base_url
        self.__readerClass = domReader
        if not self.__readerClass:
            self.__readerClass = DOMAdapter
        self._includes = {}   # schemaLocation keyed schema documents
        self._imports = {}    # targetNamespace keyed schema documents

    def __setImports(self, schema):
        """Add dictionary of imports to schema instance.
           schema -- XMLSchema instance
        """
        for ns,val in schema.imports.items():
            if self._imports.has_key(ns):
                schema.addImportSchema(self._imports[ns])

    def __setIncludes(self, schema):
        """Add dictionary of includes to schema instance.
           schema -- XMLSchema instance
        """
        for schemaLocation, val in schema.includes.items():
            if self._includes.has_key(schemaLocation):
                # BUG FIX: the original indexed self._imports (which is keyed
                # by targetNamespace) with a schemaLocation, and passed only
                # one argument although addIncludeSchema(schemaLocation,
                # schema) requires two -- so a pre-registered include always
                # failed.  Use the _includes dictionary and pass both args.
                schema.addIncludeSchema(schemaLocation, self._includes[schemaLocation])

    def addSchemaByLocation(self, location, schema):
        """provide reader with schema document for a location.
        """
        self._includes[location] = schema

    def addSchemaByNamespace(self, schema):
        """provide reader with schema document for a targetNamespace.
        """
        self._imports[schema.targetNamespace] = schema

    def loadFromNode(self, parent, element):
        """element -- DOM node or document
           parent -- WSDLAdapter instance
        """
        reader = self.__readerClass(element)
        schema = XMLSchema(parent)
        #HACK to keep a reference
        schema.wsdl = parent
        schema.setBaseUrl(self.__base_url)
        schema.load(reader)
        return schema

    def loadFromStream(self, file, url=None):
        """Return an XMLSchema instance loaded from a file object.
           file -- file object
           url -- base location for resolving imports/includes.
        """
        reader = self.__readerClass()
        reader.loadDocument(file)
        schema = XMLSchema()
        if url is not None:
            schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromString(self, data):
        """Return an XMLSchema instance loaded from an XML string.
           data -- XML string
        """
        return self.loadFromStream(StringIO(data))

    def loadFromURL(self, url):
        """Return an XMLSchema instance loaded from the given url.
           url -- URL to dereference
        """
        reader = self.__readerClass()
        if self.__base_url:
            url = urllib.basejoin(self.__base_url,url)
        reader.loadFromURL(url)
        schema = XMLSchema()
        schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromFile(self, filename):
        """Return an XMLSchema instance loaded from the given file.
           filename -- name of file to open
        """
        if self.__base_url:
            filename = urllib.basejoin(self.__base_url,filename)
        file = open(filename, 'rb')
        try:
            schema = self.loadFromStream(file, filename)
        finally:
            # close the file even if parsing raised
            file.close()
        return schema
class SchemaError(Exception):
    """Raised for structural problems found while loading an XML schema."""
###########################
# DOM Utility Adapters
##########################
class DOMAdapterInterface:
    """Abstract interface for DOM adapters; every method must be overridden
    by a concrete adapter (see DOMAdapter below).
    """
    def hasattr(self, attr, ns=None):
        """return true if node has attribute
           attr -- attribute to check for
           ns -- namespace of attribute, by default None
        """
        raise NotImplementedError, 'adapter method not implemented'
    def getContentList(self, *contents):
        """returns an ordered list of child nodes
           *contents -- list of node names to return
        """
        raise NotImplementedError, 'adapter method not implemented'
    def setAttributeDictionary(self, attributes):
        """set attribute dictionary
        """
        raise NotImplementedError, 'adapter method not implemented'
    def getAttributeDictionary(self):
        """returns a dict of node's attributes
        """
        raise NotImplementedError, 'adapter method not implemented'
    def getNamespace(self, prefix):
        """returns namespace referenced by prefix.
        """
        raise NotImplementedError, 'adapter method not implemented'
    def getTagName(self):
        """returns tagName of node
        """
        raise NotImplementedError, 'adapter method not implemented'
    def getParentNode(self):
        """returns parent element in DOMAdapter or None
        """
        raise NotImplementedError, 'adapter method not implemented'
    def loadDocument(self, file):
        """load a Document from a file object
           file --
        """
        raise NotImplementedError, 'adapter method not implemented'
    def loadFromURL(self, url):
        """load a Document from an url
           url -- URL to dereference
        """
        raise NotImplementedError, 'adapter method not implemented'
class DOMAdapter(DOMAdapterInterface):
    """Adapter for ZSI.Utility.DOM
    """
    def __init__(self, node=None):
        """Reset all instance variables.
           element -- DOM document, node, or None
        """
        # accept either a Document (unwrap to its root element) or a node
        if hasattr(node, 'documentElement'):
            self.__node = node.documentElement
        else:
            self.__node = node
        self.__attributes = None   # lazily built by setAttributeDictionary
    def hasattr(self, attr, ns=None):
        """attr -- attribute
           ns -- optional namespace, None means unprefixed attribute.
        """
        if not self.__attributes:
            self.setAttributeDictionary()
        if ns:
            return self.__attributes.get(ns,{}).has_key(attr)
        return self.__attributes.has_key(attr)
    def getContentList(self, *contents):
        # return child elements whose local name is in contents, each
        # wrapped in a new adapter
        nodes = []
        ELEMENT_NODE = self.__node.ELEMENT_NODE
        for child in DOM.getElements(self.__node, None):
            if child.nodeType == ELEMENT_NODE and\
               SplitQName(child.tagName)[1] in contents:
                nodes.append(child)
        return map(self.__class__, nodes)
    def setAttributeDictionary(self):
        # NOTE(review): reads minidom's private _attrs mapping directly --
        # ties this adapter to the minidom implementation
        self.__attributes = {}
        for v in self.__node._attrs.values():
            self.__attributes[v.nodeName] = v.nodeValue
    def getAttributeDictionary(self):
        if not self.__attributes:
            self.setAttributeDictionary()
        return self.__attributes
    def getTagName(self):
        return self.__node.tagName
    def getParentNode(self):
        if self.__node.parentNode.nodeType == self.__node.ELEMENT_NODE:
            return DOMAdapter(self.__node.parentNode)
        return None
    def getNamespace(self, prefix):
        """prefix -- deference namespace prefix in node's context.
           Ascends parent nodes until found.
        """
        namespace = None
        if prefix == 'xmlns':
            # default namespace declaration
            namespace = DOM.findDefaultNS(prefix, self.__node)
        else:
            try:
                namespace = DOM.findNamespaceURI(prefix, self.__node)
            except DOMException, ex:
                # the xml prefix never needs a declaration; anything else
                # undeclared is a schema error
                if prefix != 'xml':
                    raise SchemaError, '%s namespace not declared for %s'\
                        %(prefix, self.__node._get_tagName())
                namespace = XMLNS.XML
        return namespace
    def loadDocument(self, file):
        self.__node = DOM.loadDocument(file)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement
    def loadFromURL(self, url):
        self.__node = DOM.loadFromURL(url)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement
class XMLBase:
    """Mixin giving subclasses an indented, recursive __str__ dump.

    The class-level indent counter and lock make nested dumps line up
    when instances print each other from their attribute values.
    """
    tag = None
    __indent = 0
    __rlock = RLock()

    def __str__(self):
        XMLBase.__rlock.acquire()
        XMLBase.__indent += 1
        parts = ["<" + str(self.__class__) + '>\n']
        for attr_name, attr_value in self.__dict__.items():
            parts.append("%s* %s = %s\n" % (XMLBase.__indent * ' ', attr_name, attr_value))
        XMLBase.__indent -= 1
        XMLBase.__rlock.release()
        return ''.join(parts)
"""Marker Interface: can determine something about an instances properties by using
the provided convenience functions.
"""
class DefinitionMarker:
"""marker for definitions
"""
pass
class DeclarationMarker:
"""marker for declarations
"""
pass
class AttributeMarker:
"""marker for attributes
"""
pass
class AttributeGroupMarker:
"""marker for attribute groups
"""
pass
class WildCardMarker:
"""marker for wildcards
"""
pass
class ElementMarker:
"""marker for wildcards
"""
pass
class ReferenceMarker:
"""marker for references
"""
pass
class ModelGroupMarker:
"""marker for model groups
"""
pass
class AllMarker(ModelGroupMarker):
"""marker for all model group
"""
pass
class ChoiceMarker(ModelGroupMarker):
"""marker for choice model group
"""
pass
class SequenceMarker(ModelGroupMarker):
"""marker for sequence model group
"""
pass
class ExtensionMarker:
"""marker for extensions
"""
pass
class RestrictionMarker:
"""marker for restrictions
"""
facets = ['enumeration', 'length', 'maxExclusive', 'maxInclusive',\
'maxLength', 'minExclusive', 'minInclusive', 'minLength',\
'pattern', 'fractionDigits', 'totalDigits', 'whiteSpace']
class SimpleMarker:
"""marker for simple type information
"""
pass
class ListMarker:
"""marker for simple type list
"""
pass
class UnionMarker:
"""marker for simple type Union
"""
pass
class ComplexMarker:
"""marker for complex type information
"""
pass
class LocalMarker:
"""marker for complex type information
"""
pass
class MarkerInterface:
    """Convenience predicates reporting which of the *Marker classes
    (defined above) an instance mixes in.
    """
    def isDefinition(self):
        return isinstance(self, DefinitionMarker)
    def isDeclaration(self):
        return isinstance(self, DeclarationMarker)
    def isAttribute(self):
        return isinstance(self, AttributeMarker)
    def isAttributeGroup(self):
        return isinstance(self, AttributeGroupMarker)
    def isElement(self):
        return isinstance(self, ElementMarker)
    def isReference(self):
        return isinstance(self, ReferenceMarker)
    def isWildCard(self):
        return isinstance(self, WildCardMarker)
    def isModelGroup(self):
        return isinstance(self, ModelGroupMarker)
    def isAll(self):
        return isinstance(self, AllMarker)
    def isChoice(self):
        return isinstance(self, ChoiceMarker)
    def isSequence(self):
        return isinstance(self, SequenceMarker)
    def isExtension(self):
        return isinstance(self, ExtensionMarker)
    def isRestriction(self):
        return isinstance(self, RestrictionMarker)
    def isSimple(self):
        return isinstance(self, SimpleMarker)
    def isComplex(self):
        return isinstance(self, ComplexMarker)
    def isLocal(self):
        return isinstance(self, LocalMarker)
    def isList(self):
        return isinstance(self, ListMarker)
    def isUnion(self):
        return isinstance(self, UnionMarker)
##########################################################
# Schema Components
#########################################################
class XMLSchemaComponent(XMLBase, MarkerInterface):
    """Base class for all schema components.

       class variables:
           required -- list of required attributes
           attributes -- dict of default attribute values, including None.
               Value can be a function for runtime dependencies.
           contents -- dict of namespace keyed content lists.
               'xsd' content of xsd namespace.
           xmlns_key -- key for declared xmlns namespace.
           xmlns -- xmlns is special prefix for namespace dictionary
           xml -- special xml prefix for xml namespace.
    """
    required = []
    attributes = {}
    contents = {}
    xmlns_key = ''
    xmlns = 'xmlns'
    xml = 'xml'

    def __init__(self, parent=None):
        """parent -- parent instance
           instance variables:
               attributes -- dictionary of node's attributes
        """
        self.attributes = None
        self._parent = parent
        if self._parent:
            # hold the parent through a weakref to avoid reference cycles
            self._parent = weakref.ref(parent)
        # sanity check: subclasses must keep the class-variable containers
        # the same types as the base class declares
        if not self.__class__ == XMLSchemaComponent\
           and not (type(self.__class__.required) == type(XMLSchemaComponent.required)\
           and type(self.__class__.attributes) == type(XMLSchemaComponent.attributes)\
           and type(self.__class__.contents) == type(XMLSchemaComponent.contents)):
            raise RuntimeError, 'Bad type for a class variable in %s' %self.__class__

    def getItemTrace(self):
        """Returns a node trace up to the <schema> item.
        """
        item, path, name, ref = self, [], 'name', 'ref'
        # walk up the parent chain, recording a readable tag per level
        while not isinstance(item,XMLSchema) and not isinstance(item,WSDLToolsAdapter):
            attr = item.getAttribute(name)
            if attr is None:
                attr = item.getAttribute(ref)
                if attr is None: path.append('<%s>' %(item.tag))
                else: path.append('<%s ref="%s">' %(item.tag, attr))
            else:
                path.append('<%s name="%s">' %(item.tag,attr))
            item = item._parent()
        try:
            tns = item.getTargetNamespace()
        except:
            # root item may not expose getTargetNamespace; tolerate it
            tns = ''
        path.append('<%s targetNamespace="%s">' %(item.tag, tns))
        path.reverse()
        return ''.join(path)

    def getTargetNamespace(self):
        """return targetNamespace
        """
        parent = self
        targetNamespace = 'targetNamespace'
        tns = self.attributes.get(targetNamespace)
        # ascend until some ancestor declares a targetNamespace
        while not tns:
            parent = parent._parent()
            tns = parent.attributes.get(targetNamespace)
        return tns

    def getAttributeDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute('attr_decl', attribute)

    def getAttributeGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute('attr_groups', attribute)

    def getTypeDefinition(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute('types', attribute)

    def getElementDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. element).
           collection -- check elements collection in parent Schema instance.
        """
        return self.getQNameAttribute('elements', attribute)

    def getModelGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. ref).
           collection -- check model_group collection in parent Schema instance.
        """
        return self.getQNameAttribute('model_groups', attribute)

    def getQNameAttribute(self, collection, attribute):
        """returns object instance representing QName --> (namespace,name),
           or if does not exist return None.
           attribute -- an information item attribute, with a QName value.
           collection -- collection in parent Schema instance to search.
        """
        obj = None
        tdc = self.attributes.get(attribute)
        if tdc:
            parent = GetSchema(self)
            targetNamespace = tdc.getTargetNamespace()
            if parent.targetNamespace == targetNamespace:
                # QName lives in this schema's own namespace
                item = tdc.getName()
                try:
                    obj = getattr(parent, collection)[item]
                except KeyError, ex:
                    raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                        %(targetNamespace, collection, item)
            elif parent.imports.has_key(targetNamespace):
                # QName lives in an imported schema
                schema = parent.imports[targetNamespace].getSchema()
                item = tdc.getName()
                try:
                    obj = getattr(schema, collection)[item]
                except KeyError, ex:
                    raise KeyError, "targetNamespace(%s) collection(%s) has no item(%s)"\
                        %(targetNamespace, collection, item)
        return obj

    def getXMLNS(self, prefix=None):
        """deference prefix or by default xmlns, returns namespace.
        """
        if prefix == XMLSchemaComponent.xml:
            # the xml prefix is always bound to the XML namespace
            return XMLNS.XML
        parent = self
        ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                XMLSchemaComponent.xmlns_key)
        # ascend the parent chain until the prefix is declared
        while not ns:
            parent = parent._parent()
            ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                    XMLSchemaComponent.xmlns_key)
            if not ns and isinstance(parent, WSDLToolsAdapter):
                # reached the document root without a declaration
                raise SchemaError, 'unknown prefix %s' %prefix
        return ns

    def getAttribute(self, attribute):
        """return requested attribute or None
        """
        return self.attributes.get(attribute)

    def setAttributes(self, node):
        """Sets up attribute dictionary, checks for required attributes and
           sets default attribute values. attr is for default attribute values
           determined at runtime.

           structure of attributes dictionary
               ['xmlns'][xmlns_key] -- xmlns namespace
               ['xmlns'][prefix] -- declared namespace prefix
               [namespace][prefix] -- attributes declared in a namespace
               [attribute] -- attributes w/o prefix, default namespaces do
                   not directly apply to attributes, ie Name can't collide
                   with QName.
        """
        self.attributes = {XMLSchemaComponent.xmlns:{}}
        for k,v in node.getAttributeDictionary().items():
            prefix,value = SplitQName(k)
            if value == XMLSchemaComponent.xmlns:
                # xmlns declaration: file under the 'xmlns' sub-dict
                self.attributes[value][prefix or XMLSchemaComponent.xmlns_key] = v
            elif prefix:
                # namespace-qualified attribute
                ns = node.getNamespace(prefix)
                if not ns:
                    raise SchemaError, 'no namespace for attribute prefix %s'\
                        %prefix
                if not self.attributes.has_key(ns):
                    self.attributes[ns] = {}
                elif self.attributes[ns].has_key(value):
                    raise SchemaError, 'attribute %s declared multiple times in %s'\
                        %(value, ns)
                self.attributes[ns][value] = v
            elif not self.attributes.has_key(value):
                # plain unprefixed attribute
                self.attributes[value] = v
            else:
                raise SchemaError, 'attribute %s declared multiple times' %value
        # the WSDL adapter carries attributes this schema machinery does
        # not know about, so skip validation for it
        if not isinstance(self, WSDLToolsAdapter):
            self.__checkAttributes()
        self.__setAttributeDefaults()

        #set QNames
        for k in ['type', 'element', 'base', 'ref', 'substitutionGroup', 'itemType']:
            if self.attributes.has_key(k):
                prefix, value = SplitQName(self.attributes.get(k))
                self.attributes[k] = \
                    TypeDescriptionComponent((self.getXMLNS(prefix), value))

        #Union, memberTypes is a whitespace separated list of QNames
        for k in ['memberTypes']:
            if self.attributes.has_key(k):
                qnames = self.attributes[k]
                self.attributes[k] = []
                for qname in qnames.split():
                    prefix, value = SplitQName(qname)
                    self.attributes['memberTypes'].append(\
                        TypeDescriptionComponent(\
                            (self.getXMLNS(prefix), value)))

    def getContents(self, node):
        """retrieve xsd contents
        """
        return node.getContentList(*self.__class__.contents['xsd'])

    def __setAttributeDefaults(self):
        """Looks for default values for unset attributes.  If
           class variable representing attribute is None, then
           it must be defined as an instance variable.
        """
        for k,v in self.__class__.attributes.items():
            if v and not self.attributes.has_key(k):
                if isinstance(v, types.FunctionType):
                    # runtime-computed default
                    self.attributes[k] = v(self)
                else:
                    self.attributes[k] = v

    def __checkAttributes(self):
        """Checks that required attributes have been defined,
           attributes w/default cannot be required.   Checks
           all defined attributes are legal, attribute
           references are not subject to this test.
        """
        for a in self.__class__.required:
            if not self.attributes.has_key(a):
                raise SchemaError,\
                    'class instance %s, missing required attribute %s'\
                    %(self.__class__, a)
        for a in self.attributes.keys():
            if (a not in (XMLSchemaComponent.xmlns, XMLNS.XML)) and\
               (a not in self.__class__.attributes.keys()) and not\
               (self.isAttribute() and self.isReference()):
                raise SchemaError, '%s, unknown attribute(%s,%s)' \
                    %(self.getItemTrace(), a, self.attributes[a])
class WSDLToolsAdapter(XMLSchemaComponent):
    """Adapter exposing a WSDL document node's attributes as a schema
    component, so schema machinery can treat the WSDL root as a parent.
    """
    attributes = {'name':None, 'targetNamespace':None}
    tag = 'definitions'

    def __init__(self, wsdl):
        XMLSchemaComponent.__init__(self, parent=wsdl)
        self.setAttributes(DOMAdapter(wsdl.document))

    def getImportSchemas(self):
        """Return the parent WSDLTools.WSDL types Collection."""
        return self._parent().types
class Notation(XMLSchemaComponent):
"""<notation>
parent:
schema
attributes:
id -- ID
name -- NCName, Required
public -- token, Required
system -- anyURI
contents:
annotation?
"""
required = ['name', 'public']
attributes = {'id':None, 'name':None, 'public':None, 'system':None}
contents = {'xsd':('annotation')}
tag = 'notation'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class Annotation(XMLSchemaComponent):
    """<annotation>
       parent:
           all,any,anyAttribute,attribute,attributeGroup,choice,complexContent,
           complexType,element,extension,field,group,import,include,key,keyref,
           list,notation,redefine,restriction,schema,selector,simpleContent,
           simpleType,union,unique
       attributes:
           id -- ID
       contents:
           (documentation | appinfo)*
    """
    attributes = {'id':None}
    contents = {'xsd':('documentation', 'appinfo')}
    tag = 'annotation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        """Read attributes and children; documentation/appinfo children are
           recognized but currently not retained.
        """
        self.setAttributes(node)
        collected = []
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            if tag in ('documentation', 'appinfo'):
                # recognized content, intentionally skipped for now
                continue
            raise SchemaError('Unknown component (%s)' %(child.getTagName()))
        self.content = tuple(collected)
class Documentation(XMLSchemaComponent):
    """<documentation>
       parent:
           annotation
       attributes:
           source, anyURI
           xml:lang, language
       contents:
           mixed, any
    """
    attributes = {'source':None, 'xml:lang':None}
    contents = {'xsd':('mixed', 'any')}
    tag = 'documentation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        """Read attributes and children; mixed/any content is recognized
           but currently not retained.
        """
        self.setAttributes(node)
        collected = []
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            if tag in ('mixed', 'any'):
                # recognized content, intentionally skipped for now
                continue
            raise SchemaError('Unknown component (%s)' %(child.getTagName()))
        self.content = tuple(collected)
class Appinfo(XMLSchemaComponent):
    """<appinfo>
       parent:
           annotation
       attributes:
           source, anyURI
       contents:
           mixed, any
    """
    attributes = {'source':None, 'anyURI':None}
    contents = {'xsd':('mixed', 'any')}
    tag = 'appinfo'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        """Read attributes and children; mixed/any content is recognized
           but currently not retained.
        """
        self.setAttributes(node)
        collected = []
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            if tag in ('mixed', 'any'):
                # recognized content, intentionally skipped for now
                continue
            raise SchemaError('Unknown component (%s)' %(child.getTagName()))
        self.content = tuple(collected)
class XMLSchemaFake:
    # Temporary stand-in for the benefit of WSDL until the real XMLSchema works.
    def __init__(self, element):
        self.element = element
        self.targetNamespace = DOM.getAttr(element, 'targetNamespace')
class XMLSchema(XMLSchemaComponent):
"""A schema is a collection of schema components derived from one
or more schema documents, that is, one or more <schema> element
information items. It represents the abstract notion of a schema
rather than a single schema document (or other representation).
<schema>
parent:
ROOT
attributes:
id -- ID
version -- token
xml:lang -- language
targetNamespace -- anyURI
attributeFormDefault -- 'qualified' | 'unqualified', 'unqualified'
elementFormDefault -- 'qualified' | 'unqualified', 'unqualified'
blockDefault -- '#all' | list of
('substitution | 'extension' | 'restriction')
finalDefault -- '#all' | list of
('extension' | 'restriction' | 'list' | 'union')
contents:
((include | import | redefine | annotation)*,
(attribute, attributeGroup, complexType, element, group,
notation, simpleType)*, annotation*)*
attributes -- schema attributes
imports -- import statements
includes -- include statements
redefines --
types -- global simpleType, complexType definitions
elements -- global element declarations
attr_decl -- global attribute declarations
attr_groups -- attribute Groups
model_groups -- model Groups
notations -- global notations
"""
attributes = {'id':None,
'version':None,
'xml:lang':None,
'targetNamespace':None,
'attributeFormDefault':'unqualified',
'elementFormDefault':'unqualified',
'blockDefault':None,
'finalDefault':None}
contents = {'xsd':('include', 'import', 'redefine', 'annotation', 'attribute',\
'attributeGroup', 'complexType', 'element', 'group',\
'notation', 'simpleType', 'annotation')}
empty_namespace = ''
tag = 'schema'
def __init__(self, parent=None):
"""parent --
instance variables:
targetNamespace -- schema's declared targetNamespace, or empty string.
_imported_schemas -- namespace keyed dict of schema dependencies, if
a schema is provided instance will not resolve import statement.
_included_schemas -- schemaLocation keyed dict of component schemas,
if schema is provided instance will not resolve include statement.
_base_url -- needed for relative URLs support, only works with URLs
relative to initial document.
includes -- collection of include statements
imports -- collection of import statements
elements -- collection of global element declarations
types -- collection of global type definitions
attr_decl -- collection of global attribute declarations
attr_groups -- collection of global attribute group definitions
model_groups -- collection of model group definitions
notations -- collection of notations
"""
self.targetNamespace = None
XMLSchemaComponent.__init__(self, parent)
f = lambda k: k.attributes['name']
ns = lambda k: k.attributes['namespace']
sl = lambda k: k.attributes['schemaLocation']
self.includes = Collection(self, key=sl)
self.imports = Collection(self, key=ns)
self.elements = Collection(self, key=f)
self.types = Collection(self, key=f)
self.attr_decl = Collection(self, key=f)
self.attr_groups = Collection(self, key=f)
self.model_groups = Collection(self, key=f)
self.notations = Collection(self, key=f)
self._imported_schemas = {}
self._included_schemas = {}
self._base_url = None
def addImportSchema(self, schema):
"""for resolving import statements in Schema instance
schema -- schema instance
_imported_schemas
"""
if not isinstance(schema, XMLSchema):
raise TypeError, 'expecting a Schema instance'
if schema.targetNamespace != self.targetNamespace:
self._imported_schemas[schema.targetNamespace] = schema
else:
raise SchemaError, 'import schema bad targetNamespace'
def addIncludeSchema(self, schemaLocation, schema):
"""for resolving include statements in Schema instance
schemaLocation -- schema location
schema -- schema instance
_included_schemas
"""
if not isinstance(schema, XMLSchema):
raise TypeError, 'expecting a Schema instance'
if not schema.targetNamespace or\
schema.targetNamespace == self.targetNamespace:
self._included_schemas[schemaLocation] = schema
else:
raise SchemaError, 'include schema bad targetNamespace'
def setImportSchemas(self, schema_dict):
"""set the import schema dictionary, which is used to
reference depedent schemas.
"""
self._imported_schemas = schema_dict
def getImportSchemas(self):
"""get the import schema dictionary, which is used to
reference depedent schemas.
"""
return self._imported_schemas
def getSchemaNamespacesToImport(self):
"""returns tuple of namespaces the schema instance has declared
itself to be depedent upon.
"""
return tuple(self.includes.keys())
def setIncludeSchemas(self, schema_dict):
"""set the include schema dictionary, which is keyed with
schemaLocation (uri).
This is a means of providing
schemas to the current schema for content inclusion.
"""
self._included_schemas = schema_dict
def getIncludeSchemas(self):
"""get the include schema dictionary, which is keyed with
schemaLocation (uri).
"""
return self._included_schemas
def getBaseUrl(self):
"""get base url, used for normalizing all relative uri's
"""
return self._base_url
def setBaseUrl(self, url):
"""set base url, used for normalizing all relative uri's
"""
self._base_url = url
def getElementFormDefault(self):
"""return elementFormDefault attribute
"""
return self.attributes.get('elementFormDefault')
def isElementFormDefaultQualified(self):
return self.attributes.get('elementFormDefault') == 'qualified'
def getAttributeFormDefault(self):
"""return attributeFormDefault attribute
"""
return self.attributes.get('attributeFormDefault')
def getBlockDefault(self):
"""return blockDefault attribute
"""
return self.attributes.get('blockDefault')
def getFinalDefault(self):
"""return finalDefault attribute
"""
return self.attributes.get('finalDefault')
def load(self, node):
pnode = node.getParentNode()
if pnode:
pname = SplitQName(pnode.getTagName())[1]
if pname == 'types':
attributes = {}
self.setAttributes(pnode)
attributes.update(self.attributes)
self.setAttributes(node)
for k,v in attributes['xmlns'].items():
if not self.attributes['xmlns'].has_key(k):
self.attributes['xmlns'][k] = v
else:
self.setAttributes(node)
else:
self.setAttributes(node)
self.targetNamespace = self.getTargetNamespace()
contents = self.getContents(node)
indx = 0
num = len(contents)
while indx < num:
while indx < num:
node = contents[indx]
component = SplitQName(node.getTagName())[1]
if component == 'include':
tp = self.__class__.Include(self)
tp.fromDom(node)
self.includes[tp.attributes['schemaLocation']] = tp
schema = tp.getSchema()
if schema.targetNamespace and \
schema.targetNamespace != self.targetNamespace:
raise SchemaError, 'included schema bad targetNamespace'
for collection in ['imports','elements','types',\
'attr_decl','attr_groups','model_groups','notations']:
for k,v in getattr(schema,collection).items():
if not getattr(self,collection).has_key(k):
v._parent = weakref.ref(self)
getattr(self,collection)[k] = v
elif component == 'import':
tp = self.__class__.Import(self)
tp.fromDom(node)
import_ns = tp.getAttribute('namespace')
if import_ns:
if import_ns == self.targetNamespace:
raise SchemaError,\
'import and schema have same targetNamespace'
self.imports[import_ns] = tp
else:
self.imports[self.__class__.empty_namespace] = tp
if not self.getImportSchemas().has_key(import_ns) and\
tp.getAttribute('schemaLocation'):
self.addImportSchema(tp.getSchema())
elif component == 'redefine':
#print_debug('class %s, redefine skipped' %self.__class__, 5)
pass
elif component == 'annotation':
#print_debug('class %s, annotation skipped' %self.__class__, 5)
pass
else:
break
indx += 1
# (attribute, attributeGroup, complexType, element, group,
# notation, simpleType)*, annotation*)*
while indx < num:
node = contents[indx]
component = SplitQName(node.getTagName())[1]
if component == 'attribute':
tp = AttributeDeclaration(self)
tp.fromDom(node)
self.attr_decl[tp.getAttribute('name')] = tp
elif component == 'attributeGroup':
tp = AttributeGroupDefinition(self)
tp.fromDom(node)
self.attr_groups[tp.getAttribute('name')] = tp
elif component == 'complexType':
tp = ComplexType(self)
tp.fromDom(node)
self.types[tp.getAttribute('name')] = tp
elif component == 'element':
tp = ElementDeclaration(self)
tp.fromDom(node)
self.elements[tp.getAttribute('name')] = tp
elif component == 'group':
tp = ModelGroupDefinition(self)
tp.fromDom(node)
self.model_groups[tp.getAttribute('name')] = tp
elif component == 'notation':
tp = Notation(self)
tp.fromDom(node)
self.notations[tp.getAttribute('name')] = tp
elif component == 'simpleType':
tp = SimpleType(self)
tp.fromDom(node)
self.types[tp.getAttribute('name')] = tp
else:
break
indx += 1
while indx < num:
node = contents[indx]
component = SplitQName(node.getTagName())[1]
if component == 'annotation':
#print_debug('class %s, annotation 2 skipped' %self.__class__, 5)
pass
else:
break
indx += 1
class Import(XMLSchemaComponent):
"""<import>
parent:
schema
attributes:
id -- ID
namespace -- anyURI
schemaLocation -- anyURI
contents:
annotation?
"""
attributes = {'id':None,
'namespace':None,
'schemaLocation':None}
contents = {'xsd':['annotation']}
tag = 'import'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self._schema = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
if self.attributes['namespace'] == self.getTargetNamespace():
raise SchemaError, 'namespace of schema and import match'
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
def getSchema(self):
"""if schema is not defined, first look for a Schema class instance
in parent Schema. Else if not defined resolve schemaLocation
and create a new Schema class instance, and keep a hard reference.
"""
if not self._schema:
ns = self.attributes['namespace']
schema = self._parent().getImportSchemas().get(ns)
if not schema and self._parent()._parent:
schema = self._parent()._parent().getImportSchemas().get(ns)
if not schema:
url = self.attributes.get('schemaLocation')
if not url:
raise SchemaError, 'namespace(%s) is unknown' %ns
base_url = self._parent().getBaseUrl()
reader = SchemaReader(base_url=base_url)
reader._imports = self._parent().getImportSchemas()
reader._includes = self._parent().getIncludeSchemas()
self._schema = reader.loadFromURL(url)
return self._schema or schema
class Include(XMLSchemaComponent):
"""<include schemaLocation>
parent:
schema
attributes:
id -- ID
schemaLocation -- anyURI, required
contents:
annotation?
"""
required = ['schemaLocation']
attributes = {'id':None,
'schemaLocation':None}
contents = {'xsd':['annotation']}
tag = 'include'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self._schema = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
def getSchema(self):
"""if schema is not defined, first look for a Schema class instance
in parent Schema. Else if not defined resolve schemaLocation
and create a new Schema class instance.
"""
if not self._schema:
schema = self._parent()
self._schema = schema.getIncludeSchemas().get(\
self.attributes['schemaLocation']
)
if not self._schema:
url = self.attributes['schemaLocation']
reader = SchemaReader(base_url=schema.getBaseUrl())
reader._imports = schema.getImportSchemas()
reader._includes = schema.getIncludeSchemas()
self._schema = reader.loadFromURL(url)
return self._schema
class AttributeDeclaration(XMLSchemaComponent,\
                           AttributeMarker,\
                           DeclarationMarker):
    """<attribute name>
       parent:
           schema
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           default -- string
           fixed -- string

       contents:
           annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # optional anonymous <simpleType> child, set by fromDom
        self.content = None

    def fromDom(self, node):
        """ No list or union support
        """
        # Populate attributes and the optional annotation/simpleType children
        # from the DOM node; anything else is a schema error.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class LocalAttributeDeclaration(AttributeDeclaration,\
                                AttributeMarker,\
                                LocalMarker,\
                                DeclarationMarker):
    """<attribute name>
       parent:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           form -- ('qualified' | 'unqualified'), schema.attributeFormDefault
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed -- string

       contents:
           annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        # callable default: evaluated lazily against the owning schema's
        # attributeFormDefault when the attribute was not set explicitly
        'form':lambda self: GetSchema(self).getAttributeFormDefault(),
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation','simpleType']}

    def __init__(self, parent):
        AttributeDeclaration.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        # Same parsing as AttributeDeclaration: optional annotation and
        # optional anonymous simpleType children.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            elif component == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeWildCard(XMLSchemaComponent,\
                        AttributeMarker,\
                        DeclarationMarker,\
                        WildCardMarker):
    """<anyAttribute>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           namespace -- '##any' | '##other' |
               (anyURI* | '##targetNamespace' | '##local'), ##any
           processContents -- 'lax' | 'skip' | 'strict', strict

       contents:
           annotation?
    """
    attributes = {'id':None,
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'anyAttribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def fromDom(self, node):
        # Only an optional <annotation> child is permitted.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeReference(XMLSchemaComponent,\
                         AttributeMarker,\
                         ReferenceMarker):
    """<attribute ref>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           ref -- QName, required
           use -- ('optional' | 'prohibited' | 'required'), optional
           default -- string
           fixed -- string

       contents:
           annotation?
    """
    required = ['ref']
    attributes = {'id':None,
        'ref':None,
        'use':'optional',
        'default':None,
        'fixed':None}
    contents = {'xsd':['annotation']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getAttributeDeclaration(self, attribute='ref'):
        # resolve the 'ref' QName to the global attribute declaration
        return XMLSchemaComponent.getAttributeDeclaration(self, attribute)

    def fromDom(self, node):
        # Only an optional <annotation> child is permitted.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AttributeGroupDefinition(XMLSchemaComponent,\
                               AttributeGroupMarker,\
                               DefinitionMarker):
    """<attributeGroup name>
       parents:
           schema, redefine
       attributes:
           id -- ID
           name -- NCName, required

       contents:
           annotation?, (attribute | attributeGroup)*, anyAttribute?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'attribute', 'attributeGroup', 'anyAttribute']}
    tag = 'attributeGroup'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # tuple of attribute declarations/references (plus a trailing
        # wildcard, if any), set by fromDom
        self.attr_content = None

    def getAttributeContent(self):
        return self.attr_content

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            # annotation is only accepted as the very first child
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif component == 'attribute':
                # presence of 'name' vs 'ref' distinguishes a local
                # declaration from a reference
                if contents[indx].hasattr('name'):
                    content.append(LocalAttributeDeclaration(self))
                elif contents[indx].hasattr('ref'):
                    content.append(AttributeReference(self))
                else:
                    raise SchemaError, 'Unknown attribute type'
                content[-1].fromDom(contents[indx])
            elif component == 'attributeGroup':
                content.append(AttributeGroupReference(self))
                content[-1].fromDom(contents[indx])
            elif component == 'anyAttribute':
                # anyAttribute must be the last child
                if len(contents) != indx+1:
                    raise SchemaError, 'anyAttribute is out of order in %s' %self.getItemTrace()
                content.append(AttributeWildCard(self))
                content[-1].fromDom(contents[indx])
            else:
                raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())

        self.attr_content = tuple(content)
class AttributeGroupReference(XMLSchemaComponent,\
                              AttributeGroupMarker,\
                              ReferenceMarker):
    """<attributeGroup ref>
       parents:
           complexType, restriction, extension, attributeGroup
       attributes:
           id -- ID
           ref -- QName, required

       contents:
           annotation?
    """
    required = ['ref']
    attributes = {'id':None,
        'ref':None}
    contents = {'xsd':['annotation']}
    tag = 'attributeGroup'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getAttributeGroup(self, attribute='ref'):
        """attribute -- attribute with a QName value (eg. type).
           collection -- check types collection in parent Schema instance
        """
        # resolve the 'ref' QName against the schema's attr_groups collection
        return XMLSchemaComponent.getQNameAttribute(self, 'attr_groups', attribute)

    def fromDom(self, node):
        # Only an optional <annotation> child is permitted.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Elements
#####################################################
# NOTE(review): class name misspells "Constraints"; kept as-is because
# subclasses (Unique, Key, KeyRef) and possibly external code reference it.
class IdentityConstrants(XMLSchemaComponent):
    """Allow one to uniquely identify nodes in a document and ensure the
       integrity of references between them.

       attributes -- dictionary of attributes
       selector -- XPath to selected nodes
       fields -- list of XPath to key field
    """
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.selector = None
        self.fields = None
        self.annotation = None

    def fromDom(self, node):
        # 'contents' is supplied by the concrete subclass (Unique/Key/KeyRef);
        # children are the optional annotation, one selector, and 1+ fields.
        self.setAttributes(node)
        contents = self.getContents(node)
        fields = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'selector':
                    self.selector = self.Selector(self)
                    self.selector.fromDom(i)
                    continue
                elif component == 'field':
                    fields.append(self.Field(self))
                    fields[-1].fromDom(i)
                    continue
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            self.fields = tuple(fields)

    class Constraint(XMLSchemaComponent):
        # Shared base for the nested Selector/Field classes: an element
        # carrying an xpath attribute and an optional annotation.
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)

            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component in self.__class__.contents['xsd']:
                    if component == 'annotation' and not self.annotation:
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(i)
                    else:
                        raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

    class Selector(Constraint):
        """<selector xpath>
           parent:
               unique, key, keyref
           attributes:
               id -- ID
               xpath -- XPath subset, required

           contents:
               annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'selector'

    class Field(Constraint):
        """<field xpath>
           parent:
               unique, key, keyref
           attributes:
               id -- ID
               xpath -- XPath subset, required

           contents:
               annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'field'
class Unique(IdentityConstrants):
    """<unique name> Enforce fields are unique w/i a specified scope.

       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required

       contents:
           annotation?, selector, field+
    """
    # parsing behavior comes entirely from IdentityConstrants.fromDom
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'unique'
class Key(IdentityConstrants):
    """<key name> Enforce fields are unique w/i a specified scope, and all
           field values are present w/i document.  Fields cannot
           be nillable.

       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required

       contents:
           annotation?, selector, field+
    """
    # parsing behavior comes entirely from IdentityConstrants.fromDom
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'key'
class KeyRef(IdentityConstrants):
    """<keyref name refer> Ensure a match between two sets of values in an
           instance.

       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required
           refer -- QName, required

       contents:
           annotation?, selector, field+
    """
    # parsing behavior comes entirely from IdentityConstrants.fromDom
    required = ['name', 'refer']
    attributes = {'id':None,
        'name':None,
        'refer':None}
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'keyref'
class ElementDeclaration(XMLSchemaComponent,\
                         ElementMarker,\
                         DeclarationMarker):
    """<element name>
       parents:
           schema
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           default -- string
           fixed -- string
           nillable -- boolean, false
           abstract -- boolean, false
           substitutionGroup -- QName
           block -- ('#all' | ('substition' | 'extension' | 'restriction')*),
               schema.blockDefault
           final -- ('#all' | ('extension' | 'restriction')*),
               schema.finalDefault

       contents:
           annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'substitutionGroup':None,
        # callable defaults are evaluated lazily against the parent schema
        'block':lambda self: self._parent().getBlockDefault(),
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}
    tag = 'element'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None
        # assignment goes through the 'constraints' property setter below,
        # storing an empty tuple in self._constraints
        self.constraints = ()

    def isQualified(self):
        '''Global elements are always qualified.
        '''
        return True

    def getElementDeclaration(self, attribute):
        # not meaningful on a global element declaration itself
        raise Warning, 'invalid operation for <%s>' %self.tag

    def getTypeDefinition(self, attribute=None):
        '''If attribute is None, "type" is assumed, return the corresponding
        representation of the global type definition (TypeDefinition),
        or the local definition if don't find "type".  To maintain backwards
        compat, if attribute is provided call base class method.
        '''
        if attribute:
            return XMLSchemaComponent.getTypeDefinition(self, attribute)
        gt = XMLSchemaComponent.getTypeDefinition(self, 'type')
        if gt:
            return gt
        # fall back to the anonymous simpleType/complexType child, if any
        return self.content

    def getConstraints(self):
        return self._constraints
    def setConstraints(self, constraints):
        # always stored as a tuple, whatever iterable the caller passes
        self._constraints = tuple(constraints)
    constraints = property(getConstraints, setConstraints, None, "tuple of key, keyref, unique constraints")

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        constraints = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'simpleType' and not self.content:
                    self.content = AnonymousSimpleType(self)
                    self.content.fromDom(i)
                elif component == 'complexType' and not self.content:
                    self.content = LocalComplexType(self)
                    self.content.fromDom(i)
                elif component == 'key':
                    constraints.append(Key(self))
                    constraints[-1].fromDom(i)
                elif component == 'keyref':
                    constraints.append(KeyRef(self))
                    constraints[-1].fromDom(i)
                elif component == 'unique':
                    constraints.append(Unique(self))
                    constraints[-1].fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

        self.constraints = constraints
class LocalElementDeclaration(ElementDeclaration,\
                              LocalMarker):
    """<element>
       parents:
           all, choice, sequence
       attributes:
           id -- ID
           name -- NCName,  required
           form -- ('qualified' | 'unqualified'), schema.elementFormDefault
           type -- QName
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
           default -- string
           fixed -- string
           nillable -- boolean,  false
           block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault

       contents:
           annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        # callable defaults resolved lazily against the owning schema
        'form':lambda self: GetSchema(self).getElementFormDefault(),
        'type':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'block':lambda self: GetSchema(self).getBlockDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}

    def isQualified(self):
        '''Local elements can be qualified or unqualifed according
        to the attribute form, or the elementFormDefault.  By default
        local elements are unqualified.
        '''
        form = self.getAttribute('form')
        if form == 'qualified':
            return True
        if form == 'unqualified':
            return False
        # any other value means the schema document is malformed
        raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace())
class ElementReference(XMLSchemaComponent,\
                       ElementMarker,\
                       ReferenceMarker):
    """<element ref>
       parents:
           all, choice, sequence
       attributes:
           id -- ID
           ref -- QName, required
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1

       contents:
           annotation?
    """
    required = ['ref']
    attributes = {'id':None,
        'ref':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation']}
    tag = 'element'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getElementDeclaration(self, attribute=None):
        '''If attribute is None, "ref" is assumed, return the corresponding
        representation of the global element declaration (ElementDeclaration),
        To maintain backwards compat, if attribute is provided call base class method.
        '''
        if attribute:
            return XMLSchemaComponent.getElementDeclaration(self, attribute)
        return XMLSchemaComponent.getElementDeclaration(self, 'ref')

    def fromDom(self, node):
        # Only an optional <annotation> child is permitted.
        self.annotation = None
        self.setAttributes(node)
        for i in self.getContents(node):
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ElementWildCard(LocalElementDeclaration,\
                      WildCardMarker):
    """<any>
       parents:
           choice, sequence
       attributes:
           id -- ID
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
           namespace -- '##any' | '##other' |
               (anyURI* | '##targetNamespace' | '##local'), ##any
           processContents -- 'lax' | 'skip' | 'strict', strict

       contents:
           annotation?
    """
    required = []
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'any'

    def __init__(self, parent):
        # bypasses LocalElementDeclaration/ElementDeclaration __init__ on
        # purpose: a wildcard carries no content or constraints
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def isQualified(self):
        '''Global elements are always qualified, but if processContents
        are not strict could have dynamically generated local elements.
        '''
        return GetSchema(self).isElementFormDefaultQualified()

    def getTypeDefinition(self, attribute):
        # a wildcard has no type definition of its own
        raise Warning, 'invalid operation for <%s>' %self.tag

    def fromDom(self, node):
        # Only an optional <annotation> child is permitted.
        self.annotation = None
        self.setAttributes(node)
        for i in self.getContents(node):
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Model Groups
#####################################################
class Sequence(XMLSchemaComponent,\
               SequenceMarker):
    """<sequence>
       parents:
           complexType, extension, restriction, group, choice, sequence
       attributes:
           id -- ID
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1

       contents:
           annotation?, (element | group | choice | sequence | any)*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
         'any']}
    tag = 'sequence'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # ordered tuple of particle children, set by fromDom
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    # 'ref' vs 'name' distinguishes a reference from a
                    # local declaration
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                elif component == 'group':
                    content.append(ModelGroupReference(self))
                elif component == 'choice':
                    content.append(Choice(self))
                elif component == 'sequence':
                    content.append(Sequence(self))
                elif component == 'any':
                    content.append(ElementWildCard(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                # parse the child just appended
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class All(XMLSchemaComponent,\
          AllMarker):
    """<all>
       parents:
           complexType, extension, restriction, group
       attributes:
           id -- ID
           minOccurs -- '0' | '1', 1
           maxOccurs -- '1', 1

       contents:
           annotation?, element*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element']}
    tag = 'all'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # tuple of element children, set by fromDom
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    # 'ref' vs 'name' distinguishes a reference from a
                    # local declaration
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                # parse the child just appended
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Choice(XMLSchemaComponent,\
             ChoiceMarker):
    """<choice>
       parents:
           complexType, extension, restriction, group, choice, sequence
       attributes:
           id -- ID
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1

       contents:
           annotation?, (element | group | choice | sequence | any)*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
         'any']}
    tag = 'choice'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # ordered tuple of particle children, set by fromDom
        self.content = None

    def fromDom(self, node):
        # Same particle parsing as Sequence; only the model-group semantics
        # differ (handled by the marker classes / consumers).
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                elif component == 'group':
                    content.append(ModelGroupReference(self))
                elif component == 'choice':
                    content.append(Choice(self))
                elif component == 'sequence':
                    content.append(Sequence(self))
                elif component == 'any':
                    content.append(ElementWildCard(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                # parse the child just appended
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class ModelGroupDefinition(XMLSchemaComponent,\
                           ModelGroupMarker,\
                           DefinitionMarker):
    """<group name>
       parents:
           redefine, schema
       attributes:
           id -- ID
           name -- NCName,  required

       contents:
           annotation?, (all | choice | sequence)?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'all', 'choice', 'sequence']}
    tag = 'group'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # the single all/choice/sequence child, set by fromDom
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                # at most one model-group child: the 'not self.content'
                # guards route a second one into the error branch
                elif component == 'all' and not self.content:
                    self.content = All(self)
                elif component == 'choice' and not self.content:
                    self.content = Choice(self)
                elif component == 'sequence' and not self.content:
                    self.content = Sequence(self)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                # parse the model group just created
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ModelGroupReference(XMLSchemaComponent,\
ModelGroupMarker,\
ReferenceMarker):
"""<group ref>
parents:
choice, complexType, extension, restriction, sequence
attributes:
id -- ID
ref -- NCName, required
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation']}
tag = 'group'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getModelGroupReference(self):
return self.getModelGroup('ref')
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ComplexType(XMLSchemaComponent,\
DefinitionMarker,\
ComplexMarker):
"""<complexType name>
parents:
redefine, schema
attributes:
id -- ID
name -- NCName, required
mixed -- boolean, false
abstract -- boolean, false
block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault
final -- ('#all' | ('extension' | 'restriction')*), schema.finalDefault
contents:
annotation?, (simpleContent | complexContent |
((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
"""
required = ['name']
attributes = {'id':None,
'name':None,
'mixed':0,
'abstract':0,
'block':lambda self: self._parent().getBlockDefault(),
'final':lambda self: self._parent().getFinalDefault()}
contents = {'xsd':['annotation', 'simpleContent', 'complexContent',\
'group', 'all', 'choice', 'sequence', 'attribute', 'attributeGroup',\
'anyAttribute', 'any']}
tag = 'complexType'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def getElementDeclaration(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def getTypeDefinition(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
#XXX ugly
if not num:
return
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
self.content = None
if component == 'simpleContent':
self.content = self.__class__.SimpleContent(self)
self.content.fromDom(contents[indx])
elif component == 'complexContent':
self.content = self.__class__.ComplexContent(self)
self.content.fromDom(contents[indx])
else:
if component == 'all':
self.content = All(self)
elif component == 'choice':
self.content = Choice(self)
elif component == 'sequence':
self.content = Sequence(self)
elif component == 'group':
self.content = ModelGroupReference(self)
if self.content:
self.content.fromDom(contents[indx])
indx += 1
self.attr_content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeReference(self))
else:
self.attr_content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
self.attr_content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
self.attr_content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s): %s' \
%(contents[indx].getTagName(),self.getItemTrace())
self.attr_content[-1].fromDom(contents[indx])
indx += 1
class _DerivedType(XMLSchemaComponent):
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.derivation = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
continue
elif component == 'restriction' and not self.derivation:
self.derivation = self.__class__.Restriction(self)
elif component == 'extension' and not self.derivation:
self.derivation = self.__class__.Extension(self)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.derivation.fromDom(i)
class ComplexContent(_DerivedType,\
ComplexMarker):
"""<complexContent>
parents:
complexType
attributes:
id -- ID
mixed -- boolean, false
contents:
annotation?, (restriction | extension)
"""
attributes = {'id':None,
'mixed':0 }
contents = {'xsd':['annotation', 'restriction', 'extension']}
tag = 'complexContent'
class _DerivationBase(XMLSchemaComponent):
"""<extension>,<restriction>
parents:
complexContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (group | all | choice | sequence)?,
(attribute | attributeGroup)*, anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'group', 'all', 'choice',\
'sequence', 'attribute', 'attributeGroup', 'anyAttribute']}
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
#XXX ugly
if not num:
return
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
if component == 'all':
self.content = All(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'choice':
self.content = Choice(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'sequence':
self.content = Sequence(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'group':
self.content = ModelGroupReference(self)
self.content.fromDom(contents[indx])
indx += 1
else:
self.content = None
self.attr_content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeReference(self))
else:
self.attr_content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeGroupReference(self))
else:
self.attr_content.append(AttributeGroupDefinition(self))
elif component == 'anyAttribute':
self.attr_content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())
self.attr_content[-1].fromDom(contents[indx])
indx += 1
class Extension(_DerivationBase,
ExtensionMarker):
"""<extension base>
parents:
complexContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (group | all | choice | sequence)?,
(attribute | attributeGroup)*, anyAttribute?
"""
tag = 'extension'
class Restriction(_DerivationBase,\
RestrictionMarker):
"""<restriction base>
parents:
complexContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (group | all | choice | sequence)?,
(attribute | attributeGroup)*, anyAttribute?
"""
tag = 'restriction'
class SimpleContent(_DerivedType,\
SimpleMarker):
"""<simpleContent>
parents:
complexType
attributes:
id -- ID
contents:
annotation?, (restriction | extension)
"""
attributes = {'id':None}
contents = {'xsd':['annotation', 'restriction', 'extension']}
tag = 'simpleContent'
class Extension(XMLSchemaComponent,\
ExtensionMarker):
"""<extension base>
parents:
simpleContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (attribute | attributeGroup)*, anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'attribute', 'attributeGroup',
'anyAttribute']}
tag = 'extension'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
if num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
content.append(AttributeReference(self))
else:
content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)'\
%(contents[indx].getTagName())
content[-1].fromDom(contents[indx])
indx += 1
self.attr_content = tuple(content)
class Restriction(XMLSchemaComponent,\
RestrictionMarker):
"""<restriction base>
parents:
simpleContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, simpleType?, (enumeration | length |
maxExclusive | maxInclusive | maxLength | minExclusive |
minInclusive | minLength | pattern | fractionDigits |
totalDigits | whiteSpace)*, (attribute | attributeGroup)*,
anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'simpleType', 'attribute',\
'attributeGroup', 'anyAttribute'] + RestrictionMarker.facets}
tag = 'restriction'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.content = []
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
content.append(AttributeReference(self))
else:
content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
content.append(AttributeWildCard(self))
elif component == 'simpleType':
self.content.append(LocalSimpleType(self))
self.content[-1].fromDom(contents[indx])
else:
raise SchemaError, 'Unknown component (%s)'\
%(contents[indx].getTagName())
content[-1].fromDom(contents[indx])
indx += 1
self.attr_content = tuple(content)
class LocalComplexType(ComplexType,\
                       LocalMarker):
    """<complexType>
       parents:
           element
       attributes:
           id -- ID
           mixed -- boolean, false
       contents:
           annotation?, (simpleContent | complexContent |
           ((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
    """
    # Anonymous, element-local complexType: it carries no 'name' attribute,
    # so the inherited required list is overridden to be empty.
    required = []
    attributes = {'id':None,
        'mixed':0}
    tag = 'complexType'
class SimpleType(XMLSchemaComponent,\
DefinitionMarker,\
SimpleMarker):
"""<simpleType name>
parents:
redefine, schema
attributes:
id -- ID
name -- NCName, required
final -- ('#all' | ('extension' | 'restriction' | 'list' | 'union')*),
schema.finalDefault
contents:
annotation?, (restriction | list | union)
"""
required = ['name']
attributes = {'id':None,
'name':None,
'final':lambda self: self._parent().getFinalDefault()}
contents = {'xsd':['annotation', 'restriction', 'list', 'union']}
tag = 'simpleType'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def getElementDeclaration(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def getTypeDefinition(self, attribute):
raise Warning, 'invalid operation for <%s>' %self.tag
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for child in contents:
component = SplitQName(child.getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(child)
continue
break
else:
return
if component == 'restriction':
self.content = self.__class__.Restriction(self)
elif component == 'list':
self.content = self.__class__.List(self)
elif component == 'union':
self.content = self.__class__.Union(self)
else:
raise SchemaError, 'Unknown component (%s)' %(component)
self.content.fromDom(child)
class Restriction(XMLSchemaComponent,\
RestrictionMarker):
"""<restriction base>
parents:
simpleType
attributes:
id -- ID
base -- QName, required or simpleType child
contents:
annotation?, simpleType?, (enumeration | length |
maxExclusive | maxInclusive | maxLength | minExclusive |
minInclusive | minLength | pattern | fractionDigits |
totalDigits | whiteSpace)*
"""
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'simpleType']+RestrictionMarker.facets}
tag = 'restriction'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def getSimpleTypeContent(self):
for el in self.content:
if el.isSimple(): return el
return None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for indx in range(len(contents)):
component = SplitQName(contents[indx].getTagName())[1]
if (component == 'annotation') and (not indx):
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
continue
elif (component == 'simpleType') and (not indx or indx == 1):
content.append(AnonymousSimpleType(self))
content[-1].fromDom(contents[indx])
elif component in RestrictionMarker.facets:
#print_debug('%s class instance, skipping %s' %(self.__class__, component))
pass
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class Union(XMLSchemaComponent,
UnionMarker):
"""<union>
parents:
simpleType
attributes:
id -- ID
memberTypes -- list of QNames, required or simpleType child.
contents:
annotation?, simpleType*
"""
attributes = {'id':None,
'memberTypes':None }
contents = {'xsd':['annotation', 'simpleType']}
tag = 'union'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for indx in range(len(contents)):
component = SplitQName(contents[indx].getTagName())[1]
if (component == 'annotation') and (not indx):
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
elif (component == 'simpleType'):
content.append(AnonymousSimpleType(self))
content[-1].fromDom(contents[indx])
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class List(XMLSchemaComponent,
ListMarker):
"""<list>
parents:
simpleType
attributes:
id -- ID
itemType -- QName, required or simpleType child.
contents:
annotation?, simpleType?
"""
attributes = {'id':None,
'itemType':None }
contents = {'xsd':['annotation', 'simpleType']}
tag = 'list'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def getItemType(self):
return self.attributes.get('itemType')
def getTypeDefinition(self, attribute='itemType'):
'''return the type refered to by itemType attribute or
the simpleType content. If returns None, then the
type refered to by itemType is primitive.
'''
tp = XMLSchemaComponent.getTypeDefinition(self, attribute)
return tp or self.content
def fromDom(self, node):
self.annotation = None
self.content = None
self.setAttributes(node)
contents = self.getContents(node)
for indx in range(len(contents)):
component = SplitQName(contents[indx].getTagName())[1]
if (component == 'annotation') and (not indx):
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
elif (component == 'simpleType'):
self.content = AnonymousSimpleType(self)
self.content.fromDom(contents[indx])
break
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class AnonymousSimpleType(SimpleType,\
                          SimpleMarker):
    """<simpleType>
       parents:
           attribute, element, list, restriction, union
       attributes:
           id -- ID
       contents:
           annotation?, (restriction | list | union)
    """
    # Anonymous (local) simpleType: unlike SimpleType it carries no 'name'
    # attribute, so nothing is required.
    required = []
    attributes = {'id':None}
    tag = 'simpleType'
class Redefine:
    """<redefine>
       parents:
       attributes:
       contents:
    """
    # Placeholder: <redefine> handling is not implemented; only the tag
    # name is recorded.
    tag = 'redefine'
###########################
###########################
# Subclassing builtin types requires Python 2.2; on older interpreters fall
# back to the pure-python UserTuple emulation so that
# TypeDescriptionComponent (below) can still behave like a tuple.
if sys.version_info[:2] >= (2, 2):
    tupleClass = tuple
else:
    import UserTuple
    tupleClass = UserTuple.UserTuple
class TypeDescriptionComponent(tupleClass):
    """Tuple of length 2, consisting of
       a namespace and unprefixed name.
    """
    def __new__(cls, args):
        """args -- (namespace, name)
           Strip the name's prefix before the tuple content is fixed.
        """
        # Bug fix: when tupleClass is the builtin tuple, the instance
        # content is determined here in __new__ -- reassigning args inside
        # __init__ (as the original did) had no effect, so prefixed names
        # were stored verbatim.  With the old-style UserTuple fallback
        # __new__ is never invoked and __init__ does the work instead.
        if len(args) == 2 and args[1].find(':') >= 0:
            args = (args[0], SplitQName(args[1])[1])
        return tupleClass.__new__(cls, args)

    def __init__(self, args):
        """args -- (namespace, name)
           Remove the name's prefix, irrelevant.
        """
        if len(args) != 2:
            raise TypeError('expecting tuple (namespace, name), got %s' %args)
        elif args[1].find(':') >= 0:
            args = (args[0], SplitQName(args[1])[1])
        # Bug fix: the original called tuple.__init__ unconditionally,
        # which raises TypeError when the UserTuple fallback is the base.
        tupleClass.__init__(self, args)
        return

    def getTargetNamespace(self):
        """Return the namespace URI component."""
        return self[0]

    def getName(self):
        """Return the unprefixed name component."""
        return self[1]
| Python |
"""
A more or less complete user-defined wrapper around tuple objects.
Adapted version of the standard library's UserList.
Taken from Stefan Schwarzer's ftputil library, available at
<http://www.ndh.net/home/sschwarzer/python/python_software.html>, and used under this license:
Copyright (C) 1999, Stefan Schwarzer
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the above author nor the names of the
contributors to the software may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# $Id: UserTuple.py,v 1.1 2003/07/21 14:18:54 warnes Exp $
#XXX tuple instances (in Python 2.2) contain also:
# __class__, __delattr__, __getattribute__, __hash__, __new__,
# __reduce__, __setattr__, __str__
# What about these?
class UserTuple:
    """A user-defined class that emulates the builtin tuple type.

    The payload lives in the 'data' attribute as a real tuple; every
    special method simply delegates to it.
    """
    def __init__(self, inittuple=None):
        """inittuple -- None, a tuple, a UserTuple, or any sequence."""
        self.data = ()
        if inittuple is None:
            return
        # XXX should this accept an arbitrary sequence?
        if type(inittuple) == type(self.data):
            # A real tuple can be shared directly: tuples are immutable.
            self.data = inittuple
        elif isinstance(inittuple, UserTuple):
            # Slicing a tuple returns the same (immutable) object, which
            # is exactly how builtin tuples behave on copy.
            self.data = inittuple.data[:]
        else:
            # Arbitrary sequence; note that (t is tuple(t)) == 1.
            self.data = tuple(inittuple)

    def __repr__(self):
        return repr(self.data)

    def _unwrap(self, other):
        # Comparisons operate on the underlying tuple of a UserTuple.
        if isinstance(other, UserTuple):
            return other.data
        return other

    def __lt__(self, other):
        return self.data < self._unwrap(other)

    def __le__(self, other):
        return self.data <= self._unwrap(other)

    def __eq__(self, other):
        return self.data == self._unwrap(other)

    def __ne__(self, other):
        return self.data != self._unwrap(other)

    def __gt__(self, other):
        return self.data > self._unwrap(other)

    def __ge__(self, other):
        return self.data >= self._unwrap(other)

    def __cmp__(self, other):
        return cmp(self.data, self._unwrap(other))

    def __contains__(self, item):
        return item in self.data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        return self.data[i]

    def __getslice__(self, i, j):
        i = max(i, 0)
        j = max(j, 0)
        return self.__class__(self.data[i:j])

    def __add__(self, other):
        if isinstance(other, UserTuple):
            return self.__class__(self.data + other.data)
        if isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        return self.__class__(self.data + tuple(other))

    # dir( () ) contains no __radd__ (at least in Python 2.2)

    def __mul__(self, n):
        return self.__class__(self.data * n)
    __rmul__ = __mul__
| Python |
#! /usr/bin/env python
"""WSDL parsing services package for Web Services for Python."""
# CVS revision identifier for this package.
ident = "$Id: __init__.py,v 1.11 2004/12/07 15:54:53 blunck2 Exp $"

import WSDLTools
import XMLname
import logging
"""Translate strings to and from SOAP 1.2 XML name encoding
Implements rules for mapping application defined name to XML names
specified by the w3 SOAP working group for SOAP version 1.2 in
Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft
17, December 2001, <http://www.w3.org/TR/soap12-part2/#namemap>
Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>.
Author: Gregory R. Warnes <Gregory.R.Warnes@Pfizer.com>
Date: 2002-04-25
Version 0.9.0
"""
# CVS revision identifier for this module.
ident = "$Id: XMLname.py,v 1.4 2005/02/16 14:45:37 warnes Exp $"

from re import *
def _NCNameChar(x):
return x.isalpha() or x.isdigit() or x=="." or x=='-' or x=="_"
def _NCNameStartChar(x):
return x.isalpha() or x=="_"
def _toUnicodeHex(x):
hexval = hex(ord(x[0]))[2:]
hexlen = len(hexval)
# Make hexval have either 4 or 8 digits by prepending 0's
if (hexlen==1): hexval = "000" + hexval
elif (hexlen==2): hexval = "00" + hexval
elif (hexlen==3): hexval = "0" + hexval
elif (hexlen==4): hexval = "" + hexval
elif (hexlen==5): hexval = "000" + hexval
elif (hexlen==6): hexval = "00" + hexval
elif (hexlen==7): hexval = "0" + hexval
elif (hexlen==8): hexval = "" + hexval
else: raise Exception, "Illegal Value returned from hex(ord(x))"
return "_x"+ hexval + "_"
def _fromUnicodeHex(x):
return eval( r'u"\u'+x[2:-1]+'"' )
def toXMLname(string):
    """Convert string to a XML name."""
    # Split off an optional namespace prefix; only the local part is
    # escaped, the prefix passes through untouched.
    if string.find(':') != -1 :
        (prefix, localname) = string.split(':',1)
    else:
        prefix = None
        localname = string

    # NOTE: Python 2 only -- unicode() does not exist on Python 3.
    T = unicode(localname)

    N = len(localname)
    X = [];
    for i in range(N) :
        if i< N-1 and T[i]==u'_' and T[i+1]==u'x':
            # A literal '_x' must itself be escaped so decoding stays
            # unambiguous.
            X.append(u'_x005F_')
        elif i==0 and N >= 3 and \
                 ( T[0]==u'x' or T[0]==u'X' ) and \
                 ( T[1]==u'm' or T[1]==u'M' ) and \
                 ( T[2]==u'l' or T[2]==u'L' ):
            # Names beginning with 'xml' (any case) are reserved by XML;
            # mark them with the _xFFFF_ prefix that fromXMLname strips.
            X.append(u'_xFFFF_' + T[0])
        elif (not _NCNameChar(T[i])) or (i==0 and not _NCNameStartChar(T[i])):
            # Any character illegal here (or at the start) becomes an
            # _xHHHH_ hex escape.
            X.append(_toUnicodeHex(T[i]))
        else:
            X.append(T[i])

    if prefix:
        return "%s:%s" % (prefix, u''.join(X))
    return u''.join(X)
def fromXMLname(string):
    """Convert XML name to unicode string."""
    # Drop the reserved-name marker first, then expand every _xHH..._
    # escape back to the character it encodes.
    without_marker = sub(r'_xFFFF_', '', string)

    def expand(matchobj):
        return _fromUnicodeHex(matchobj.group(0))

    return sub(r'_x[0-9A-Za-z]+_', expand, without_marker)
| Python |
#! /usr/bin/env python
"""Namespace module, so you don't need PyXML
"""
# Prefer PyXML's canonical namespace constants when available; otherwise
# (PyXML not installed) fall back to equivalent local definitions.
try:
    from xml.ns import SOAP, SCHEMA, WSDL, XMLNS, DSIG, ENCRYPTION
    DSIG.C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"

except ImportError:
    # Bug fix: this was a bare 'except:', which silently swallowed ANY
    # error raised in the try block (e.g. a typo in the assignment above),
    # not just a missing PyXML installation.

    class SOAP:
        # SOAP 1.1 envelope/encoding namespaces.
        ENV = "http://schemas.xmlsoap.org/soap/envelope/"
        ENC = "http://schemas.xmlsoap.org/soap/encoding/"
        ACTOR_NEXT = "http://schemas.xmlsoap.org/soap/actor/next"

    class SCHEMA:
        # XML Schema namespaces, all three published revisions.
        XSD1 = "http://www.w3.org/1999/XMLSchema"
        XSD2 = "http://www.w3.org/2000/10/XMLSchema"
        XSD3 = "http://www.w3.org/2001/XMLSchema"
        XSD_LIST = [ XSD1, XSD2, XSD3 ]
        XSI1 = "http://www.w3.org/1999/XMLSchema-instance"
        XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
        XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
        XSI_LIST = [ XSI1, XSI2, XSI3 ]
        BASE = XSD3

    class WSDL:
        # WSDL 1.1 and its binding extension namespaces.
        BASE = "http://schemas.xmlsoap.org/wsdl/"
        BIND_HTTP = "http://schemas.xmlsoap.org/wsdl/http/"
        BIND_MIME = "http://schemas.xmlsoap.org/wsdl/mime/"
        BIND_SOAP = "http://schemas.xmlsoap.org/wsdl/soap/"
        BIND_SOAP12 = "http://schemas.xmlsoap.org/wsdl/soap12/"

    class XMLNS:
        BASE = "http://www.w3.org/2000/xmlns/"
        XML = "http://www.w3.org/XML/1998/namespace"
        HTML = "http://www.w3.org/TR/REC-html40"

    class DSIG:
        # XML Digital Signature algorithm/namespace identifiers.
        BASE = "http://www.w3.org/2000/09/xmldsig#"
        C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
        C14N_COMM = "http://www.w3.org/TR/2000/CR-xml-c14n-20010315#WithComments"
        C14N_EXCL = "http://www.w3.org/2001/10/xml-exc-c14n#"
        DIGEST_MD2 = "http://www.w3.org/2000/09/xmldsig#md2"
        DIGEST_MD5 = "http://www.w3.org/2000/09/xmldsig#md5"
        DIGEST_SHA1 = "http://www.w3.org/2000/09/xmldsig#sha1"
        ENC_BASE64 = "http://www.w3.org/2000/09/xmldsig#base64"
        ENVELOPED = "http://www.w3.org/2000/09/xmldsig#enveloped-signature"
        HMAC_SHA1 = "http://www.w3.org/2000/09/xmldsig#hmac-sha1"
        SIG_DSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#dsa-sha1"
        SIG_RSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#rsa-sha1"
        XPATH = "http://www.w3.org/TR/1999/REC-xpath-19991116"
        XSLT = "http://www.w3.org/TR/1999/REC-xslt-19991116"

    class ENCRYPTION:
        # XML Encryption algorithm/namespace identifiers.
        BASE = "http://www.w3.org/2001/04/xmlenc#"
        BLOCK_3DES = "http://www.w3.org/2001/04/xmlenc#des-cbc"
        BLOCK_AES128 = "http://www.w3.org/2001/04/xmlenc#aes128-cbc"
        BLOCK_AES192 = "http://www.w3.org/2001/04/xmlenc#aes192-cbc"
        BLOCK_AES256 = "http://www.w3.org/2001/04/xmlenc#aes256-cbc"
        DIGEST_RIPEMD160 = "http://www.w3.org/2001/04/xmlenc#ripemd160"
        DIGEST_SHA256 = "http://www.w3.org/2001/04/xmlenc#sha256"
        DIGEST_SHA512 = "http://www.w3.org/2001/04/xmlenc#sha512"
        KA_DH = "http://www.w3.org/2001/04/xmlenc#dh"
        KT_RSA_1_5 = "http://www.w3.org/2001/04/xmlenc#rsa-1_5"
        KT_RSA_OAEP = "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"
        STREAM_ARCFOUR = "http://www.w3.org/2001/04/xmlenc#arcfour"
        WRAP_3DES = "http://www.w3.org/2001/04/xmlenc#kw-3des"
        WRAP_AES128 = "http://www.w3.org/2001/04/xmlenc#kw-aes128"
        WRAP_AES192 = "http://www.w3.org/2001/04/xmlenc#kw-aes192"
        WRAP_AES256 = "http://www.w3.org/2001/04/xmlenc#kw-aes256"
class OASIS:
    '''URLs for Oasis specifications
    '''
    WSSE = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
    UTILITY = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"
    LIFETIME = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceLifetime-1.2-draft-01.xsd"
    PROPERTIES = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceProperties-1.2-draft-01.xsd"
    BASENOTIFICATION = "http://docs.oasis-open.org/wsn/2004/06/wsn-WS-BaseNotification-1.2-draft-01.xsd"
    BASEFAULTS = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-BaseFaults-1.2-draft-01.xsd"

class WSSE:
    # Pre-OASIS WS-Security / WS-Trust draft namespaces.
    BASE = "http://schemas.xmlsoap.org/ws/2002/04/secext"
    TRUST = "http://schemas.xmlsoap.org/ws/2004/04/trust"

class WSU:
    # WS-Utility draft namespaces.
    BASE = "http://schemas.xmlsoap.org/ws/2002/04/utility"
    UTILITY = "http://schemas.xmlsoap.org/ws/2002/07/utility"

class WSR:
    # IBM WS-ResourceProperties / WS-ResourceLifetime drafts.
    PROPERTIES = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceProperties"
    LIFETIME = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceLifetime"

class WSA200408:
    # WS-Addressing, August 2004 draft.
    ADDRESS = "http://schemas.xmlsoap.org/ws/2004/08/addressing"
    ANONYMOUS = "%s/role/anonymous" %ADDRESS
    FAULT = "%s/fault" %ADDRESS
# Default WS-Addressing version used elsewhere in the package.
WSA = WSA200408

class WSA200403:
    # WS-Addressing, March 2004 draft.
    ADDRESS = "http://schemas.xmlsoap.org/ws/2004/03/addressing"
    ANONYMOUS = "%s/role/anonymous" %ADDRESS
    FAULT = "%s/fault" %ADDRESS

class WSA200303:
    # WS-Addressing, March 2003 draft; it defined no fault action URI.
    ADDRESS = "http://schemas.xmlsoap.org/ws/2003/03/addressing"
    ANONYMOUS = "%s/role/anonymous" %ADDRESS
    FAULT = None

class WSP:
    POLICY = "http://schemas.xmlsoap.org/ws/2002/12/policy"

class BEA:
    SECCONV = "http://schemas.xmlsoap.org/ws/2004/04/sc"

class GLOBUS:
    # Globus Toolkit WSRF security namespaces.
    SECCONV = "http://wsrf.globus.org/core/2004/07/security/secconv"
    CORE = "http://www.globus.org/namespaces/2004/06/core"
    SIG = "http://www.globus.org/2002/04/xmlenc#gssapi-sign"

ZSI_SCHEMA_URI = 'http://www.zolera.com/schemas/ZSI/'
| Python |
#! /usr/bin/env python
"""Compatibility module, imported by ZSI if you don't have PyXML 0.7.
No copyright violations -- we're only using parts of PyXML that we
wrote.
"""
_copyright = '''ZSI: Zolera Soap Infrastructure.
Copyright 2001, Zolera Systems, Inc. All Rights Reserved.
Copyright 2002-2003, Rich Salz. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, and/or
sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, provided that the above copyright notice(s) and
this permission notice appear in all copies of the Software and that
both the above copyright notice(s) and this permission notice appear in
supporting documentation.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
OR PERFORMANCE OF THIS SOFTWARE.
Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale, use
or other dealings in this Software without prior written authorization
of the copyright holder.
'''
_copyright += "\n\nPortions are also: "
_copyright += '''Copyright 2001, Zolera Systems Inc. All Rights Reserved.
Copyright 2001, MIT. All Rights Reserved.
Distributed under the terms of:
Python 2.0 License or later.
http://www.python.org/2.0.1/license.html
or
W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software-19980720
'''
from xml.dom import Node
from Namespaces import XMLNS
import cStringIO as StringIO
try:
    from xml.dom.ext import c14n
except ImportError, ex:
    # PyXML (xml.dom.ext) is optional.  Without it the module falls back to
    # the local _implementation class defined below; the exclusive-c14n
    # patch class is unavailable in that case.
    _implementation2 = None
    _attrs = lambda E: (E.attributes and E.attributes.values()) or []
    _children = lambda E: E.childNodes or []
else:
    class _implementation2(c14n._implementation):
        """Patch for exclusive c14n: subclasses the PyXML c14n implementation
        and replaces its _do_element with a version that fixes namespace
        sorting and renders inherited namespace declarations required by
        exclusive canonicalization.
        """
        def __init__(self, node, write, **kw):
            # Prefixes the caller wants kept even under exclusive c14n.
            self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
            self._exclusive = None
            if node.nodeType == Node.ELEMENT_NODE:
                if not c14n._inclusive(self):
                    # Exclusive c14n: remember the namespace/xml attributes
                    # inherited from ancestors so missing declarations can be
                    # re-rendered on demand in _do_element below.
                    self._exclusive = self._inherit_context(node)
            c14n._implementation.__init__(self, node, write, **kw)

        def _do_element(self, node, initial_other_attrs = []):
            """Patch for the xml.dom.ext.c14n implementation's _do_element
            method.  This fixes a problem with sorting of namespaces.
            """
            # Get state (from the stack) make local copies.
            #   ns_parent -- NS declarations in parent
            #   ns_rendered -- NS nodes rendered by ancestors
            #   ns_local -- NS declarations relevant to this element
            #   xml_attrs -- Attributes in XML namespace from parent
            #   xml_attrs_local -- Local attributes in XML namespace.
            ns_parent, ns_rendered, xml_attrs = \
                self.state[0], self.state[1].copy(), self.state[2].copy() #0422
            ns_local = ns_parent.copy()
            xml_attrs_local = {}

            # Divide attributes into NS, XML, and others.
            #other_attrs = initial_other_attrs[:]
            other_attrs = []
            sort_these_attrs = initial_other_attrs[:]
            in_subset = c14n._in_subset(self.subset, node)
            #for a in _attrs(node):
            sort_these_attrs +=c14n._attrs(node)
            for a in sort_these_attrs:
                if a.namespaceURI == c14n.XMLNS.BASE:
                    n = a.nodeName
                    if n == "xmlns:": n = "xmlns"        # DOM bug workaround
                    ns_local[n] = a.nodeValue
                elif a.namespaceURI == c14n.XMLNS.XML:
                    if c14n._inclusive(self) or (in_subset and c14n._in_subset(self.subset, a)): #020925 Test to see if attribute node in subset
                        xml_attrs_local[a.nodeName] = a #0426
                else:
                    if c14n._in_subset(self.subset, a):     #020925 Test to see if attribute node in subset
                        other_attrs.append(a)
                #add local xml:foo attributes to ancestor's xml:foo attributes
                xml_attrs.update(xml_attrs_local)

            # Render the node
            W, name = self.write, None
            if in_subset:
                name = node.nodeName
                W('<')
                W(name)

                # Create list of NS attributes to render.
                ns_to_render = []
                for n,v in ns_local.items():
                    # If default namespace is XMLNS.BASE or empty,
                    # and if an ancestor was the same
                    if n == "xmlns" and v in [ c14n.XMLNS.BASE, '' ] \
                    and ns_rendered.get('xmlns') in [ c14n.XMLNS.BASE, '', None ]:
                        continue
                    # "omit namespace node with local name xml, which defines
                    # the xml prefix, if its string value is
                    # http://www.w3.org/XML/1998/namespace."
                    if n in ["xmlns:xml", "xml"] \
                    and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
                        continue
                    # If not previously rendered
                    # and it's inclusive or utilized
                    if (n,v) not in ns_rendered.items() \
                      and (c14n._inclusive(self) or \
                      c14n._utilized(n, node, other_attrs, self.unsuppressedPrefixes)):
                        ns_to_render.append((n, v))

                #####################################
                # JRB: for exclusive c14n, make sure the namespace declaration
                # for this element (and any prefixed attributes) is rendered
                # even if it was declared only on an ancestor.
                #####################################
                if not c14n._inclusive(self):
                    if node.prefix is None:
                        look_for = [('xmlns', node.namespaceURI),]
                    else:
                        look_for = [('xmlns:%s' %node.prefix, node.namespaceURI),]
                    for a in c14n._attrs(node):
                        if a.namespaceURI != XMLNS.BASE:
                            #print "ATTRIBUTE: ", (a.namespaceURI, a.prefix)
                            if a.prefix:
                                #print "APREFIX: ", a.prefix
                                look_for.append(('xmlns:%s' %a.prefix, a.namespaceURI))
                    for key,namespaceURI in look_for:
                        if ns_rendered.has_key(key):
                            if ns_rendered[key] == namespaceURI:
                                # Dont write out
                                pass
                            else:
                                #ns_to_render += [(key, namespaceURI)]
                                pass
                        elif (key,namespaceURI) in ns_to_render:
                            # Dont write out
                            pass
                        else:
                            # Unique write out, rewrite to render
                            ns_local[key] = namespaceURI
                            # Pull the declaration from the inherited
                            # (exclusive) context captured in __init__.
                            for a in self._exclusive:
                                if a.nodeName == key:
                                    #self._do_attr(a.nodeName, a.value)
                                    #ns_rendered[key] = namespaceURI
                                    #break
                                    ns_to_render += [(a.nodeName, a.value)]
                                    break
                                elif key is None and a.nodeName == 'xmlns':
                                    #print "DEFAULT: ", (a.nodeName, a.value)
                                    ns_to_render += [(a.nodeName, a.value)]
                                    break
                                #print "KEY: ", key
                            else:
                                #print "Look for: ", look_for
                                #print "NS_TO_RENDER: ", ns_to_render
                                #print "EXCLUSIVE NS: ", map(lambda f: (f.nodeName,f.value),self._exclusive)
                                raise RuntimeError, \
                                    'can not find namespace (%s="%s") for exclusive canonicalization'\
                                    %(key, namespaceURI)
                #####################################

                # Sort and render the ns, marking what was rendered.
                ns_to_render.sort(c14n._sorter_ns)
                for n,v in ns_to_render:
                    #XXX JRB, getting 'xmlns,None' here when xmlns=''
                    if v: self._do_attr(n, v)
                    else:
                        v = ''
                        self._do_attr(n, v)
                    ns_rendered[n]=v    #0417

                # If exclusive or the parent is in the subset, add the local xml attributes
                # Else, add all local and ancestor xml attributes
                # Sort and render the attributes.
                if not c14n._inclusive(self) or c14n._in_subset(self.subset,node.parentNode):  #0426
                    other_attrs.extend(xml_attrs_local.values())
                else:
                    other_attrs.extend(xml_attrs.values())
                #print "OTHER: ", other_attrs
                other_attrs.sort(c14n._sorter)
                for a in other_attrs:
                    self._do_attr(a.nodeName, a.value)
                W('>')

            # Push state, recurse, pop state.
            state, self.state = self.state, (ns_local, ns_rendered, xml_attrs)
            for c in c14n._children(node):
                c14n._implementation.handlers[c.nodeType](self, c)
            self.state = state

            if name: W('</%s>' % name)
        # Install the patched handler into the PyXML implementation.
        c14n._implementation.handlers[c14n.Node.ELEMENT_NODE] = _do_element
# True for attribute nodes living in the reserved "xml:" namespace.
_IN_XML_NS = lambda n: n.namespaceURI == XMLNS.XML

# Relative document order of a document-level PI/comment with respect to the
# first (document) element: before it, at it, or after it.
_LesserElement, _Element, _GreaterElement = range(3)
def _sorter(n1, n2):
    '''_sorter(n1,n2) -> int

    Sorting predicate for non-NS attributes: order first by namespaceURI,
    then by localName.  Returns -1, 0 or 1 like the old cmp().'''
    # cmp() is a Python-2-only builtin; the subtraction idiom below yields
    # the same -1/0/1 for the values compared here and also runs on Python 3.
    a, b = n1.namespaceURI, n2.namespaceURI
    i = (a > b) - (a < b)
    if i: return i
    a, b = n1.localName, n2.localName
    return (a > b) - (a < b)
def _sorter_ns(n1, n2):
    '''_sorter_ns((n,v),(n,v)) -> int

    Sorting predicate for (prefix, uri) namespace pairs.  The default
    declaration "xmlns" always sorts first ("an empty namespace URI is
    lexicographically least"); the rest order by prefix name.'''
    if n1[0] == 'xmlns': return -1
    if n2[0] == 'xmlns': return 1
    # Python-2-only cmp() replaced by the equivalent -1/0/1 idiom.
    a, b = n1[0], n2[0]
    return (a > b) - (a < b)
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
    '''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean

    Return true (1) if the namespace declaration named n is actually used
    by the element node, one of its non-NS attributes, or is explicitly
    listed in unsuppressedPrefixes.'''
    # Strip the "xmlns:"/"xmlns" declaration syntax down to the bare prefix.
    prefix = n
    if prefix.startswith('xmlns:'):
        prefix = prefix[6:]
    elif prefix.startswith('xmlns'):
        prefix = prefix[5:]
    if prefix == node.prefix or prefix in unsuppressedPrefixes:
        return 1
    for attr in other_attrs:
        if prefix == attr.prefix:
            return 1
    return 0
# True when no subset is given (whole-document c14n) or the node is a member
# of the XPath-selected subset.
_in_subset = lambda subset, node: not subset or node in subset

#
# JRB.  Currently there is a bug in do_element, but since the underlying
# Data Structures in c14n have changed I can't just apply the
# _implementation2 patch above.  But this will work OK for most uses,
# just not XML Signatures.
#
class _implementation:
    '''Implementation class for C14N.  This accompanies a node during its
    processing and includes the parameters and processing state.'''

    # Handler for each node type; populated during module instantiation.
    handlers = {}

    def __init__(self, node, write, **kw):
        '''Create and run the implementation.

        node  -- DOM document or element node to canonicalize
        write -- callable receiving the canonical text fragments
        kw    -- subset, comments, unsuppressedPrefixes, nsdict (see
                 Canonicalize() for their meaning)'''
        self.write = write
        self.subset = kw.get('subset')
        # Subset canonicalization keeps comments by default; whole-document
        # canonicalization drops them by default.
        if self.subset:
            self.comments = kw.get('comments', 1)
        else:
            self.comments = kw.get('comments', 0)
        self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
        nsdict = kw.get('nsdict', { 'xml': XMLNS.XML, 'xmlns': XMLNS.BASE })

        # Processing state: (ns declarations, rendered prefixes, xml attrs).
        self.state = (nsdict, ['xml'], [])
        if node.nodeType == Node.DOCUMENT_NODE:
            self._do_document(node)
        elif node.nodeType == Node.ELEMENT_NODE:
            self.documentOrder = _Element        # At document element
            if self.unsuppressedPrefixes is not None:
                # Exclusive c14n: do not pull in ancestor context.
                self._do_element(node)
            else:
                # Inclusive c14n of a single element: inherit xml: attributes.
                inherited = self._inherit_context(node)
                self._do_element(node, inherited)
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            pass
        else:
            raise TypeError(str(node))

    def _inherit_context(self, node):
        '''_inherit_context(self, node) -> list

        Scan ancestors of attribute and namespace context.  Used only
        for single element node canonicalization, not for subset
        canonicalization.'''
        # Collect the initial list of xml:foo attributes.
        xmlattrs = filter(_IN_XML_NS, _attrs(node))

        # Walk up and get all xml:XXX attributes we inherit.
        inherited, parent = [], node.parentNode
        while parent and parent.nodeType == Node.ELEMENT_NODE:
            for a in filter(_IN_XML_NS, _attrs(parent)):
                n = a.localName
                # Nearest declaration wins; skip names already seen.
                if n not in xmlattrs:
                    xmlattrs.append(n)
                    inherited.append(a)
            parent = parent.parentNode
        return inherited

    def _do_document(self, node):
        '''_do_document(self, node) -> None

        Process a document node.  documentOrder holds whether the document
        element has been encountered such that PIs/comments can be written
        as specified.'''
        self.documentOrder = _LesserElement
        for child in node.childNodes:
            if child.nodeType == Node.ELEMENT_NODE:
                self.documentOrder = _Element    # At document element
                self._do_element(child)
                self.documentOrder = _GreaterElement # After document element
            elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
                self._do_pi(child)
            elif child.nodeType == Node.COMMENT_NODE:
                self._do_comment(child)
            elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
                pass
            else:
                raise TypeError(str(child))
    handlers[Node.DOCUMENT_NODE] = _do_document

    def _do_text(self, node):
        '''_do_text(self, node) -> None

        Process a text or CDATA node.  Render various special characters
        as their C14N entity representations.'''
        if not _in_subset(self.subset, node): return
        # C14N requires &, <, > and CR to be entity-escaped in text output.
        # (The escapes had been corrupted into no-op replaces by an
        # entity-decoding pass; restored per the xml-c14n spec.)
        s = node.data \
                .replace("&", "&amp;") \
                .replace("<", "&lt;") \
                .replace(">", "&gt;") \
                .replace("\015", "&#xD;")
        if s: self.write(s)
    handlers[Node.TEXT_NODE] = _do_text
    handlers[Node.CDATA_SECTION_NODE] = _do_text

    def _do_pi(self, node):
        '''_do_pi(self, node) -> None

        Process a PI node.  Render a leading or trailing #xA if the
        document order of the PI is greater or lesser (respectively)
        than the document element.
        '''
        if not _in_subset(self.subset, node): return
        W = self.write
        if self.documentOrder == _GreaterElement: W('\n')
        W('<?')
        W(node.nodeName)
        s = node.data
        if s:
            W(' ')
            W(s)
        W('?>')
        if self.documentOrder == _LesserElement: W('\n')
    handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi

    def _do_comment(self, node):
        '''_do_comment(self, node) -> None

        Process a comment node.  Render a leading or trailing #xA if the
        document order of the comment is greater or lesser (respectively)
        than the document element.
        '''
        if not _in_subset(self.subset, node): return
        if self.comments:
            W = self.write
            if self.documentOrder == _GreaterElement: W('\n')
            W('<!--')
            W(node.data)
            W('-->')
            if self.documentOrder == _LesserElement: W('\n')
    handlers[Node.COMMENT_NODE] = _do_comment

    def _do_attr(self, n, value):
        '''_do_attr(self, n, value) -> None

        Process an attribute: write name="escaped-value".'''
        W = self.write
        W(' ')
        W(n)
        W('="')
        # C14N attribute values escape &, <, ", TAB, LF and CR.
        # (Restored from entity-decoding corruption; see _do_text.)
        s = value \
                .replace("&", "&amp;") \
                .replace("<", "&lt;") \
                .replace('"', '&quot;') \
                .replace('\011', '&#x9;') \
                .replace('\012', '&#xA;') \
                .replace('\015', '&#xD;')
        W(s)
        W('"')

    def _do_element(self, node, initial_other_attrs = []):
        '''_do_element(self, node, initial_other_attrs = []) -> None

        Process an element (and its children).'''
        # Get state (from the stack) make local copies.
        #   ns_parent -- NS declarations in parent
        #   ns_rendered -- NS nodes rendered by ancestors
        #   xml_attrs -- Attributes in XML namespace from parent
        #   ns_local -- NS declarations relevant to this element
        ns_parent, ns_rendered, xml_attrs = \
            self.state[0], self.state[1][:], self.state[2][:]
        ns_local = ns_parent.copy()

        # Divide attributes into NS, XML, and others.
        other_attrs = initial_other_attrs[:]
        in_subset = _in_subset(self.subset, node)
        for a in _attrs(node):
            if a.namespaceURI == XMLNS.BASE:
                n = a.nodeName
                if n == "xmlns:": n = "xmlns"        # DOM bug workaround
                ns_local[n] = a.nodeValue
            elif a.namespaceURI == XMLNS.XML:
                if self.unsuppressedPrefixes is None or in_subset:
                    xml_attrs.append(a)
            else:
                other_attrs.append(a)

        # Render the node
        W, name = self.write, None
        if in_subset:
            name = node.nodeName
            W('<')
            W(name)

            # Create list of NS attributes to render.
            ns_to_render = []
            for n,v in ns_local.items():
                pval = ns_parent.get(n)

                # If default namespace is XMLNS.BASE or empty, skip
                if n == "xmlns" \
                and v in [ XMLNS.BASE, '' ] and pval in [ XMLNS.BASE, '' ]:
                    continue

                # "omit namespace node with local name xml, which defines
                # the xml prefix, if its string value is
                # http://www.w3.org/XML/1998/namespace."
                if n == "xmlns:xml" \
                and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
                    continue

                # If different from parent, or parent didn't render
                # and if not exclusive, or this prefix is needed or
                # not suppressed
                if (v != pval or n not in ns_rendered) \
                  and (self.unsuppressedPrefixes is None or \
                  _utilized(n, node, other_attrs, self.unsuppressedPrefixes)):
                    ns_to_render.append((n, v))

            # Sort and render the ns, marking what was rendered.
            ns_to_render.sort(_sorter_ns)
            for n,v in ns_to_render:
                self._do_attr(n, v)
                ns_rendered.append(n)

            # Add in the XML attributes (don't pass to children, since
            # we're rendering them), sort, and render.
            other_attrs.extend(xml_attrs)
            xml_attrs = []
            other_attrs.sort(_sorter)
            for a in other_attrs:
                self._do_attr(a.nodeName, a.value)
            W('>')

        # Push state, recurse, pop state.
        state, self.state = self.state, (ns_local, ns_rendered, xml_attrs)
        for c in _children(node):
            _implementation.handlers[c.nodeType](self, c)
        self.state = state

        if name: W('</%s>' % name)
    handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
if _implementation2 is None:
_implementation(node, output.write, **kw)
else:
apply(_implementation2, (node, output.write), kw)
else:
s = StringIO.StringIO()
if _implementation2 is None:
_implementation(node, s.write, **kw)
else:
apply(_implementation2, (node, s.write), kw)
return s.getvalue()
if __name__ == '__main__': print _copyright
| Python |
#! /usr/bin/env python
"""wstools.WSDLTools.WSDLReader tests directory."""
import utils
| Python |
"""Parse web services description language to get SOAP methods.
Rudimentary support."""
ident = '$Id: WSDL.py,v 1.11 2005/02/21 20:16:15 warnes Exp $'
from version import __version__
import wstools
from Client import SOAPProxy, SOAPAddress
from Config import Config
import urllib
class Proxy:
    """WSDL Proxy.

    SOAPProxy wrapper that parses method names, namespaces, soap actions from
    the web service description language (WSDL) file passed into the
    constructor.  The WSDL reference can be passed in as a stream, an url, a
    file name, or a string.

    Loads info into self.methods, a dictionary with methodname keys and
    values of WSDLTools.SOAPCallinfo.

    For example,

        url = 'http://www.xmethods.org/sd/2001/TemperatureService.wsdl'
        wsdl = WSDL.Proxy(url)
        print len(wsdl.methods)       # 1
        print wsdl.methods.keys()     # getTemp

    See WSDLTools.SOAPCallinfo for more info on each method's attributes."""

    def __init__(self, wsdlsource, config=Config, **kw ):
        # wsdlsource may be a stream, a file name, a URL, or raw WSDL text;
        # each possibility is tried in turn below.
        reader = wstools.WSDLTools.WSDLReader()
        self.wsdl = None

        # From Mark Pilgrim's "Dive Into Python" toolkit.py--open anything.
        if self.wsdl is None and hasattr(wsdlsource, "read"):
            #print 'stream'
            self.wsdl = reader.loadFromStream(wsdlsource)

        # NOT TESTED (as of April 17, 2003)
        #if self.wsdl is None and wsdlsource == '-':
        #    import sys
        #    self.wsdl = reader.loadFromStream(sys.stdin)
        #    print 'stdin'

        if self.wsdl is None:
            try:
                # file() probes whether wsdlsource names a readable file.
                file(wsdlsource)
                self.wsdl = reader.loadFromFile(wsdlsource)
                #print 'file'
            except (IOError, OSError):
                pass

        if self.wsdl is None:
            try:
                stream = urllib.urlopen(wsdlsource)
                self.wsdl = reader.loadFromStream(stream, wsdlsource)
            except (IOError, OSError): pass

        if self.wsdl is None:
            # Last resort: treat the argument as the WSDL document itself.
            import StringIO
            self.wsdl = reader.loadFromString(str(wsdlsource))
            #print 'string'

        # Package wsdl info as a dictionary of remote methods, with method name
        # as key (based on ServiceProxy.__init__ in ZSI library).
        # NOTE(review): only the first service and first port are consulted.
        self.methods = {}
        service = self.wsdl.services[0]
        port = service.ports[0]
        name = service.name
        binding = port.getBinding()
        portType = binding.getPortType()
        for operation in portType.operations:
            callinfo = wstools.WSDLTools.callInfoFromWSDL(port, operation.name)
            self.methods[callinfo.methodName] = callinfo

        # Dummy endpoint; __getattr__ rewrites it per call from the WSDL.
        self.soapproxy = SOAPProxy('http://localhost/dummy.webservice',
                                   config=config, **kw)

    def __str__(self):
        # Concatenated string form of every discovered method.
        s = ''
        for method in self.methods.values():
            s += str(method)
        return s

    def __getattr__(self, name):
        """Set up environment then let parent class handle call.

        Raises AttributeError is method name is not found."""
        if not self.methods.has_key(name): raise AttributeError, name

        # Point the shared SOAPProxy at this method's endpoint/namespace.
        callinfo = self.methods[name]
        self.soapproxy.proxy = SOAPAddress(callinfo.location)
        self.soapproxy.namespace = callinfo.namespace
        self.soapproxy.soapaction = callinfo.soapAction
        return self.soapproxy.__getattr__(name)

    def show_methods(self):
        # Pretty-print every method with its input and output parameters.
        for key in self.methods.keys():
            method = self.methods[key]
            print "Method Name:", key.ljust(15)
            print
            inps = method.inparams
            for parm in range(len(inps)):
                details = inps[parm]
                print " In #%d: %s (%s)" % (parm, details.name, details.type)
            print
            outps = method.outparams
            for parm in range(len(outps)):
                details = outps[parm]
                print " Out #%d: %s (%s)" % (parm, details.name, details.type)
            print
| Python |
"""
GSIServer - Contributed by Ivan R. Judson <judson@mcs.anl.gov>
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: GSIServer.py,v 1.5 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
#import xml.sax
import re
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import SOAPConfig
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
#####
from Server import *
from pyGlobus.io import GSITCPSocketServer, ThreadingGSITCPSocketServer
from pyGlobus import ioc
def GSIConfig():
    """Return a SOAPConfig preconfigured for Globus GSI transport.

    channel_mode and delegation_mode come from pyGlobus.ioc; authMethod
    names the server-side authorization callback ("_authorize")."""
    config = SOAPConfig()
    config.channel_mode = ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_GSI_WRAP
    config.delegation_mode = ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_FULL_PROXY
    config.tcpAttr = None
    config.authMethod = "_authorize"
    return config

# Module-level default configuration used by the server classes below.
Config = GSIConfig()
class GSISOAPServer(GSITCPSocketServer, SOAPServerBase):
    """SOAP server running over a Globus GSI-secured TCP socket."""

    def __init__(self, addr = ('localhost', 8000),
                 RequestHandler = SOAPRequestHandler, log = 0,
                 encoding = 'UTF-8', config = Config, namespace = None):
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        # Dispatch tables for registered objects and functions
        # (populated via SOAPServerBase registration methods).
        self.namespace = namespace
        self.objmap = {}
        self.funcmap = {}
        self.encoding = encoding
        self.config = config
        self.log = log

        self.allow_reuse_address= 1

        GSITCPSocketServer.__init__(self, addr, RequestHandler,
                                    self.config.channel_mode,
                                    self.config.delegation_mode,
                                    tcpAttr = self.config.tcpAttr)

    def get_request(self):
        # NOTE(review): pass-through override; adds nothing over the base
        # class behavior — presumably kept as a hook/debugging point.
        sock, addr = GSITCPSocketServer.get_request(self)
        return sock, addr
class ThreadingGSISOAPServer(ThreadingGSITCPSocketServer, SOAPServerBase):
    """Thread-per-request variant of GSISOAPServer."""

    def __init__(self, addr = ('localhost', 8000),
                 RequestHandler = SOAPRequestHandler, log = 0,
                 encoding = 'UTF-8', config = Config, namespace = None):
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)

        # Dispatch tables for registered objects and functions
        # (populated via SOAPServerBase registration methods).
        self.namespace = namespace
        self.objmap = {}
        self.funcmap = {}
        self.encoding = encoding
        self.config = config
        self.log = log

        self.allow_reuse_address= 1

        ThreadingGSITCPSocketServer.__init__(self, addr, RequestHandler,
                                             self.config.channel_mode,
                                             self.config.delegation_mode,
                                             tcpAttr = self.config.tcpAttr)

    def get_request(self):
        # NOTE(review): pass-through override; adds nothing over the base
        # class behavior — presumably kept as a hook/debugging point.
        sock, addr = ThreadingGSITCPSocketServer.get_request(self)
        return sock, addr
| Python |
#!/usr/bin/env python
import string
import cgi
ident = '$Id: interop2html.py,v 1.1.1.1 2001/06/27 21:36:14 cullman Exp $'
lines = open('output.txt').readlines()
#preserve the tally
tally = lines[-6:]
#whack the tally from lines
lines = lines[:-6]
table={}
for line in lines:
if line[:3] == ' ' or line == '>\n' : continue
line = line[:-1] #delete end of line char
row = [line[:line.find(': ')], line[line.find(': ')+2:]] #split server name from rest of line
restofrow = row[1].split(' ',3) #break out method name, number, status code, status comment
if len(restofrow) > 3:
if restofrow[3].find('as expected') != -1:
restofrow[2] = restofrow[2] + ' (as expected)'
elif restofrow[3][:2] == '- ' :
restofrow[3] = restofrow[3][2:]
try: table[row[0]].append([restofrow[0],restofrow[2:]])
except KeyError: table[row[0]] = [[restofrow[0],restofrow[2:]]]
print "<html><body>"
print "<script>function popup(text) {"
print "text = '<html><head><title>Test Detail</title></head><body><p>' + text + '</p></body></html>';"
print "newWin=window.open('','win1','location=no,menubar=no,width=400,height=200');"
print "newWin.document.open();"
print "newWin.document.write(text);"
print "newWin.focus(); } </script>"
print "<br><table style='font-family: Arial; color: #cccccc'><tr><td colspan=2><font face=arial color=#cccccc><b>Summary</b></font></td></tr>"
for x in tally:
z = x[:-1].split(":",1)
print "<tr><td><font face=arial color=#cccccc>",z[0],"</font></td><td><font face=arial color=#cccccc>",z[1],"</font></td></tr>"
print "</table><br>"
c = 0
totalmethods = len(table[table.keys()[0]])
while c < totalmethods:
print "<br><table width='95%' style='font-family: Arial'>"
print "<tr><td width='27%' bgcolor='#cccccc'></td>"
cols = [c, c + 1, c + 2]
if c != 16:
cols += [c + 3]
for i in cols:
try: header = table[table.keys()[0]][i][0]
except: break
print "<td width ='17%' align='center' bgcolor='#cccccc'><b>",header,"</b></td>"
print "</tr>"
l = table.keys()
l.sort()
for key in l:
print "<tr><td bgcolor='#cccccc'>", key , "</td>"
for i in cols:
try: status = table[key][i][1][0]
except: break
if status.find("succeed") != -1:
bgcolor = "#339900"
status = "Pass"
elif status.find("expected") != -1:
bgcolor = "#FF9900"
hreftitle = table[key][i][1][1].replace("'","") # remove apostrophes from title properties
popuphtml = '"' + cgi.escape(cgi.escape(table[key][i][1][1]).replace("'","'").replace('"',""")) + '"'
status = "<a title='" + hreftitle + "' href='javascript:popup(" + popuphtml + ")'>Failed (expected)</a>"
else:
bgcolor = "#CC0000"
hreftitle = table[key][i][1][1].replace("'","") # remove apostrophes from title properties
popuphtml = '"' + cgi.escape(cgi.escape(table[key][i][1][1]).replace("'","'").replace('"',""")) + '"'
status = "<a title='" + hreftitle + "' href='javascript:popup(" + popuphtml + ")'>Failed</a>"
print "<td align='center' bgcolor=" , bgcolor , ">" , status , "</td>"
print "</tr>"
print "</table>"
c = c + len(cols)
print "</body></html>"
| Python |
"""
------------------------------------------------------------------
Author: Gregory R. Warnes <Gregory.R.Warnes@Pfizer.com>
Date: 2005-02-24
Version: 0.7.2
Copyright: (c) 2003-2005 Pfizer, Licensed to PSF under a Contributor Agreement
License: Licensed under the Apache License, Version 2.0 (the"License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in
writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See
the License for the specific language governing
permissions and limitations under the License.
------------------------------------------------------------------
"""
from distutils.core import setup
# Project homepage; also passed to setup() as the package URL.
url="http://www.analytics.washington.edu/statcomp/projects/rzope/fpconst/"

import fpconst

# Standard distutils metadata; the version and long_description are taken
# directly from the fpconst module to avoid duplicating them here.
setup(name="fpconst",
      version=fpconst.__version__,
      description="Utilities for handling IEEE 754 floating point special values",
      author="Gregory Warnes",
      author_email="Gregory.R.Warnes@Pfizer.com",
      url = url,
      long_description=fpconst.__doc__,
      py_modules=['fpconst']
      )
| Python |
"""Utilities for handling IEEE 754 floating point special values
This python module implements constants and functions for working with
IEEE754 double-precision special values. It provides constants for
Not-a-Number (NaN), Positive Infinity (PosInf), and Negative Infinity
(NegInf), as well as functions to test for these values.
The code is implemented in pure python by taking advantage of the
'struct' standard module. Care has been taken to generate proper
results on both big-endian and little-endian machines. Some efficiency
could be gained by translating the core routines into C.
See <http://babbage.cs.qc.edu/courses/cs341/IEEE-754references.html>
for reference material on the IEEE 754 floating point standard.
Further information on this package is available at
<http://www.analytics.washington.edu/statcomp/projects/rzope/fpconst/>.
------------------------------------------------------------------
Author: Gregory R. Warnes <Gregory.R.Warnes@Pfizer.com>
Date: 2005-02-24
Version: 0.7.2
Copyright: (c) 2003-2005 Pfizer, Licensed to PSF under a Contributor Agreement
License: Licensed under the Apache License, Version 2.0 (the"License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in
writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See
the License for the specific language governing
permissions and limitations under the License.
------------------------------------------------------------------
"""
__version__ = "0.7.2"
ident = "$Id: fpconst.py,v 1.16 2005/02/24 17:42:03 warnes Exp $"
import struct, operator
# check endianess: pack a native int and inspect its first byte.  Slicing
# ([:1]) instead of indexing, and bytes literals, keep this correct on both
# Python 2 (indexing bytes yields a 1-char str) and Python 3 (indexing
# yields an int, which made the original "[0] != '\x01'" test always true
# and the str-literal unpacking below raise).
_big_endian = struct.pack('i', 1)[:1] != b'\x01'

# IEEE 754 double-precision special values, built from their raw bit
# patterns (sign/exponent/mantissa), ordered to match the native byte order.
if _big_endian:
    NaN = struct.unpack('d', b'\x7F\xF8\x00\x00\x00\x00\x00\x00')[0]
    PosInf = struct.unpack('d', b'\x7F\xF0\x00\x00\x00\x00\x00\x00')[0]
    NegInf = -PosInf
else:
    NaN = struct.unpack('d', b'\x00\x00\x00\x00\x00\x00\xf8\xff')[0]
    PosInf = struct.unpack('d', b'\x00\x00\x00\x00\x00\x00\xf0\x7f')[0]
    NegInf = -PosInf
def _double_as_bytes(dval):
    "Decode a double-precision float into a list of eight byte values, most significant byte first."
    raw = struct.pack('d', dval)
    octets = list(struct.unpack('8B', raw))
    # Normalize to big-endian (MSB-first) order regardless of the platform.
    if _big_endian:
        return octets
    return octets[::-1]
##
## Functions to extract components of the IEEE 754 floating point format
##

def _sign(dval):
    "Extract the sign bit from a double-precision floating point value"
    # The sign is the top bit of the most significant byte.
    most_significant = _double_as_bytes(dval)[0]
    return (most_significant >> 7) & 0x01
def _exponent(dval):
    """Extract the exponent bits from a double-precision floating
    point value.

    Note that for normalized values the stored exponent carries a bias
    of 1023; subtract 1023 from the returned value to obtain the actual
    exponent.
    """
    # 11 exponent bits: low 7 of byte 0 plus high 4 of byte 1.
    hi, lo = _double_as_bytes(dval)[:2]
    return ((hi << 4) | (lo >> 4)) & 0x7ff
def _mantissa(dval):
    """Extract the 52 mantissa bits from a double-precision floating
    point value."""
    bb = _double_as_bytes(dval)
    # The original computed "bb[1] & 0x0f << 48": since << binds tighter
    # than &, the high nibble was masked against a huge constant and always
    # came out 0; bytes 5-7 were also never folded in.  Assemble all 52
    # bits: 4 from byte 1 plus bytes 2..7.
    mantissa = (bb[1] & 0x0f) << 48
    mantissa += bb[2] << 40
    mantissa += bb[3] << 32
    mantissa += bb[4] << 24
    mantissa += bb[5] << 16
    mantissa += bb[6] << 8
    mantissa += bb[7]
    return mantissa
def _zero_mantissa(dval):
    """Determine whether the mantissa bits of the given double are all
    zero."""
    bb = _double_as_bytes(dval)
    # Mantissa = low nibble of byte 1 plus bytes 2..7; all must be zero.
    return (bb[1] & 0x0f) == 0 and not any(bb[2:])
##
## Functions to test for IEEE 754 special values
##

def isNaN(value):
    "Determine if the argument is a IEEE 754 NaN (Not a Number) value."
    # NaN: all-ones exponent with a non-zero mantissa.
    if _exponent(value) != 0x7ff:
        return False
    return not _zero_mantissa(value)

def isInf(value):
    """Determine if the argument is an infinite IEEE 754 value (positive
    or negative infinity)"""
    # Infinity: all-ones exponent with an all-zero mantissa.
    if _exponent(value) != 0x7ff:
        return False
    return _zero_mantissa(value)

def isFinite(value):
    """Determine if the argument is a finite IEEE 754 value (i.e., is
    not NaN, positive or negative infinity)"""
    return _exponent(value) != 0x7ff

def isPosInf(value):
    "Determine if the argument is a IEEE 754 positive infinity value"
    # Same bit pattern as infinity, with the sign bit clear.
    return _sign(value) == 0 and _exponent(value) == 0x7ff and \
           _zero_mantissa(value)

def isNegInf(value):
    "Determine if the argument is a IEEE 754 negative infinity value"
    # Same bit pattern as infinity, with the sign bit set.
    return _sign(value) == 1 and _exponent(value) == 0x7ff and \
           _zero_mantissa(value)
##
## Functions to test public functions.
##

def test_isNaN():
    # NaN is the only value for which isNaN() is true.
    assert( not isNaN(PosInf) )
    assert( not isNaN(NegInf) )
    assert( isNaN(NaN ) )
    assert( not isNaN( 1.0) )
    assert( not isNaN( -1.0) )

def test_isInf():
    # Both infinities test true; NaN and finite values do not.
    assert( isInf(PosInf) )
    assert( isInf(NegInf) )
    assert( not isInf(NaN ) )
    assert( not isInf( 1.0) )
    assert( not isInf( -1.0) )

def test_isFinite():
    # Finite means neither NaN nor an infinity.
    assert( not isFinite(PosInf) )
    assert( not isFinite(NegInf) )
    assert( not isFinite(NaN ) )
    assert( isFinite( 1.0) )
    assert( isFinite( -1.0) )

def test_isPosInf():
    assert( isPosInf(PosInf) )
    assert( not isPosInf(NegInf) )
    assert( not isPosInf(NaN ) )
    assert( not isPosInf( 1.0) )
    assert( not isPosInf( -1.0) )

def test_isNegInf():
    assert( not isNegInf(PosInf) )
    assert( isNegInf(NegInf) )
    assert( not isNegInf(NaN ) )
    assert( not isNegInf( 1.0) )
    assert( not isNegInf( -1.0) )

# overall test
def test():
    # Run every self-test; raises AssertionError on the first failure.
    test_isNaN()
    test_isInf()
    test_isFinite()
    test_isPosInf()
    test_isNegInf()

if __name__ == "__main__":
    test()
| Python |
from sys import stdin, stdout
#fin = stdin
fin = open('input.txt')        # read the two input strings from input.txt
fout = stdout
#fout = open('output.txt', 'w')

# Two whitespace-separated strings; n holds their lengths.
s = list(fin.read().strip().split())
n = list(map(len, s))

# Longest-common-subsequence DP over an (n0+1) x (n1+1) table.  The extra
# last row/column is never written and stays all-zero, so dp[i-1]/dp[j-1]
# wrap around to it (via negative indexing) and act as the zero sentinel
# when i == 0 or j == 0.  fr records the predecessor cell for backtracking.
dp = [[0] * (n[1] + 1) for i in range(n[0] + 1)]
fr = [[(0, 0)] * (n[1] + 1) for i in range(n[0] + 1)]
for i in range(n[0]):
    for j in range(n[1]):
        dp[i][j], fr[i][j] = dp[i - 1][j], (i - 1, j)
        if dp[i][j] < dp[i][j - 1]:
            dp[i][j], fr[i][j] = dp[i][j - 1], (i, j - 1)
        if s[0][i] == s[1][j]:
            # On a character match, extending the diagonal is always >= the
            # other two options, so it can be taken unconditionally.
            dp[i][j], fr[i][j] = dp[i - 1][j - 1] + 1, (i - 1, j - 1)

# Walk the predecessor links back from the bottom-right corner, collecting
# matched characters; reverse at the end to restore left-to-right order.
ans = []
i, j = map(lambda x: x - 1, n)
while dp[i][j] != 0:
    if s[0][i] == s[1][j]:
        ans.append(s[0][i])
    i, j = fr[i][j]
print(''.join(ans[::-1]), file=fout)
| Python |
from sys import stdin, stdout
fin = stdin
#fin = open('input.txt')
fout = stdout
#fout = open('output.txt', 'w')

n = int(fin.readline())

# Distribute the numbers 1..n*n into n groups: the value i*n + j + 1 goes to
# group (i + j) % n, so every group gets exactly one number from each "row"
# i.  The per-group sum is printed so the user can verify the groups balance
# (presumably the goal is equal sums — TODO confirm intended property of n).
ans = [[] for i in range(n)]
for i in range(n):
    for j in range(n):
        ans[(i + j) % n].append(i * n + j + 1)
for i in range(n):
    # NOTE(review): output goes to stdout via print(); fout is defined above
    # but not passed as file=fout, so the output.txt variant is ignored here.
    print(' '.join(map(str, ans[i])), ' , sum = ', sum(ans[i]))
'''
Created on 21-03-2011
@author: maciek
'''
def formatString(format, **kwargs):
    '''Substitute {name} placeholders in *format* with the matching keyword
    arguments, while leaving unknown placeholders and literal braces intact.

    Returns '' when *format* is empty or None.
    '''
    if not format:
        return ''

    keys = list(kwargs.keys())

    # Shield every known placeholder from the brace escaping below.
    text = format
    for key in keys:
        text = text.replace("{" + key + "}", "##" + key + "##")

    # Escape the remaining braces so str.format() treats them as literals.
    text = text.replace("{", "{{").replace("}", "}}")

    # Restore the shielded placeholders and perform the substitution.
    for key in keys:
        text = text.replace("##" + key + "##", "{" + key + "}")
    result = text.format(**kwargs)

    # Collapse doubled braces (e.g. re-introduced by substituted values).
    result = result.replace("{{", "{").replace("}}", "}")
    return result
'''
Created on 21-03-2011
@author: maciek
'''
from IndexGenerator import IndexGenerator
from optparse import OptionParser
import os
import tempfile
import shutil
import logging
logging.basicConfig(level = logging.DEBUG)
# Command-line options of the OTA package generator; all are mandatory.
parser = OptionParser()
parser.add_option('-n', '--app-name', action='store', dest='appName', help='aplication name')
parser.add_option('-u', '--release-urls', action='store', dest='releaseUrls', help='URLs of download files - as coma separated list of entrires')
parser.add_option('-d', '--destination-directory', action='store', dest='otaAppDir', help='Directory where OTA files are created')
parser.add_option('-v', '--version', action='store', dest='version', help='Version of the application')
parser.add_option('-r', '--releases', action='store', dest='releases', help='Release names of the application')
parser.add_option('-R', '--release-notes', action='store', dest='releaseNotes', help='Release notes of the application (in txt2tags format)')
parser.add_option('-D', '--description', action='store', dest='description', help='Description of the application (in txt2tags format)')
(options, args) = parser.parse_args()
# Abort with a usage error on the first missing option.
if options.appName == None:
    parser.error("Please specify the appName.")
elif options.releaseUrls == None:
    parser.error("Please specify releaseUrls")
elif options.otaAppDir == None:
    parser.error("Please specify destination directory")
elif options.version == None:
    parser.error("Please specify version")
elif options.releases == None:
    parser.error("Please specify releases")
elif options.releaseNotes == None:
    parser.error("Please specify releaseNotes")
elif options.description == None:
    parser.error("Please specify description")
# Unpack the validated options into the module-level names used below.
appName = options.appName
releaseUrls = options.releaseUrls
otaAppDir = options.otaAppDir
version = options.version
releases = options.releases
releaseNotes = options.releaseNotes
description = options.description
def findIconFilename():
    """Return the path of the launcher icon, preferring the highest
    pixel density available.

    Falls back to 'res/drawable/icon.png' even when that file does not
    exist either (same as the original chain of checks).  The repeated
    if-not-exists ladder was replaced by a loop over the candidates.
    """
    candidates = (
        "res/drawable-hdpi/icon.png",
        "res/drawable-mdpi/icon.png",
        "res/drawable-ldpi/icon.png",
    )
    iconPath = "res/drawable/icon.png"  # unconditional fallback
    for candidate in candidates:
        if os.path.exists(candidate):
            iconPath = candidate
            break
    logging.debug("IconPath: "+iconPath)
    return iconPath
def createOTApackage():
    '''
    Render the OTA index page and return it as a rewound temporary file.
    '''
    # Read the txt2tags sources for the two free-text sections.
    notesText = open(releaseNotes).read()
    descriptionText = open(description).read()
    generator = IndexGenerator(appName, releaseUrls, notesText,
                               descriptionText, version, releases)
    page = generator.get()
    tmp = tempfile.TemporaryFile()
    tmp.write(page)
    tmp.flush()
    tmp.seek(0)  # rewind so the caller can copy from the start
    return tmp
# Render the page, create the destination directory when missing, then
# copy the rendered index and the launcher icon into it.
tempIndexFile = createOTApackage()
if not os.path.isdir(otaAppDir):
    logging.debug("creating dir: "+otaAppDir)
    os.mkdir(otaAppDir)
else:
    logging.warning("dir: "+otaAppDir+" exists")
indexFile = open(os.path.join(otaAppDir,"index.html"),'w')
shutil.copyfileobj(tempIndexFile, indexFile)
srcIconFileName = findIconFilename()
disIconFileName = os.path.join(otaAppDir,"Icon.png")
shutil.copy(srcIconFileName,disIconFileName)
| Python |
'''
Created on 21-03-2011
@author: maciek
'''
from formater import formatString
import os
class IndexGenerator(object):
    '''
    Generates index.html for iOS app OTA distribution from a template.
    '''
    basePath = os.path.dirname(__file__)
    templateFile = os.path.join(basePath,"templates/index.tmpl")
    # Class-level defaults; every instance overwrites them in __init__.
    releaseUrls = ""
    appName = ""
    changeLog = ""
    description = ""
    version = ""
    release = ""
    def __init__(self,appName, releaseUrls, changeLog, description, version, releases):
        '''
        Store the page content for a later get() call.
        '''
        self.appName = appName
        self.releaseUrls = releaseUrls
        self.changeLog = changeLog
        self.description = description
        self.version = version
        self.releases = releases
    def get(self):
        '''
        Render and return the index.html source from the template file.
        '''
        urls = self.releaseUrls.split(",")
        names = self.releases.split(",")
        # Build one <li> download entry per release name, pairing each
        # with the URL at the same position.
        pieces = []
        for position, name in enumerate(names):
            pieces.append(" <li>\n")
            pieces.append(" <h3><a href=\"javascript:load('" + urls[position] + "')\">" + name + "</a></h3>\n")
            pieces.append(" </li>\n")
        generatedHtml = "".join(pieces)
        template = open(self.templateFile).read()
        return formatString(template, downloads=generatedHtml,
                            changeLog=self.changeLog,
                            appName=self.appName,
                            description=self.description,
                            version = self.version)
from math import sqrt, sin, cos, tan, pi
from numpy import *
def lerpt(startKey, endKey):
    """Expand the span between two key frames into 30 fps frames.

    Keys are [time, pose]; each returned frame is [time, interpolated
    pose fields...], covering startKey's time but stopping short of
    endKey's time.
    """
    step = 1 / 30.0  # assuming 30 frames a second
    startTime, startPose = startKey[0], startKey[1]
    endTime, endPose = endKey[0], endKey[1]
    interval = float(endTime) - float(startTime)
    frames = []
    elapsed = 0
    # Stop one step early so no frame lands (practically) on endTime.
    while elapsed < interval - step:
        frame = [startTime + elapsed]
        for idx, component in enumerate(startPose):
            frame.append(lerp(component, endPose[idx], elapsed / interval))
        frames.append(frame)
        elapsed += step
    return frames
def lerp(x, y, pct):
    """Linear interpolation from x to y; pct is clamped into [0, 1]."""
    if pct < 0.0:
        pct = 0.0
    elif pct > 1.0:
        pct = 1.0
    return x * (1 - pct) + y * pct
# frame layout: time followed by the pose fields (x, y, z, rotation)
def writeFrames(f, keyframes):
    """Interpolate each consecutive key-frame pair with lerpt() and
    write every resulting frame to f as one space-separated line."""
    for left, right in zip(keyframes, keyframes[1:]):
        for frame in lerpt(left, right):
            f.write(" ".join(str(field) for field in frame))
            f.write("\n")
#assume that characters are squares with sides of length .025
def stopGoCardinal(): #this is the up and down version
    """Write stopGo.patch: two characters crossing on the unit square,
    one pausing (stop/go) while the other passes.

    Fix: the output file is now closed deterministically via ``with``
    instead of leaking an open handle.
    """
    with open("stopGo.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.0 1.0 0.0\n")
        f.write("2\n") #number of characters
        f.write("# 0 1 2 3\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [1, [.5, .3, 0, 0]],
                         [2, [.5, .3, 0, 0]],
                         [3, [.5, 1, 0, 0,]]]
        writeFrames(f, kfsCharacter1)
        f.write("# 0 1 2 3\n") #intervals for second character
        kfsCharacter2 = [[0, [1, .5, 0, 45]],
                         [1, [.7, .5, 0, 45]],
                         [2, [.3, .5, 0, 45]],
                         [3, [0, .5, 0, 45]]]
        writeFrames(f, kfsCharacter2)
def rightTurn():
    """Write rightTurn.patch: one character entering from the bottom of
    a five-vertex patch and exiting to the right.

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("rightTurn.patch", 'w') as f:
        f.write("5\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.5 1.0 0.0\n")
        f.write("0.0 0.5 0.0\n")
        f.write("1\n") #number of characters
        f.write("# 0 1 2 3\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [1, [.5, .5, 0, 0]],
                         [2, [.5, .5, 0, -90]],
                         [3, [1, .5, 0, -90,]]]
        writeFrames(f, kfsCharacter1)
def leftTurn():
    """Write leftTurn.patch: one character entering from the bottom of
    the unit square and exiting to the left.

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("leftTurn.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.0 1.0 0.0\n")
        f.write("1\n") #number of characters
        f.write("# 0 1 2 3\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [1, [.5, .5, 0, 0]],
                         [2, [.5, .5, 0, 90]],
                         [3, [0, .5, 0, 90,]]]
        writeFrames(f, kfsCharacter1)
def straightAhead():
    """Write straightAhead.patch: one character walking straight through
    a narrow corridor patch with no pause.

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("straightAhead.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write(".25 0.0 0.0\n") #polygon
        f.write(".75 0.0 0.0\n")
        f.write(".75 1.0 0.0\n")
        f.write(".25 1.0 0.0\n")
        f.write("1\n") #number of characters
        f.write("# 0 1.5 1.5 3\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [3, [.5, 1, 0, 0]]]
        writeFrames(f, kfsCharacter1)
def turnAround():
    """Write turnAround.patch: one character walking in, rotating 180
    degrees and leaving the way it came.

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("turnAround.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.0 1.0 0.0\n")
        f.write("1\n") #number of characters
        f.write("# 0 1 2 3\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [1, [.5, .5, 0, 0]],
                         [2, [.5, .5, 0, -180]],
                         [3, [.5, 0, 0, -180,]]]
        writeFrames(f, kfsCharacter1)
def diamondThing():
    """Write diamondThing.patch: two characters sidestepping each other
    in a diamond pattern around the center of the square.

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("diamondThing.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.0 1.0 0.0\n")
        f.write("2\n") #number of characters
        f.write("# 0 1 2 3\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [1, [.5, .4, 0, 0]],
                         [1.5, [.6, .4, 0, -45]],
                         [2, [.6, .5, 0, -90]],
                         [3, [1, .5, 0, -90,]]]
        writeFrames(f, kfsCharacter1)
        f.write("# 0 1 2 3\n") #entrance, start of interaction, end of interaction, exit
        kfsCharacter2 = [[0, [0, .5, 0, -90]],
                         [1, [.4, .5, 0, -90]],
                         [1.5, [.4, .6, 0, -45]],
                         [2, [.5, .6, 0, 0]],
                         [3, [.5, 1, 0, 0,]]]
        writeFrames(f, kfsCharacter2)
def smallTest():
    """Write smalltest.patch: the stop/go walk compressed into the 0..1
    time interval (single character).

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("smalltest.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.0 1.0 0.0\n")
        f.write("1\n") #number of characters
        f.write("# 0 .33 .66 1\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [.33, [.5, .3, 0, 0]],
                         [.66, [.5, .3, 0, 0]],
                         [1, [.5, 1, 0, 0,]]]
        writeFrames(f, kfsCharacter1)
def smallTest2():
    """Write smalltest2.patch: a single character pausing at the center
    and then exiting right, on the 0..1 time interval.

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("smalltest2.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.0 1.0 0.0\n")
        f.write("1\n") #number of characters
        f.write("# 0 .33 .66 1\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [.33, [.5, .5, 0, 0]],
                         [.66, [.5, .5, 0, 0]],
                         [1, [1, .5, 0, 0,]]]
        writeFrames(f, kfsCharacter1)
#assume that characters are squares with sides of length .025
def stopGoSmall(): #this is the up and down version
    """Write stopGoSmall.patch: the two-character stop-and-go crossing
    scaled onto the 0..1 interaction interval.

    Fix: the output file is now closed deterministically via ``with``.
    """
    with open("stopGoSmall.patch", 'w') as f:
        f.write("4\n") #number of verts in the polygon
        f.write("0.0 0.0 0.0\n") #polygon
        f.write("1.0 0.0 0.0\n")
        f.write("1.0 1.0 0.0\n")
        f.write("0.0 1.0 0.0\n")
        f.write("2\n") #number of characters
        f.write("# 0 .33 .66 1\n") #entrance, start of interaction, end of interaction, exit
        #key frames: list of [time, [x, y, z, rotation (in degrees)]]
        kfsCharacter1 = [[0, [.5, 0, 0, 0]],
                         [.33, [.5, .3, 0, 0]],
                         [.66, [.5, .3, 0, 0]],
                         [1, [.5, 1, 0, 0,]]]
        writeFrames(f, kfsCharacter1)
        f.write("# 0 .33 .66 1\n") #intervals for second character
        kfsCharacter2 = [[0, [1, .5, 0, 45]],
                         [.33, [.7, .5, 0, 45]],
                         [.66, [.3, .5, 0, 45]],
                         [1, [0, .5, 0, 45]]]
        writeFrames(f, kfsCharacter2)
# Generate only the left-turn patch; the other generators are kept for
# manual runs.
leftTurn()
#stopGoSmall()
#diamondThing()
#turnAround()
#straightAhead()
#rightTurn()
#stopGoCardinal()
#stopGo()
| Python |
#!/usr/bin/python
from random import uniform
def find_xIndex(x, array):
    """Return the index of the first entry strictly greater than x,
    or 0 when no entry exceeds x (or the array is empty)."""
    for position, bound in enumerate(array):
        if x < bound:
            return position
    return 0
func = lambda x:x**2.   # observable f(x)
weight = lambda x:x**1. # importance-sampling weight w(x)
x = [.001*(i+1.) for i in range(1000)]  # grid on (0, 1]
# create the weight prob. boundary and name as norm_w
# norm_w[i] is the cumulative probability up to x[i] (discrete CDF of w).
weightList = [weight(i) for i in x]
sum_w = sum(weightList)
norm_w = []
t_sum = 0.0
for i in weightList:
    t_sum += i/sum_w
    norm_w.append(t_sum)
result = 0.0
result_w = 0.0
N_iter = 10000
# start calculate average
# Draw uniform deviates, invert the CDF with find_xIndex and average
# f/w; result_w accumulates 1/w but is not used below.
for i in range(N_iter):
    ran = uniform(0,1)
    x_index = find_xIndex(ran, norm_w)
    result += func(x[x_index])/weight(x[x_index])
    result_w += 1/weight(x[x_index])
# normalized
avg = result/float(N_iter)
print avg
| Python |
#!/usr/bin/python
import time
from random import uniform
# position : [up, down, left, right]
# Transition table of the 3x3 pebble game: for each site 1..9 the tuple
# holds the neighbour reached by (up, down, left, right); 0 marks a
# blocked move (the pebble stays put).
moveList = {
    1 : (4, 0, 0, 2),
    2 : (5, 0, 1, 3),
    3 : (6, 0, 2, 0),
    4 : (7, 1, 0, 5),
    5 : (8, 2, 4, 6),
    6 : (9, 3, 5, 0),
    7 : (0, 4, 0, 8),
    8 : (0, 5, 7, 9),
    9 : (0, 6, 8, 0)
}
# Visit counter per site; should approach the uniform distribution 1/9.
Nhits = {
    1 : 0,
    2 : 0,
    3 : 0,
    4 : 0,
    5 : 0,
    6 : 0,
    7 : 0,
    8 : 0,
    9 : 0
}
## create record
#iter_time = []
#record = {}
#tmp = []
#for i in Nhits.keys():
#    record[i] = tmp
N = 4000
startPosition = 9
# start simulate
Nhits[startPosition] += 1
position = startPosition
#iter_time.append(1)
#for i in Nhits.keys():
#    record[i].append(Nhits[i])
for i in range(1, N):
    direction = int(uniform(0, 4)) # 0:up, 1:down, 2:left, 3:right
    nextPosition = moveList[position][direction]
    if nextPosition != 0:
        position = nextPosition
    # A blocked move still counts as a (repeated) visit to this site.
    Nhits[position] += 1
#    iter_time.append(i)
#    for j in Nhits.keys():
#        record[j].append(Nhits[j])
    print position, Nhits.values()
#time.sleep(.005)
# end
# convert array format for gnuplot
plotarray = []
for i in moveList.keys():
    print i, '\t', float(Nhits[i])/float(N)
    plotarray.append([i, float(Nhits[i])/float(N)])
import Gnuplot as gp
plotItem = gp.PlotItems.Data(plotarray, with_='boxes')
g = gp.Gnuplot()
g('set yrange [0:1]')
g.xlabel('Position')
g.ylabel('Probability')
g.plot(plotItem)
#site_hist = []
#for site in Nhits.keys():
#    tmp = []
#    for i in range(len(record[site])):
#        tmp.append([i, record[site][i]])
#    site_hist.append(tmp)
#
#
#plotItems = []
#for i in Nhits.keys():
#    plotItems.append(gp.PlotItems.Data(site_hist[i-1]))
#
#g_iter = gp.Gnuplot()
#g_iter.xlabel('Move times')
#g_iter.ylabel('Count')
#for i in plotItems:
#    g_iter.replot(i)
raw_input()
| Python |
#!/usr/bin/python
import Gnuplot
import math
from random import uniform
def markov_throw(delta, x, y):
    """Propose a Markov-chain move: shift (x, y) by independent uniform
    offsets in [-delta, delta] (the x offset is drawn first)."""
    dx = uniform(-delta, delta)
    dy = uniform(-delta, delta)
    return x + dx, y + dy
def direct_throw(start, end):
    """Draw one uniform point in the square [start, end] x [start, end]."""
    first = uniform(start, end)
    second = uniform(start, end)
    return first, second
def IsInSquare(x, y):
    """True when (x, y) lies strictly inside the square |x|, |y| < 1.

    Idiom fix: return the comparison directly instead of an if/else
    over an already-boolean expression.
    """
    return abs(x) < 1. and abs(y) < 1.
def IsInCircle(x, y):
    """True when (x, y) lies strictly inside the unit circle.

    Idiom fix: return the comparison directly instead of an if/else
    over an already-boolean expression.
    """
    return x**2 + y**2 < 1.
def CalErr(summ, sqrSum, N):
avg = summ/float(N)
st_err = (sqrSum/float(N)-avg**2)**1/2
return st_err
def markov_pi(delta, N):
    """Estimate pi with a Metropolis walk confined to [-1,1]^2,
    counting visits to the unit circle.

    delta is the maximum step size per coordinate and N the number of
    proposed moves.  Returns rows [iteration, running pi estimate,
    error estimate] sampled every 100 iterations.
    """
    x, y = 1., 1. # initial position
    N_hits = 0
    record = []
    sumPi = 0.0 #sum(f)
    sumSqrPi = 0.0 #sum(f^2)
    for i in range(1,N):
        tx, ty = markov_throw(delta, x, y)
        if IsInSquare(tx, ty):
            x, y = tx, ty  # accept the move; otherwise stay put
        if IsInCircle(x, y):
            N_hits += 1
        c_pi = N_hits * 4. / float(i)  # running estimate: 4 * hit fraction
        sumPi += c_pi
        sumSqrPi += c_pi**2
        if i > 2:
            err = CalErr(sumPi, sumSqrPi, i)
        else:
            err = 0.0
        if i % 100 == 1:
            record.append([i, c_pi, err])
    return record
if __name__== "__main__":
    #data = markov_pi(.3, 10001)
    #item_pi = Gnuplot.PlotItems.Data(data, cols=(0,1), title = 'pi', with_='lp pt 6')
    #item_err = Gnuplot.PlotItems.Data(data, cols=(0,2), title = 'err', with_='lp pt 6')
    #g1 = Gnuplot.Gnuplot()
    #g1('set multiplot title "Markov_pi" layout 2,1')
    #g1.plot(item_pi)
    #g1.plot(item_err)
    # Compare convergence for several step sizes delta.
    N = 10001 # iter times
    delta_value = [0.01, 0.1, 0.3, 0.5, 0.99]
    pi_items = []
    err_items = []
    for i in delta_value:
        calc = markov_pi(i, N)
        pi_items.append(Gnuplot.PlotItems.Data(calc, cols=(0, 1), title='pi '+str(i)))
        err_items.append(Gnuplot.PlotItems.Data(calc, cols=(0, 2), title='err '+str(i)))
    # One window for the pi estimates, one (log-log) for the errors.
    g_pi = Gnuplot.Gnuplot()
    g_pi.xlabel('N')
    g_pi.ylabel('pi')
    g_pi('set yrange [0:4.7]')
    g_err = Gnuplot.Gnuplot()
    g_err('set logscale xy')
    g_err.xlabel('N')
    g_err.ylabel('err')
    for i in range(len(delta_value)):
        g_pi.replot(pi_items[i])
        g_err.replot(err_items[i])
    raw_input('Press Enter to exit')
| Python |
from random import uniform
def cont_integral(a, b, N):
    """Importance-sampled Monte-Carlo average over N draws.

    Samples x via the inverse CDF of the density (a+1)*x**a on (0, 1)
    and averages x**(b-a); analytically that expectation equals
    (a+1)/(b+1).

    Fix: removed the unused ``summ_n`` accumulator.
    """
    total = 0.0
    for _ in range(N):
        x = uniform(0, 1)**(1./(a+1.))  # inverse-CDF draw from ~x**a
        total += x**(b-a)
    return total/float(N)
# Example run: sampling exponent a, observable exponent b; the expected
# result is (a+1)/(b+1) = 0.75 for these values.
a = 2.
b = 3.
N = 1000
print cont_integral(a, b, N)
| Python |
#!/usr/bin/python
import Gnuplot
import math
from random import uniform as ran
def naive_ran():
    """Linear congruential generator: advance the module-global state
    ``idum`` one step and return a float in [0, 1)."""
    global idum
    modulus = 134456
    multiplier = 8121
    increment = 28411
    idum = (idum*multiplier + increment) % modulus
    return idum/float(modulus)
def naive_uniform(a, b):
    """Scale one naive_ran() draw into the interval [a, b)."""
    width = float(b-a)
    return float(a) + width*naive_ran()
def markov_naive(seed, delta, N):
    """Markov-chain pi estimate driven by the naive LCG.

    Seeds the global generator state, runs N Metropolis moves of size
    <= delta inside [-1,1]^2 and returns rows [iteration, percentage
    error vs math.pi] sampled every 100 iterations.
    """
    global idum
    idum = seed
    x, y = 1., 1. # initial position
    N_hits = 0
    record = []
    for i in range(1,N):
        del_x = naive_uniform(-delta, delta)
        del_y = naive_uniform(-delta, delta)
        if abs(x+del_x) < 1. and abs(y+del_y) < 1.:
            # Don't move if out of range.
            x, y = x + del_x, y + del_y
        if x**2 + y**2 < 1.:
            # N_hits + 1 while the point is in the circle of x^2+y^2=1.
            N_hits += 1
        c_pi = N_hits * 4. / float(i)   # running pi estimate
        err = abs(math.pi - c_pi)/math.pi*100.
        if i%100 ==1:
            # record per 100 points
            record.append([i, err])
    return record
# Plot the percentage error of the naive-LCG pi estimate over time.
g = Gnuplot.Gnuplot()
g.xlabel('iter times')
g.ylabel('err (%)')
g.plot(markov_naive(123456, 0.3, 10000))
raw_input()
| Python |
from random import uniform
class ThrowMethod:
    """Base interface for dart-throwing strategies."""
    def Throw(self):
        """Default throw: a degenerate zero sample."""
        return 0.
class DirectPyThrow(ThrowMethod):
    """Direct sampling: every Throw() returns an independent uniform
    point in [start, end] x [start, end] using Python's random module."""
    def __init__(self, start = 0., end = 1.):
        self.start = start
        self.end = end
    def Throw(self):
        u = uniform(self.start, self.end)
        v = uniform(self.start, self.end)
        return u, v
class DirectNaiveThrow(ThrowMethod):
    """Direct sampling backed by a hand-rolled linear congruential
    generator (idum <- (idum*n + k) mod m).

    Bug fix: the original advanced the LCG only once per Throw() and
    returned the identical value for both coordinates, so every sample
    fell on the diagonal y == x.  Each coordinate now consumes its own
    LCG step.
    """
    def __init__(self, seed = 43289, start = 0., end = 1.):
        self.start = start
        self.lens = end - start
        self.idum = seed
        # LCG parameters
        self.m = 134456
        self.n = 8121
        self.k = 28411
    def _next(self):
        # One LCG step, scaled into [start, start + lens).
        self.idum = (self.idum*self.n + self.k) % self.m
        return self.start + self.lens*float(self.idum)/float(self.m)
    def Throw(self):
        x = self._next()
        y = self._next()
        return x, y
class MarkovThrow(ThrowMethod):
    """Metropolis-style proposal: shift the stored position by at most
    delta per coordinate; a proposal leaving the square |x|,|y| < 1 is
    rejected and the old position is returned again."""
    def __init__(self, delta = .1, position = (1., 1.)):
        self.delta = delta
        self.x = position[0]
        self.y = position[1]
    def Throw(self):
        proposed_x = self.x + uniform(-self.delta, self.delta)
        proposed_y = self.y + uniform(-self.delta, self.delta)
        if abs(proposed_x) < 1. and abs(proposed_y) < 1.:
            self.x = proposed_x
            self.y = proposed_y
        return self.x, self.y
##############################################################################
class Record:
    """Append-only history of simulation outcomes."""
    def __init__(self):
        self.hist = []
    def Clear(self):
        """Start a fresh, empty history."""
        self.hist = []
    def Add(self, value):
        """Append one outcome to the history."""
        self.hist.append(value)
    def GetRecord(self):
        """Return the underlying list of outcomes."""
        return self.hist
##############################################################################
class MonteCarlo_pi:
    """Estimate pi by throwing darts: each throw inside the unit circle
    is recorded as 1, each miss as 0.  The throwing strategy and the
    history sink are injected via setters."""
    def __init__(self):
        self.hits = 0
        self.rejects = 0
        self.x = 0.0
        self.y = 0.0
    def SetThrowMethod(self, throwMethod):
        """Inject the sampler; it must provide Throw() -> (x, y)."""
        self.throwMethod = throwMethod
    def Throw(self):
        """Draw the next sample point from the sampler."""
        self.x, self.y = self.throwMethod.Throw()
    def IsInCircleRange(self):
        """True when the current point lies inside the unit circle."""
        return self.x**2. + self.y**2. < 1.
    def HitNotify(self):
        self.record.Add(1)
    def RejectNotify(self):
        self.record.Add(0)
    def SetRecord(self, record):
        """Inject the sink; it must provide Clear/Add/GetRecord."""
        self.record = record
    def GetRecord(self):
        return self.record.GetRecord()
    def Simulate(self, N):
        """Throw N darts, recording 1 per circle hit and 0 otherwise."""
        self.record.Clear()
        for _ in range(N):
            self.Throw()
            notify = self.HitNotify if self.IsInCircleRange() else self.RejectNotify
            notify()
class MonteCarlo_integral:
    """Importance-sampled average of x**(obser - prob) where x is drawn
    from the density proportional to x**prob on (0, 1)."""
    def __init__(self):
        self.prob = 0.
        self.obser = 0.
        self.diff = 0.
    def SetFunction(self, prob, obser):
        """Set the sampling exponent (prob) and observable exponent."""
        self.prob = prob
        self.obser = obser
        self.diff = obser - prob
    def SetRecord(self, record):
        """Inject the sink; it must provide Clear/Add/GetRecord."""
        self.record = record
    def GetRecord(self):
        return self.record.GetRecord()
    def Throw(self):
        # Inverse-CDF draw from the density ~ x**prob on (0, 1).
        self.x = uniform(0, 1)**(1./(self.prob + 1.))
    def Simulate(self, N):
        """Record N samples of the reweighted observable."""
        self.record.Clear()
        for _ in range(N):
            self.Throw()
            self.record.Add(self.x**self.diff)
##############################################################################
def Binning(base = 2, record = None):
    """Coarse-grain *record* by averaging consecutive chunks of *base*
    entries (the final chunk may be shorter).

    Fixes: an empty input now returns [] instead of raising
    ZeroDivisionError, and the mutable default argument ([]) was
    replaced with None.
    """
    if record is None:
        record = []
    if not record:
        return []
    binned = []
    for start in range(0, len(record), base):
        chunk = record[start:start + base]
        binned.append(sum(chunk)/float(len(chunk)))
    return binned
def CalAvgStderr(record):
    """Standard error of the mean: sqrt((<x^2> - <x>^2) / N)."""
    count = 0
    total = 0.0
    total_square = 0.0
    for value in record:
        count += 1
        total += value
        total_square += value**2.
    mean = total/float(count)
    mean_square = total_square/float(count)
    return ((mean_square - mean**2.)/float(count))**(1./2.)
def CalStderrWithBinning(base, record):
    """Repeatedly bin *record* by *base*, recording the sample count and
    standard error at each level; returns the two parallel lists."""
    counts = []
    errors = []
    length = len(record)
    if length > base:
        counts.append(length)
        errors.append(CalAvgStderr(record))
    while length > base:
        record = Binning(base, record)
        errors.append(CalAvgStderr(record))
        length = len(record)
        counts.append(length)
    return counts, errors
if __name__== '__main__':
    # Run a Markov-chain pi estimate and study the autocorrelation via
    # the binning analysis of the hit/miss series.
    baseNum = 2
    N = baseNum**15
    delta = .3
    startPosition = (1., 1.)
    p = MonteCarlo_pi()
    #throwMethod = DirectPyThrow(0., 1.)
    #throwMethod = DirectNaiveThrow(12345, 0., 1.)
    throwMethod = MarkovThrow(delta, startPosition)
    p.SetThrowMethod(throwMethod)
    p.SetRecord(Record())
    p.Simulate(N)
    numBinning, errBinning = CalStderrWithBinning(baseNum, p.GetRecord())
    for i in range(len(numBinning)):
        print numBinning[i], errBinning[i]
    import Gnuplot
    g = Gnuplot.Gnuplot()
    g.plot(errBinning)
    raw_input()
############################################################################
    #N = 1000
    #prob = 2.
    #obser = 3.
    #c = MonteCarlo_integral()
    #c.SetRecord(Record())
    #c.SetFunction(prob, obser)
    #c.Simulate(N)
    #print sum(c.GetRecord())/float(N)
| Python |
import numpy as np
from random import uniform
class record(object):
    """Running accumulator of a scalar observable: keeps the sum, the
    sum of squares and the sample count so <f> and <f^2> can be read."""
    def __init__(self):
        self.summ = 0.        # running sum of f
        self.sumSquare = 0.   # running sum of f^2
        self.count = 0        # number of samples
    def Clear(self):
        """Reset all accumulators to zero."""
        self.summ = 0.
        self.sumSquare = 0.
        self.count = 0
    def Add(self, value):
        """Fold one sample into the accumulators."""
        self.count += 1
        self.summ += value
        self.sumSquare += value*value
    def GetAvg(self):
        """Return <f>."""
        return self.summ/float(self.count)
    def GetAvgSquare(self):
        """Return <f^2>."""
        return self.sumSquare/float(self.count)
# end of record
SPINUP = 1. # define Spin up = 1.
SPINDOWN = -1. # define Spin down = -1.
class twoD_IsingModel(object):
    """2-D Ising model on an nx-by-ny lattice with periodic boundaries,
    sampled with single-spin-flip Metropolis updates.  (Python 2 code:
    uses xrange and the print statement at module level.)"""
    def __init__(self, dim = (1, 1), k = 1., J = 1.):
        self.energy = 0. # total energy of lattice
        self.sumSpins = 0. # total magnetic field of lattice
        self.k = k # Bolzmann constant
        self.J = J # spin energy
        self.nx = dim[0]
        self.ny = dim[1]
        self.lattice = np.zeros((self.nx, self.ny)) # create lattice
        self.area = float(self.nx * self.ny)
        self.E_Record = record() # record energy of lattice
        self.sumSpins_Record = record() # record magnetism of lattice
    def SetTemperatureField(self, J = 1., T = 10., H = 0.):
        """Set the external field and temperature (recomputes beta).
        NOTE(review): the J parameter is accepted but never stored or
        used here -- confirm whether it should overwrite self.J."""
        self.H = H # external magnetic field
        self.T = T # temperature
        self.beta = 1./self.T/self.k
    def Thermalize(self):
        """Assign every lattice site an independent random spin."""
        for i in xrange(self.nx):
            for j in xrange(self.ny):
                self.lattice[i,j] = SPINUP if int(uniform(0, 2))==1 else SPINDOWN
    def CalcEnergy(self):
        """Recompute the total lattice energy from scratch.  The j-1 /
        i-1 negative indices couple row/column 0 to the last row/column,
        which implements the periodic boundary."""
        tmpE = 0.
        for i in xrange(self.nx):
            for j in xrange(self.ny):
                tmpE += -self.J \
                    * ( self.lattice[i,j]*self.lattice[i,j-1] \
                    + self.lattice[i,j]*self.lattice[i-1,j] ) \
                    - self.H*self.lattice[i,j]
        self.energy = tmpE
    def CalcSumSpins(self):
        """Recompute the total magnetisation from scratch."""
        self.sumSpins = self.lattice.sum()
    def FlipSpin(self, ix, iy):
        """Flip the spin at site (ix, iy)."""
        self.lattice[ix,iy] = SPINDOWN if self.lattice[ix,iy] == SPINUP else SPINUP
    def GetNeighborEnergy(self, ix, iy):
        """Energy of the four bonds around (ix, iy) plus its field term;
        the wrapped forward indices give periodic boundaries."""
        f_ix = (ix+1) % self.nx
        b_ix = ix-1
        f_iy = (iy+1) % self.ny
        b_iy = iy-1
        E = -( self.lattice[f_ix,iy] + self.lattice[b_ix,iy] \
             + self.lattice[ix,f_iy] + self.lattice[ix,b_iy] ) \
             * self.lattice[ix,iy] * self.J \
             - self.H * self.lattice[ix,iy]
        return E
    def Prob(self, deltaE):
        """Metropolis acceptance test for an energy change deltaE."""
        if np.e**(-deltaE*self.beta) > uniform(0, 1):
            return True
        else:
            return False
    def LocalUpdate(self, x, y):
        """Propose flipping spin (x, y); keep the flip when it lowers
        the energy or passes the Metropolis test, otherwise undo it."""
        oldE = self.GetNeighborEnergy(x, y)
        oldSpin = self.lattice[x,y]
        self.FlipSpin(x, y)
        newE = self.GetNeighborEnergy(x, y)
        newSpin = self.lattice[x,y]
        deltaE = newE - oldE
        deltaSpin = newSpin - oldSpin
        if (deltaE <= 0) or self.Prob(deltaE):
            self.energy += deltaE
            self.sumSpins += deltaSpin
        else:
            self.FlipSpin(x, y)
    def Start(self, N, thermalStep):
        """Run N random-site updates, recording energy and magnetisation
        only after the first thermalStep steps."""
        self.E_Record.Clear()
        self.sumSpins_Record.Clear()
        self.CalcEnergy()
        self.CalcSumSpins()
        for i in xrange(N):
            x = int(uniform(0, self.nx))
            y = int(uniform(0, self.ny))
            self.LocalUpdate(x, y)
            if i > thermalStep:
                self.sumSpins_Record.Add(self.GetSumSpins())
                self.E_Record.Add(self.GetEnergy())
    def GetEnergy(self):
        """Current total energy (maintained incrementally)."""
        return self.energy
    def GetSumSpins(self):
        """Current total magnetisation (maintained incrementally)."""
        return self.sumSpins
    def GetAvgEnergy(self):
        """<E> per site over the recorded samples."""
        return self.E_Record.GetAvg()/self.area
    def GetAvgMagnetism(self):
        """<M> per site over the recorded samples."""
        return self.sumSpins_Record.GetAvg()/self.area
    def GetAvgSquarMagnetism(self):
        """<M^2> per site squared over the recorded samples."""
        return self.sumSpins_Record.GetAvgSquare()/self.area**2.
    def GetSpecificHeat(self):
        """Specific heat from the energy fluctuations."""
        return ( self.E_Record.GetAvgSquare()/self.area**2. \
               - (self.E_Record.GetAvg()/self.area)**2. ) \
               * self.beta/self.T
    def GetMagneticSusceptibility(self):
        """Susceptibility from the magnetisation fluctuations."""
        return ( self.sumSpins_Record.GetAvgSquare()/self.area**2. \
               - (self.sumSpins_Record.GetAvg()/self.area)**2. ) \
               * self.beta
# end of twoD_IsingModel
def SaveData(savePath="", data=[], labels=""):
    """Write *labels* and then one tab-terminated, tab-separated line
    per row of *data* to *savePath*.

    Fix: the file is opened with ``with`` so it is flushed and closed
    deterministically, even if a write fails.
    """
    with open(savePath, 'w') as fp:
        fp.write(labels + '\n')
        for column in data:
            for rowItem in column:
                fp.write(str(rowItem)+'\t')
            fp.write('\n')
# end of SaveData
def LoopT_IsingModel( model, TList ):
avgE_List = [] #<E>
avgM_List = [] #<M>
avgSquarM_List = [] # <M^2>
MS_List = [] # Magnetic suscep
SH_List = [] # Specific heat
model.Thermalize()
for i in TList:
print "Temperature", i
model.SetTemperatureField( T = i, H = 0. )
model.Start( iterN, thermalStep )
avgE_List.append( model.GetAvgEnergy() )
avgM_List.append( model.GetAvgMagnetism() )
avgSquarM_List.append( model.GetAvgSquarMagnetism() )
MS_List.append( model.GetMagneticSusceptibility() )
SH_List.append( model.GetSpecificHeat() )
dataLabel = 'temperature\tM_Square\tSuscep\tSpecificHeat\t'
return dataLabel, \
[ temperatureList, \
avgSquarM_List, \
MS_List, \
SH_List \
]
if __name__ == '__main__':
    import time
    # Cooling sweep of the 2-D model; results go to a tab-separated file.
    L = 20 # lattice size
    dim = ( L, L ) # dim of lattice is L*L
    iterNumPerSite = 10
    iterN = L**2 * iterNumPerSite # iter times
    thermalStep = int( iterN * 0.3 ) # reach thermalize after thermalStep
    temperatureList = np.linspace( 5., 1., 41 ) # cooling down
    tic = time.time()
    model = twoD_IsingModel( dim )
    dataLabel, tmpData = LoopT_IsingModel( model, temperatureList )
    toc = time.time()
    print "Time ", toc-tic
    # Transpose so each output row holds one temperature's observables.
    allData = np.array( tmpData ).transpose()
    savePath = "2D_IsingModelResult.txt"
    SaveData( savePath = savePath, data = allData, labels = dataLabel )
    ## reshape data for gnuplot
    #avgEList = []
    #avgMList = []
    #avgSquarMList = []
    #SHList = []
    #MSList = []
    #for i in range(len(temperatureList)):
    #    avgEList.append([temperatureList[i], avgE_List[i]])
    #    avgMList.append([temperatureList[i], avgM_List[i]])
    #    avgSquarMList.append([temperatureList[i], avgSquarM_List[i]])
    #    SHList.append([temperatureList[i], SH_List[i]])
    #    MSList.append([temperatureList[i], MS_List[i]])
    #import Gnuplot
    #g = Gnuplot.Gnuplot()
    #g.xlabel("Temperature (K)")
    #g.ylabel("<E>")
    #g.plot(avgEList)
    #g2 = Gnuplot.Gnuplot()
    #g2.xlabel("Temperature (K)")
    #g2.ylabel("<M>")
    #g2.plot(avgMList)
    #g3 = Gnuplot.Gnuplot()
    #g3.xlabel("Temperature (K)")
    #g3.ylabel("Cv")
    #g3.plot(SHList)
    #g4 = Gnuplot.Gnuplot()
    #g4.xlabel("Temperature (K)")
    #g4.ylabel("X")
    #g4.plot(MSList)
    #raw_input()
| Python |
import csv
import numpy as np
# Read a tab-separated file into a float matrix.
a = csv.reader(open('test.txt', 'r'), delimiter='\t')
a.next() # skip the first row (Python 2 iterator API)
data = []
for row in a:
    data.append(row[:-1]) # drop the last field (presumably an empty trailing-tab column -- confirm against the data file)
print np.array(data, dtype='float')
| Python |
import numpy as np
from scipy.constants import codata
from random import uniform
class OneD_Ising:
    """1-D Ising chain with periodic boundaries, sampled by
    single-spin-flip Metropolis updates at inverse temperature 1/T."""
    def __init__(self, n = 0, J = 1., T = 0., B = 0.):
        self.up = 1.
        self.down = -1.
        self.J = J              # coupling constant
        self.n = n              # chain length
        self.B = B              # external field
        self.chain = np.zeros(self.n)
        self.currH = 0.         # current total energy
        self.sumH = 0.          # accumulated energy over updates
        self.beta = 1/float(T)
    def Thermalize(self):
        """Randomize every spin and cache the resulting energy."""
        for i in range(self.n):
            self.chain[i] = self.up if int(round(uniform(0, 1))) else self.down
        self.currH = self.GetEnergy()
    def GetEnergy(self):
        """Total energy: nearest-neighbour coupling plus field term."""
        H = 0.
        for i in range(self.n):
            # periodic boundary conditions
            H += -self.J*self.chain[i]*self.chain[i-1] - self.B*self.chain[i]
        return H
    def FlipSpin(self, n):
        """Flip the spin at index n."""
        self.chain[n] = self.up if self.chain[n] == self.down else self.down
    def LocalUpdate(self):
        """One Metropolis step: flip a random spin, undo it when the
        move raises the energy and fails the acceptance test, then add
        the current energy to sumH."""
        tmpH = 0.
        n = int(round(uniform(0, 1)*(self.n-1)))
        self.FlipSpin(n)
        tmpH = self.GetEnergy()
        if tmpH > self.currH:
            if uniform(0, 1) > np.e**((self.currH-tmpH)*self.beta):
                self.FlipSpin(n) #undo
            else:
                self.currH = tmpH
        else:
            self.currH = tmpH
        self.sumH += self.currH
    def Simulate(self, N):
        """Thermalize, run N local updates, then print the final chain,
        its energy and the time-averaged energy."""
        self.currH = 0.
        self.sumH = 0.
        self.Thermalize()
        for i in range(N):
            self.LocalUpdate()
        print self.chain
        print self.currH
        print self.sumH/float(N)
# Sweep temperatures 10, 20, ..., 100 and plot the time-averaged energy
# of a 20-spin chain at each one.
T = [float(i+1)*10. for i in range(10)]
N = 10000
Ts = []
Hs = []
for i in T:
    t = OneD_Ising(20, 1., i, 1.)
    t.Simulate(N)
    Ts.append(i)
    Hs.append(t.sumH/float(N))
import Gnuplot
g = Gnuplot.Gnuplot()
g.plot(Hs)
raw_input()
| Python |
from matplotlib.pylab import *
import numpy as np
# matplotlib demo: show a random 20x20 image with a horizontal colorbar
# (earlier line-plot styling experiments are kept commented out).
t = np.arange(0, 10, 1)
y1 = np.e**(-t/2.)
y2 = np.e**(-t/5.)  # NOTE(review): t/y1/y2 are unused by the imshow demo below
ion()
#title("This is test title")
#xlabel("Hello", fontsize=28)
#ylabel("$\Delta$R/R", fontsize=28, weight='bold')
#grid(True)
"""
marker
marker +, o, <, >, ^, ., s, v, x ...etc.
ms markersize
mew markeredgewidth
mec markeredgecolor
mfc markerfillercolor
line
c color
ls linestyle -, --, -., :
lw linewidth
"""
"""
b blue
g green
r red
w white
k black
y yellow
c cyan
m magenta
(0.18, 0.31, 0.31) an RGB tuple
"""
#plot(t, y1, 'b-',marker='o', ms=10, mew=3, mfc='w', mec='b', lw=3, ls='--')
X = 2*rand(20, 20)
#imshow(X, vmax = 3, vmin = 0)
# NOTE(review): `normalize` is the old matplotlib spelling; newer
# releases call it Normalize -- confirm the installed version.
anorm = normalize(1, 2)
imshow(X, anorm)
xlabel("Time delay (ps)")
ylabel("$\Delta$ R/R")
jet()
cbar = colorbar(orientation='horizontal') # vertical
cbar.set_label("Signals")
#gray()
show()
raw_input()
| Python |
from pylab import *
from scipy import optimize
from numpy import *
class Parameter:
    """Mutable scalar holder used by the fitting helpers: call the
    instance to read the current value, set() to overwrite it."""
    def __init__(self, value):
        self.value = value
    def set(self, value):
        """Overwrite the stored value."""
        self.value = value
    def __call__(self):
        """Return the stored value."""
        return self.value
def fit(function, parameters, y, x = None):
    """Least-squares fit of *function* to the data *y* sampled at *x*.

    *parameters* is a list of Parameter objects that *function* reads;
    they are updated in place on every residual evaluation, so after a
    successful fit they hold the optimal values.  When x is None the
    sample points default to arange(len(y)).  Returns the
    optimize.leastsq result tuple.

    Bug fix: the original residual closure ignored the trial parameter
    vector supplied by optimize.leastsq, so the optimizer had no way to
    influence the model and the "fit" never moved.  The trial values
    are now written back into the Parameter objects before evaluating
    the model.
    """
    def residuals(params):
        # Push leastsq's trial values into the shared Parameter objects
        # so that function(x) sees them.
        for param, trial in zip(parameters, params):
            param.set(trial)
        return y - function(x)

    if x is None: x = arange(y.shape[0])
    p = [param() for param in parameters]
    return optimize.leastsq(residuals, p)
# giving initial parameters
mu = Parameter(7)
sigma = Parameter(3)
height = Parameter(5)
p = [mu, sigma, height]
# define your function:
def f(x): return height() * exp(-((x-mu())/sigma())**2)
# create test data (noise-free, so the target is exactly reachable)
x0 = linspace(0., 20., 100)
data = f(x0)
# fit! (given that data is an array with the data to fit)
fitparas, sucess = fit(f, [mu, sigma, height], data, x0)
# Rebind the Parameter objects to the fitted values before re-plotting f.
mu = Parameter(fitparas[0])
sigma = Parameter(fitparas[1])
height = Parameter(fitparas[2])
plot(x0, data, "bo", x0, f(x0), "r-")
show()
| Python |
from pylab import *
from scipy import *
from scipy import optimize
# if you experience problem "optimize not found", try to uncomment the following line. The problem is present at least at Ubuntu Lucid python scipy package
# from scipy import optimize
# Generate data points with noise
num_points = 500
Tx = linspace(0., 200., num_points)
Ty = Tx  # NOTE(review): Ty is never used below
tX = 1.6*cos(2*pi*(Tx/29.-1.32/360.)) +1.4*((0.5-rand(num_points))*exp(2*rand(num_points)**2))
# Fit the first set
fitfunc = lambda p, x: p[0]*cos(2*pi*(x/p[1]+p[2]/360.)) # Target function
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
p0 = [1.6, 31., -0.] # Initial guess for the parameters
p1, success = optimize.leastsq(errfunc, p0[:], args=(Tx, tX))
# Dense time axis for drawing the fitted curve.
time = linspace(Tx.min(), Tx.max(), 500)
plot(Tx, tX, "ro", time, fitfunc(p1, time), "r-") # Plot of the data and the fit
# Legend the plot
title("Pump-probe oscillation fitting")
xlabel("Time delay (ps)")
ylabel("$\Delta R/R$")
legend(('x position', 'x fit'))
ax = axes()
# Annotate the fitted oscillation period inside the axes.
text(0.8, 0.07,
     'Periodic : %5.3f ps.' % (p1[1]),
     fontsize=16,
     horizontalalignment='center',
     verticalalignment='center',
     transform=ax.transAxes)
show()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run all tests defined in the DSPL Tools code."""
__author__ = 'Benjamin Yolken <yolken@google.com>'
import unittest
# Dotted names of every test module shipped with DSPL Tools; extend this
# list whenever a new *_test module is added so run_all_tests picks it up.
_TEST_MODULE_NAMES = [
    'dsplcheck_test',
    'dsplgen_test',
    'dspllib.data_sources.csv_data_source_test',
    'dspllib.data_sources.csv_data_source_sqlite_test',
    'dspllib.data_sources.data_source_test',
    'dspllib.data_sources.data_source_to_dspl_test',
    'dspllib.model.dspl_model_loader_test',
    'dspllib.model.dspl_model_test',
    'dspllib.validation.dspl_validation_test',
    'dspllib.validation.xml_validation_test']
def main():
  """Run all DSPL Tools tests and print the results to stderr."""
  loader = unittest.defaultTestLoader
  suite = unittest.TestSuite(
      loader.loadTestsFromName(module_name)
      for module_name in _TEST_MODULE_NAMES)
  unittest.TextTestRunner().run(suite)
# Script entry point.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of dsplcheck module."""
__author__ = 'Benjamin Yolken <yolken@google.com>'
import os
import os.path
import re
import shutil
import StringIO
import sys
import tempfile
import unittest
import zipfile
import dsplcheck
# Minimal well-formed DSPL document used as the "valid dataset" fixture.
_DSPL_CONTENT = (
    """<?xml version="1.0" encoding="UTF-8"?>
<dspl xmlns="http://schemas.google.com/dspl/2010"
xmlns:time="http://www.google.com/publicdata/dataset/google/time">
<import namespace="http://www.google.com/publicdata/dataset/google/time"/>
<info>
<name>
<value>Dataset Name</value>
</name>
</info>
<provider>
<name>
<value>Provider Name</value>
</name>
</provider>
</dspl>""")
# Same document plus a table whose <data> file does not exist; used to
# exercise the model-loader error path.
_DSPL_CONTENT_BAD_CSV_PATH = (
    """<?xml version="1.0" encoding="UTF-8"?>
<dspl xmlns="http://schemas.google.com/dspl/2010"
xmlns:time="http://www.google.com/publicdata/dataset/google/time">
<import namespace="http://www.google.com/publicdata/dataset/google/time"/>
<info>
<name>
<value>Dataset Name</value>
</name>
</info>
<provider>
<name>
<value>Provider Name</value>
</name>
</provider>
<tables>
<table id="my_table">
<column id="col1" type="string"/>
<column id="col2" type="string"/>
<data>
<file format="csv" encoding="utf-8">non_existent_file.csv</file>
</data>
</table>
</tables>
</dspl>""")
class DSPLCheckTests(unittest.TestCase):
  """Test case for dsplcheck module."""

  def setUp(self):
    # Write the minimal valid DSPL fixture into a scratch directory.
    self.input_dir = tempfile.mkdtemp()
    self.valid_dspl_file_path = (
        os.path.join(self.input_dir, 'valid_dataset.xml'))
    self.valid_dspl_file = open(
        self.valid_dspl_file_path, 'w')
    self.valid_dspl_file.write(_DSPL_CONTENT)
    self.valid_dspl_file.close()

  def tearDown(self):
    shutil.rmtree(self.input_dir)

  def testValidDataset(self):
    """Test basic case of dataset that validates and parses correctly."""
    self._StdoutTestHelper(
        dsplcheck.main, [self.valid_dspl_file_path],
        'validates successfully.*Parsing completed.*'
        'Checking DSPL model and data.*Completed')

  def testBadXMLFilePath(self):
    """Test case where bad XML file path is passed in."""
    self._StdoutTestHelper(
        dsplcheck.main, ['nonexistent_input_file.xml'],
        'Error opening XML file', expect_exit=True)

  def testBadCSVFilePath(self):
    """Test case where DSPL file has bad CSV reference."""
    bad_csv_dspl_file_path = (
        os.path.join(self.input_dir, 'invalid_csv_dataset.xml'))
    bad_csv_dspl_file = open(bad_csv_dspl_file_path, 'w')
    bad_csv_dspl_file.write(_DSPL_CONTENT_BAD_CSV_PATH)
    bad_csv_dspl_file.close()
    self._StdoutTestHelper(
        dsplcheck.main, [bad_csv_dspl_file_path],
        'Error while trying to parse', expect_exit=True)

  def testSchemaOnlyOption(self):
    """Test that 'schema only' checking level option works correctly."""
    self._StdoutTestHelper(
        dsplcheck.main, [self.valid_dspl_file_path, '-l', 'schema_only'],
        'validates successfully\W*Completed')

  def testSchemaAndModelOption(self):
    """Test that 'schema and model' checking level option works correctly."""
    self._StdoutTestHelper(
        dsplcheck.main, [self.valid_dspl_file_path, '-l', 'schema_and_model'],
        'Checking DSPL model(?! and data)')

  def testZipInput(self):
    """Test that module properly handles zipped input."""
    zip_path = os.path.join(self.input_dir, 'dataset.zip')
    zip_file = zipfile.ZipFile(zip_path, 'w')
    zip_file.write(self.valid_dspl_file_path)
    zip_file.close()
    self._StdoutTestHelper(
        dsplcheck.main, [zip_path],
        'validates successfully.*Parsing completed.*'
        'Checking DSPL model and data.*Completed')

  def testZipMissingXML(self):
    """Test that zip file without an XML file produces error."""
    zip_path = os.path.join(self.input_dir, 'dataset.zip')
    zip_file = zipfile.ZipFile(zip_path, 'w')
    zip_file.writestr('test.txt', 'Text')
    zip_file.close()
    self._StdoutTestHelper(
        dsplcheck.main, [zip_path],
        'does not have any XML', expect_exit=True)

  def testZipMultipleXMLFiles(self):
    """Test that zip file with multiple XML files produces error."""
    zip_path = os.path.join(self.input_dir, 'dataset.zip')
    zip_file = zipfile.ZipFile(zip_path, 'w')
    zip_file.writestr('test.xml', 'Text')
    zip_file.writestr('test2.xml', 'Text')
    zip_file.close()
    self._StdoutTestHelper(
        dsplcheck.main, [zip_path],
        'multiple XML files', expect_exit=True)

  def _StdoutTestHelper(self, function, args,
                        expected_output, expect_exit=False):
    """Check the stdout output of a function against its expected value.

    Args:
      function: A function to execute
      args: The arguments to pass to the function
      expected_output: A regular expression expected to match the stdout output
      expect_exit: Boolean indicating whether the function execution should
          trigger a system exit
    """
    saved_stdout = sys.stdout
    redirected_output = StringIO.StringIO()
    sys.stdout = redirected_output
    try:
      if expect_exit:
        self.assertRaises(SystemExit, function, args)
      else:
        function(args)
      self.assertTrue(
          re.search(expected_output, redirected_output.getvalue(), re.DOTALL))
    finally:
      # Bug fix: restore stdout even when the call or an assertion fails;
      # previously a single failure left sys.stdout redirected for every
      # subsequent test in the run.
      redirected_output.close()
      sys.stdout = saved_stdout
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Check a DSPL dataset for likely import errors."""
__author__ = 'Benjamin Yolken <yolken@google.com>'
import optparse
import os
import shutil
import sys
import tempfile
import time
import zipfile
from dspllib.model import dspl_model_loader
from dspllib.validation import dspl_validation
from dspllib.validation import xml_validation
def LoadOptionsFromFlags(argv):
  """Parse command-line arguments.

  Args:
    argv: The program argument vector (excluding the script name)

  Returns:
    A dictionary with key-value pairs for each of the options
  """
  parser = optparse.OptionParser(
      usage='python dsplcheck.py [options] [DSPL XML file or zip archive]')
  parser.set_defaults(verbose=True)
  parser.add_option('-q', '--quiet',
                    action='store_false', dest='verbose',
                    help='Quiet mode')
  parser.add_option(
      '-l', '--checking_level', dest='checking_level', type='choice',
      choices=['schema_only', 'schema_and_model', 'full'], default='full',
      help='Level of checking to do (default: full)')
  options, positional = parser.parse_args(args=argv)
  # Exactly one positional argument (the dataset file) is required.
  if len(positional) != 1:
    parser.error('An XML file or DSPL zip archive is required')
  return {'verbose': options.verbose,
          'checking_level': options.checking_level,
          'file_path': positional[0]}
def GetInputFilePath(input_file_path):
  """Parse the input file path, extracting a zip file if necessary.

  Exits the process (status 2) if a zip archive contains zero or more than
  one XML file.

  Args:
    input_file_path: String path to dsplcheck input file

  Returns:
    Dictionary containing final XML file path (post-extraction) and directory
    into which zip was extracted (or '' if input was not a zip).
  """
  if zipfile.is_zipfile(input_file_path):
    # Extract files to temporary directory and search for dataset XML.
    # The caller is responsible for removing zip_dir afterwards.
    zip_dir = tempfile.mkdtemp()
    zip_file = zipfile.ZipFile(input_file_path, 'r')
    zip_file.extractall(zip_dir)
    # Collect every *.xml file anywhere in the extracted tree; exactly one
    # is expected to be the dataset descriptor.
    xml_file_paths = []
    for (dirpath, unused_dirnames, filenames) in os.walk(zip_dir):
      for file_name in filenames:
        if file_name[-4:] == '.xml':
          xml_file_paths.append(os.path.join(dirpath, file_name))
    if not xml_file_paths:
      print 'Error: zip does not have any XML files'
      sys.exit(2)
    elif len(xml_file_paths) > 1:
      print 'Error: zip contains multiple XML files'
      sys.exit(2)
    else:
      xml_file_path = xml_file_paths[0]
    zip_file.close()
  else:
    # Not a zip archive: treat the argument as the XML file itself.
    xml_file_path = input_file_path
    zip_dir = ''
  return {'xml_file_path': xml_file_path,
          'zip_dir': zip_dir}
def main(argv):
  """Parse command-line flags and run XML validator.

  Args:
    argv: The program argument vector (excluding the script name)
  """
  start_time = time.time()
  options = LoadOptionsFromFlags(argv)
  file_paths = GetInputFilePath(options['file_path'])
  try:
    xml_file = open(file_paths['xml_file_path'], 'r')
  except IOError as io_error:
    print 'Error opening XML file\n\n%s' % io_error
    sys.exit(2)
  if options['verbose']:
    print '==== Checking XML file against DSPL schema....'
  result = xml_validation.RunValidation(
      xml_file,
      verbose=options['verbose'])
  print result
  if 'validates successfully' not in result:
    # Stop if XML validation not successful.
    # NOTE(review): keys off the human-readable result string; keep in sync
    # with xml_validation.RunValidation's output.
    sys.exit(2)
  if options['checking_level'] != 'schema_only':
    if options['verbose']:
      print '\n==== Parsing DSPL dataset....'
    # 'full' additionally loads the table data, not just the model.
    if options['checking_level'] == 'full':
      full_data_check = True
    else:
      full_data_check = False
    try:
      dataset = dspl_model_loader.LoadDSPLFromFiles(
          file_paths['xml_file_path'], load_all_data=full_data_check)
    except dspl_model_loader.DSPLModelLoaderError as loader_error:
      print 'Error while trying to parse DSPL dataset\n\n%s' % loader_error
      sys.exit(2)
    if options['verbose']:
      print 'Parsing completed.'
      if full_data_check:
        print '\n==== Checking DSPL model and data....'
      else:
        print '\n==== Checking DSPL model....'
    dspl_validator = dspl_validation.DSPLDatasetValidator(
        dataset, full_data_check=full_data_check)
    print dspl_validator.RunValidation(options['verbose'])
  xml_file.close()
  if file_paths['zip_dir']:
    # Clean up the temporary directory created during zip extraction.
    shutil.rmtree(file_paths['zip_dir'])
  if options['verbose']:
    print '\nCompleted in %0.2f seconds' % (time.time() - start_time)
# Script entry point; drop the script name from argv.
if __name__ == '__main__':
  main(sys.argv[1:])
| Python |
#!/usr/bin/python2.4
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of dsplgen module."""
__author__ = 'Benjamin Yolken <yolken@google.com>'
import os
import os.path
import re
import shutil
import StringIO
import sys
import tempfile
import unittest
import dsplcheck
import dsplgen
# Input fixture: CSV whose header carries bracketed DSPL column annotations
# (type/format, concept, rollup, extends, slice_role); the "maine's" values
# exercise quoting of special characters.
_TEST_CSV_CONTENT = (
    """date[type=date;format=yyyy-MM-dd],category1,category2[concept=geo:us_state;rollup=true],metric1[extends=quantity:ratio;slice_role=metric],metric2,metric3
1980-01-01,red,california,89,321,71.21
1981-01-01,red,california,99,231,391.2
1982-01-01,blue,maine's,293,32,2.31
1983-01-01,blue,california,293,12,10.3
1984-01-01,red,maine's,932,48,10.78""")
class DSPLGenTests(unittest.TestCase):
  """Test cases for dsplgen module."""

  def setUp(self):
    # Scratch directories for the input CSV fixture and generated output.
    self.input_dir = tempfile.mkdtemp()
    input_file = open(os.path.join(self.input_dir, 'input.csv'), 'w')
    input_file.write(_TEST_CSV_CONTENT)
    input_file.close()
    self.output_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self.input_dir)
    shutil.rmtree(self.output_dir)

  def testDSPLGenEndToEnd(self):
    """A simple end-to-end test of the dsplgen application."""
    dsplgen.main(['-o', self.output_dir, '-q',
                  os.path.join(self.input_dir, 'input.csv')])
    self.assertTrue(
        os.path.isfile(os.path.join(self.output_dir, 'dataset.xml')))
    self.assertTrue(
        os.path.isfile(os.path.join(self.output_dir, 'category1_table.csv')))
    self.assertTrue(
        os.path.isfile(os.path.join(self.output_dir, 'slice_0_table.csv')))
    self.assertTrue(
        os.path.isfile(os.path.join(self.output_dir, 'slice_1_table.csv')))
    # Test that output validates against dsplcheck
    saved_stdout = sys.stdout
    redirected_output = StringIO.StringIO()
    sys.stdout = redirected_output
    try:
      dsplcheck.main([os.path.join(self.output_dir, 'dataset.xml')])
      self.assertTrue(
          re.search(
              'validates successfully.*Parsing completed.*'
              'No issues found.*Completed',
              redirected_output.getvalue(), re.DOTALL))
    finally:
      # Bug fix: restore stdout even when an assertion fails; previously a
      # failure left sys.stdout redirected for subsequent tests.
      redirected_output.close()
      sys.stdout = saved_stdout

  def testCSVNotFound(self):
    """Test case in which CSV can't be opened."""
    # NOTE(review): this first run against the valid CSV looks like a
    # copy-paste leftover from testDSPLGenEndToEnd; kept to preserve
    # behavior -- confirm it can be deleted.
    dsplgen.main(['-o', self.output_dir, '-q',
                  os.path.join(self.input_dir, 'input.csv')])
    saved_stdout = sys.stdout
    redirected_output = StringIO.StringIO()
    sys.stdout = redirected_output
    try:
      self.assertRaises(SystemExit,
                        dsplgen.main, ['-q', 'non_existent_input_file.csv'])
      self.assertTrue('Error opening CSV file' in redirected_output.getvalue())
    finally:
      redirected_output.close()
      sys.stdout = saved_stdout
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate a DSPL dataset from a tabular data source via the command-line."""
__author__ = 'Benjamin Yolken <yolken@google.com>'
import optparse
import sys
import time
from dspllib.data_sources import csv_data_source
from dspllib.data_sources import csv_data_source_sqlite
from dspllib.data_sources import data_source_to_dspl
def LoadOptionsFromFlags(argv):
  """Parse command-line arguments.

  Args:
    argv: The program argument vector (excluding the script name)

  Returns:
    A dictionary with key-value pairs for each of the options
  """
  parser = optparse.OptionParser(
      usage='python dsplgen.py [options] [csv file]')
  parser.set_defaults(verbose=True)
  parser.add_option('-o', '--output_path', dest='output_path', default='',
                    help=('Path to a output directory '
                          '(default: current directory)'))
  parser.add_option('-q', '--quiet',
                    action='store_false', dest='verbose',
                    help='Quiet mode')
  parser.add_option('-t', '--data_type', dest='data_type', type='choice',
                    choices=['csv', 'csv_sqlite'], default='csv',
                    help='Type of data source to use (default: csv)')
  options, remaining = parser.parse_args(args=argv)
  # The single positional argument names the data source.
  if len(remaining) != 1:
    parser.error('A data source (e.g., path to CSV file) is required')
  return dict(data_type=options.data_type,
              data_source=remaining[0],
              output_path=options.output_path,
              verbose=options.verbose)
def main(argv):
  """Parse command-line flags and run data source to DSPL conversion process.

  Args:
    argv: The program argument vector (excluding the script name)
  """
  start_time = time.time()
  options = LoadOptionsFromFlags(argv)
  # Connect to data source
  if options['data_type'] in ['csv', 'csv_sqlite']:
    try:
      csv_file = open(options['data_source'], 'r')
    except IOError as io_error:
      print 'Error opening CSV file\n\n%s' % io_error
      sys.exit(2)
    # Two backends over the same CSV input -- presumably in-memory vs.
    # SQLite-staged; see the data_sources package. TODO confirm.
    if options['data_type'] == 'csv':
      data_source_obj = csv_data_source.CSVDataSource(
          csv_file, options['verbose'])
    else:
      data_source_obj = csv_data_source_sqlite.CSVDataSourceSqlite(
          csv_file, options['verbose'])
  else:
    # Unreachable via optparse's 'choice' validation; kept as a safety net.
    print 'Error: Unknown data type: %s' % (options['data_type'])
    sys.exit(2)
  # Create DSPL dataset from data source
  dataset = data_source_to_dspl.PopulateDataset(
      data_source_obj, options['verbose'])
  data_source_obj.Close()
  if options['verbose']:
    print 'Materializing dataset:'
    print str(dataset)
  # Write DSPL dataset to disk
  dataset.Materialize(options['output_path'])
  if options['verbose']:
    print 'Completed in %0.2f seconds' % (time.time() - start_time)
# Script entry point; drop the script name from argv.
if __name__ == '__main__':
  main(sys.argv[1:])
| Python |
#!/usr/bin/python2.4
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Setup script for the DSPLtools suite."""
from distutils.core import setup
# distutils metadata for the DSPL Tools distribution. The importable
# packages live under packages/, hence the package_dir remapping; XSD
# schemas and test datasets ship as package data so validation works
# after installation.
setup(name='dspltools',
      version='0.4.3',
      description='Suite of command-line tools for generating DSPL datasets',
      author='Benjamin Yolken',
      author_email='yolken@google.com',
      url='http://code.google.com/apis/publicdata/docs/dspltools.html',
      packages=['dspllib', 'dspllib.data_sources',
                'dspllib.model', 'dspllib.validation', 'genxmlif',
                'minixsv'],
      package_dir={'dspllib': 'packages/dspllib',
                   'genxmlif': 'packages/third_party/minixsv/genxmlif',
                   'minixsv': 'packages/third_party/minixsv/minixsv'},
      package_data={'dspllib.validation': ['schemas/*.xsd',
                                           'test_dataset/*.csv',
                                           'test_dataset/*.xml'],
                    'minixsv': ['*.xsd', 'minixsv']},
      scripts=['scripts/dsplcheck.py', 'scripts/dsplgen.py',
               'scripts/run_all_tests.py'],)
| Python |
import os
import sys
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
# Install data files into the same location as the *.py modules (purelib)
# instead of the default platform data directory.
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']
def fullsplit(path, result=None):
    """
    Split a pathname into components (the opposite of os.path.join) in a
    platform-neutral way.
    """
    # Iterative equivalent of the classic recursive splitter: peel the tail
    # component off until the head is empty (relative path exhausted) or
    # stops changing (filesystem root).
    components = [] if result is None else result
    while True:
        head, tail = os.path.split(path)
        if head == '':
            return [tail] + components
        if head == path:
            return components
        components = [tail] + components
        path = head
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
    os.chdir(root_dir)
# Walk the 'bots' tree: a directory with __init__.py is a package, and any
# other files in it (not .py/.pyc) ship as data files; directories without
# __init__.py contribute data files only.
for dirpath, dirnames, filenames in os.walk('bots'):
    # Ignore dirnames that start with '.'
    #~ for i, dirname in enumerate(dirnames):
        #~ if dirname.startswith('.'): del dirnames[i]
    if '__init__.py' in filenames:
        packages.append('.'.join(fullsplit(dirpath)))
        if len(filenames) > 1:
            data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames if not f.endswith('.pyc') and not f.endswith('.py')]])
    elif filenames:
        data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames if not f.endswith('.pyc') and not f.endswith('.py')]])
setup(
    name="bots",
    version="2.1.0",
    author = "hjebbers",
    author_email = "hjebbers@gmail.com",
    url = "http://bots.sourceforge.net/",
    description="Bots open source edi translator",
    long_description = "Bots is complete software for edi (Electronic Data Interchange): translate and communicate. All major edi data formats are supported: edifact, x12, tradacoms, xml",
    platforms="OS Independent (Written in an interpreted language)",
    license="GNU General Public License (GPL)",
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Topic :: Office/Business',
        'Topic :: Office/Business :: Financial',
        'Topic :: Other/Nonlisted Topic',
        'Topic :: Communications',
        'Environment :: Console',
        'Environment :: Web Environment',
        ],
    scripts = [ 'bots-webserver.py',
                'bots-engine.py',
                'bots-grammarcheck.py',
                'bots-xml2botsgrammar.py',
                #~ 'bots/bots-updatedb.py',
                ],
    packages = packages,
    data_files = data_files,
    )
| Python |
import os
import sys
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
# Route the 'data' install scheme to purelib so data files land next to
# the Python modules.
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']
def fullsplit(path, result=None):
    """
    Split a pathname into components (the opposite of os.path.join) in a
    platform-neutral way.
    """
    # Start from any caller-supplied suffix components and prepend each path
    # component as it is peeled off the right-hand end.
    collected = list(result) if result is not None else []
    remaining = path
    while True:
        head, tail = os.path.split(remaining)
        if head == '':
            collected.insert(0, tail)
            return collected
        if head == remaining:
            # Reached a root such as '/' -- its tail is empty and dropped.
            return collected
        collected.insert(0, tail)
        remaining = head
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
    os.chdir(root_dir)
# Directories under bots/ with __init__.py become packages; their non-.py
# contents, and all files of non-package directories, become data_files.
for dirpath, dirnames, filenames in os.walk('bots'):
    # Ignore dirnames that start with '.'
    #~ for i, dirname in enumerate(dirnames):
        #~ if dirname.startswith('.'): del dirnames[i]
    if '__init__.py' in filenames:
        packages.append('.'.join(fullsplit(dirpath)))
        if len(filenames) > 1:
            data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames if not f.endswith('.pyc') and not f.endswith('.py')]])
    elif filenames:
        data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames if not f.endswith('.pyc') and not f.endswith('.py')]])
setup(
    name="bots",
    version="2.1.0",
    author = "hjebbers",
    author_email = "hjebbers@gmail.com",
    url = "http://bots.sourceforge.net/",
    description="Bots open source edi translator",
    long_description = "Bots is complete software for edi (Electronic Data Interchange): translate and communicate. All major edi data formats are supported: edifact, x12, tradacoms, xml",
    platforms="OS Independent (Written in an interpreted language)",
    license="GNU General Public License (GPL)",
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Topic :: Office/Business',
        'Topic :: Office/Business :: Financial',
        'Topic :: Other/Nonlisted Topic',
        'Topic :: Communications',
        'Environment :: Console',
        'Environment :: Web Environment',
        ],
    scripts = [ 'bots-webserver.py',
                'bots-engine.py',
                'bots-grammarcheck.py',
                'bots-xml2botsgrammar.py',
                #~ 'bots/bots-updatedb.py',
                ],
    packages = packages,
    data_files = data_files,
    )
| Python |
#!/usr/bin/env python
from bots import webserver

if __name__ == '__main__':
    # Launch the bots webserver; webserver.start() is expected to block
    # until shutdown -- see bots/webserver.py.
    webserver.start()
| Python |
import unittest
import bots.botsglobal as botsglobal
import bots.inmessage as inmessage
import bots.botslib as botslib
import bots.transform as transform
import pickle
import bots.botsinit as botsinit
import utilsunit
'''plugin unittranslateutils.zip '''
#as the max length is
class TestTranslate(unittest.TestCase):
    def setUp(self):
        # No per-test fixture needed; each test manages its own state.
        pass
    def testpersist(self):
        """Round-trip transform.persist_*: add, lookup, update, delete."""
        #~ inn = inmessage.edifromfile(editype='edifact',messagetype='orderswithenvelope',filename='botssys/infile/tests/inisout02.edi')
        domein=u'test'
        botskey=u'test'
        value= u'xxxxxxxxxxxxxxxxx'
        value2= u'IEFJUKAHE*FMhrt4hr f.wch shjeriw'
        # value3 is only used by the commented-out too-long checks below.
        value3= u'1'*3024
        # Clear any leftover entry from a previous run before adding.
        transform.persist_delete(domein,botskey)
        #~ self.assertRaises(botslib.PersistError,transform.persist_add,domein,botskey,value3) #content is too long
        transform.persist_add(domein,botskey,value)
        self.assertRaises(botslib.PersistError,transform.persist_add,domein,botskey,value) #is already present
        self.assertEqual(value,transform.persist_lookup(domein,botskey),'basis')
        transform.persist_update(domein,botskey,value2)
        self.assertEqual(value2,transform.persist_lookup(domein,botskey),'basis')
        #~ self.assertRaises(botslib.PersistError,transform.persist_update,domein,botskey,value3) #content is too long
        transform.persist_delete(domein,botskey)
        # After deletion a lookup returns None rather than raising.
        self.assertEqual(None,transform.persist_lookup(domein,botskey),'basis')
        transform.persist_update(domein,botskey,value) #key is not there; update gives no error...
    def testpersistunicode(self):
        """Same persist_* round-trip with non-ASCII key and value bytes."""
        domein=u'test'
        botskey=u'1235:\ufb52\ufb66\ufedb'
        value= u'xxxxxxxxxxxxxxxxx'
        value2= u'IEFJUKAHE*FMhr\u0302\u0267t4hr f.wch shjeriw'
        # value3 is only used by the commented-out too-long checks below.
        value3= u'1'*1024
        transform.persist_delete(domein,botskey)
        #~ self.assertRaises(botslib.PersistError,transform.persist_add,domein,botskey,value3) #content is too long
        transform.persist_add(domein,botskey,value)
        self.assertRaises(botslib.PersistError,transform.persist_add,domein,botskey,value) #is already present
        self.assertEqual(value,transform.persist_lookup(domein,botskey),'basis')
        transform.persist_update(domein,botskey,value2)
        self.assertEqual(value2,transform.persist_lookup(domein,botskey),'basis')
        #~ self.assertRaises(botslib.PersistError,transform.persist_update,domein,botskey,value3) #content is too long
        transform.persist_delete(domein,botskey)
        self.assertEqual(None,transform.persist_lookup(domein,botskey),'basis')
        transform.persist_update(domein,botskey,value) #is not there. gives no error...
    def testcodeconversion(self):
        """Forward/reverse code conversion; unknown codes raise CodeConversionError."""
        self.assertEqual('TESTOUT',transform.codeconversion('aperakrff2qualifer','TESTIN'),'basis')
        self.assertRaises(botslib.CodeConversionError,transform.codeconversion,'aperakrff2qualifer','TESTINNOT')
        self.assertEqual('TESTIN',transform.rcodeconversion('aperakrff2qualifer','TESTOUT'),'basis')
        self.assertRaises(botslib.CodeConversionError,transform.rcodeconversion,'aperakrff2qualifer','TESTINNOT')
        #need article in ccodelist:
        self.assertEqual('TESTOUT',transform.codetconversion('artikel','TESTIN'),'basis')
        self.assertRaises(botslib.CodeConversionError,transform.codetconversion,'artikel','TESTINNOT')
        self.assertEqual('TESTIN',transform.rcodetconversion('artikel','TESTOUT'),'basis')
        self.assertRaises(botslib.CodeConversionError,transform.rcodetconversion,'artikel','TESTINNOT')
        # Optional third argument selects an attribute column of the entry.
        self.assertEqual('TESTATTR1',transform.codetconversion('artikel','TESTIN','attr1'),'basis')
    def testunique(self):
        """transform.unique counters start at '1' for a fresh domain."""
        # Use unique() itself to derive a never-seen-before domain name.
        newdomain = 'test' + transform.unique('test')
        self.assertEqual('1',transform.unique(newdomain),'init new domain')
        self.assertEqual('2',transform.unique(newdomain),'next one')
def testunique(self):
newdomain = 'test' + transform.unique('test')
self.assertEqual(True,transform.checkunique(newdomain,1),'init new domain')
self.assertEqual(False,transform.checkunique(newdomain,1),'seq should be 2')
self.assertEqual(False,transform.checkunique(newdomain,3),'seq should be 2')
self.assertEqual(True,transform.checkunique(newdomain,2),'next one')
def testean(self):
self.assertEqual('123456789012',transform.addeancheckdigit('12345678901'),'UPC')
self.assertEqual('2',transform.calceancheckdigit('12345678901'),'UPC')
self.assertEqual(True,transform.checkean('123456789012'),'UPC')
self.assertEqual(False,transform.checkean('123456789011'),'UPC')
self.assertEqual(False,transform.checkean('123456789013'),'UPC')
self.assertEqual('123456789012',transform.addeancheckdigit('12345678901'),'UPC')
self.assertEqual('2',transform.calceancheckdigit('12345678901'),'UPC')
self.assertEqual(True,transform.checkean('123456789012'),'UPC')
self.assertEqual(False,transform.checkean('123456789011'),'UPC')
self.assertEqual(False,transform.checkean('123456789013'),'UPC')
self.assertEqual('12345670',transform.addeancheckdigit('1234567'),'EAN8')
self.assertEqual('0',transform.calceancheckdigit('1234567'),'EAN8')
self.assertEqual(True,transform.checkean('12345670'),'EAN8')
self.assertEqual(False,transform.checkean('12345679'),'EAN8')
self.assertEqual(False,transform.checkean('12345671'),'EAN8')
self.assertEqual('1234567890128',transform.addeancheckdigit('123456789012'),'EAN13')
self.assertEqual('8',transform.calceancheckdigit('123456789012'),'EAN13')
self.assertEqual(True,transform.checkean('1234567890128'),'EAN13')
self.assertEqual(False,transform.checkean('1234567890125'),'EAN13')
self.assertEqual(False,transform.checkean('1234567890120'),'EAN13')
self.assertEqual('12345678901231',transform.addeancheckdigit('1234567890123'),'EAN14')
self.assertEqual('1',transform.calceancheckdigit('1234567890123'),'EAN14')
self.assertEqual(True,transform.checkean('12345678901231'),'EAN14')
self.assertEqual(False,transform.checkean('12345678901230'),'EAN14')
self.assertEqual(False,transform.checkean('12345678901236'),'EAN14')
self.assertEqual('123456789012345675',transform.addeancheckdigit('12345678901234567'),'UPC')
self.assertEqual('5',transform.calceancheckdigit('12345678901234567'),'UPC')
self.assertEqual(True,transform.checkean('123456789012345675'),'UPC')
self.assertEqual(False,transform.checkean('123456789012345670'),'UPC')
self.assertEqual(False,transform.checkean('123456789012345677'),'UPC')
if __name__ == '__main__':
    #initialize the bots environment before running the tests
    botsinit.generalinit('config')
    botsinit.initenginelogging()
    botsinit.connect()
    #unittest.main() raises SystemExit when done. Previously a bare
    #'except: pass' swallowed it (hiding the test result from the process
    #exit code); try/finally closes the database and still propagates the
    #correct exit status.
    try:
        unittest.main()
    finally:
        botsglobal.db.close()
| Python |
import unittest
import filecmp
import glob
import shutil
import os
import subprocess
import logging
import utilsunit
import bots.botslib as botslib
import bots.botsinit as botsinit
import bots.botsglobal as botsglobal
from bots.botsconfig import *
'''
plugin unitconfirm.zip
before each run: clear transactions!
'''
botssys = 'bots/botssys'
class TestMain(unittest.TestCase):
    ''' checks the results of a bots-engine run of the unitconfirm plugin:
        output directories must be empty (or have the expected file count) and
        the confirm-administration in the ta-table (confirmed, confirmidta)
        must match per route/confirmtype.
        Fix: in testroutetest997 and testroutetestcontrl the row counter was
        not reset before the second query loop, so its assertions never ran.
    '''
    def testroutetestmdn(self):
        #all mdn-mails were picked up: confirm/mdn directory must be empty
        lijst = utilsunit.getdir(os.path.join(botssys,'outfile/confirm/mdn/*'))
        self.failUnless(len(lijst)==0)
        #exactly one incoming file (status 210) confirmed with a send-email-MDN
        nr_rows = 0
        for row in botslib.query(u'''SELECT idta,confirmed,confirmidta
                                    FROM ta
                                    WHERE status=%(status)s
                                    AND statust=%(statust)s
                                    AND idroute=%(idroute)s
                                    AND confirmtype=%(confirmtype)s
                                    AND confirmasked=%(confirmasked)s
                                    ORDER BY idta DESC
                                    ''',
                                    {'status':210,'statust':DONE,'idroute':'testmdn','confirmtype':'send-email-MDN','confirmasked':True}):
            nr_rows += 1
            self.failUnless(row[1])        #confirmed
            self.failUnless(row[2]!=0)     #linked to the confirming ta
        else:
            self.failUnless(nr_rows==1)
        #exactly one outgoing file (status 510) for which the asked email-MDN arrived
        nr_rows = 0
        for row in botslib.query(u'''SELECT idta,confirmed,confirmidta
                                    FROM ta
                                    WHERE status=%(status)s
                                    AND statust=%(statust)s
                                    AND idroute=%(idroute)s
                                    AND confirmtype=%(confirmtype)s
                                    AND confirmasked=%(confirmasked)s
                                    ORDER BY idta DESC
                                    ''',
                                    {'status':510,'statust':DONE,'idroute':'testmdn','confirmtype':'ask-email-MDN','confirmasked':True}):
            nr_rows += 1
            self.failUnless(row[1])
            self.failUnless(row[2]!=0)
        else:
            self.failUnless(nr_rows==1)
    def testroutetestmdn2(self):
        #mdn2: an MDN was asked but never received: row stays unconfirmed
        lijst = utilsunit.getdir(os.path.join(botssys,'outfile/confirm/mdn2/*'))
        self.failUnless(len(lijst)==0)
        nr_rows = 0
        for row in botslib.query(u'''SELECT idta,confirmed,confirmidta
                                    FROM ta
                                    WHERE status=%(status)s
                                    AND statust=%(statust)s
                                    AND idroute=%(idroute)s
                                    AND confirmtype=%(confirmtype)s
                                    AND confirmasked=%(confirmasked)s
                                    ORDER BY idta DESC
                                    ''',
                                    {'status':510,'statust':DONE,'idroute':'testmdn2','confirmtype':'ask-email-MDN','confirmasked':True}):
            nr_rows += 1
            self.failUnless(not row[1])    #not confirmed
            self.failUnless(row[2]==0)     #thus no link to a confirming ta
        else:
            self.failUnless(nr_rows==1)
    def testrouteotherx12(self):
        #x12 route without 997-handling: only the file count matters
        lijst = utilsunit.getdir(os.path.join(botssys,'outfile/confirm/otherx12/*'))
        self.failUnless(len(lijst)==15)
    def testroutetest997(self):
        '''
        test997 1: pickup 850*1     ask confirm 850*2   gen & send 850*2
                   send confirm 850*1  gen & send 997*1
        test997 2: receive 997*1    confirm 850*1       gen xml*1
                   receive 850*2    ask confirm 850*3   gen 850*3
                   send confirm 850*2  gen & send 997*2
        test997 3: receive 997*2    confirm 850*2       gen & send xml (to trash)
                   send 850*3 (to trash)
                   send xml (to trash)
        '''
        lijst = utilsunit.getdir(os.path.join(botssys,'outfile/confirm/x12/*'))
        self.failUnless(len(lijst)==0)
        lijst = utilsunit.getdir(os.path.join(botssys,'outfile/confirm/trash/*'))
        self.failUnless(len(lijst)==6)
        #asked 997s, newest first: 850*3 not yet confirmed, 850*2 is confirmed
        counter = 0
        for row in botslib.query(u'''SELECT idta,confirmed,confirmidta
                                    FROM ta
                                    WHERE status=%(status)s
                                    AND statust=%(statust)s
                                    AND idroute=%(idroute)s
                                    AND confirmtype=%(confirmtype)s
                                    AND confirmasked=%(confirmasked)s
                                    ORDER BY idta DESC
                                    ''',
                                    {'status':400,'statust':DONE,'idroute':'test997','confirmtype':'ask-x12-997','confirmasked':True}):
            counter += 1
            if counter == 1:
                self.failUnless(not row[1])
                self.failUnless(row[2]==0)
            elif counter == 2:
                self.failUnless(row[1])
                self.failUnless(row[2]!=0)
            else:
                break
        else:
            self.failUnless(counter!=0)    #for-else: only reached without break; at least one row seen
        #sent 997s: the two newest must both be confirmed
        counter = 0    #fix: counter was not reset here, so the loop below broke immediately
        for row in botslib.query(u'''SELECT idta,confirmed,confirmidta
                                    FROM ta
                                    WHERE status=%(status)s
                                    AND statust=%(statust)s
                                    AND idroute=%(idroute)s
                                    AND confirmtype=%(confirmtype)s
                                    AND confirmasked=%(confirmasked)s
                                    ORDER BY idta DESC
                                    ''',
                                    {'status':310,'statust':DONE,'idroute':'test997','confirmtype':'send-x12-997','confirmasked':True}):
            counter += 1
            if counter <= 2:
                self.failUnless(row[1])
                self.failUnless(row[2]!=0)
            else:
                break
        else:
            self.failUnless(counter!=0)
    def testroutetestcontrl(self):
        '''
        same scenario as testroutetest997, with edifact ORDERS/CONTRL:
        testcontrl 1: pickup ORDERS*1   ask confirm ORDERS*2    gen & send ORDERS*2
                      send confirm ORDERS*1     gen & send CONTRL*1
        testcontrl 2: receive CONTRL*1  confirm ORDERS*1        gen xml*1
                      receive ORDERS*2  ask confirm ORDERS*3    gen ORDERS*3
                      send confirm ORDERS*2     gen & send CONTRL*2
        testcontrl 3: receive CONTRL*2  confirm ORDERS*2        gen & send xml (to trash)
                      send ORDERS*3 (to trash)
                      send xml (to trash)
        '''
        lijst = utilsunit.getdir(os.path.join(botssys,'outfile/confirm/edifact/*'))
        self.failUnless(len(lijst)==0)
        lijst = utilsunit.getdir(os.path.join(botssys,'outfile/confirm/trash/*'))
        self.failUnless(len(lijst)==6)
        #asked CONTRLs, newest first: ORDERS*3 unconfirmed, ORDERS*2 confirmed
        counter = 0
        for row in botslib.query(u'''SELECT idta,confirmed,confirmidta
                                    FROM ta
                                    WHERE status=%(status)s
                                    AND statust=%(statust)s
                                    AND idroute=%(idroute)s
                                    AND confirmtype=%(confirmtype)s
                                    AND confirmasked=%(confirmasked)s
                                    ORDER BY idta DESC
                                    ''',
                                    {'status':400,'statust':DONE,'idroute':'testcontrl','confirmtype':'ask-edifact-CONTRL','confirmasked':True}):
            counter += 1
            if counter == 1:
                self.failUnless(not row[1])
                self.failUnless(row[2]==0)
            elif counter == 2:
                self.failUnless(row[1])
                self.failUnless(row[2]!=0)
            else:
                break
        else:
            self.failUnless(counter!=0)
        #sent CONTRLs: the two newest must both be confirmed
        counter = 0    #fix: counter was not reset here, so the loop below broke immediately
        for row in botslib.query(u'''SELECT idta,confirmed,confirmidta
                                    FROM ta
                                    WHERE status=%(status)s
                                    AND statust=%(statust)s
                                    AND idroute=%(idroute)s
                                    AND confirmtype=%(confirmtype)s
                                    AND confirmasked=%(confirmasked)s
                                    ORDER BY idta DESC
                                    ''',
                                    {'status':310,'statust':DONE,'idroute':'testcontrl','confirmtype':'send-edifact-CONTRL','confirmasked':True}):
            counter += 1
            if counter <= 2:
                self.failUnless(row[1])
                self.failUnless(row[2]!=0)
            else:
                break
        else:
            self.failUnless(counter!=0)
if __name__ == '__main__':
pythoninterpreter = 'python'
newcommand = [pythoninterpreter,'bots-engine.py',]
shutil.rmtree(os.path.join(botssys, 'outfile'),ignore_errors=True) #remove whole output directory
subprocess.call(newcommand)
botsinit.generalinit('config')
botsinit.initenginelogging()
botsinit.connect()
print '''expect:
21 files received/processed in run.
17 files without errors,
4 files with errors,
30 files send in run.
'''
unittest.main()
logging.shutdown()
botsglobal.db.close()
| Python |
#!/usr/bin/env python
''' command-line entry point: run the bots grammar checker. '''
from bots import grammarcheck
if __name__=='__main__':
    grammarcheck.start()
| Python |
import os
import unittest
import shutil
import bots.inmessage as inmessage
import bots.outmessage as outmessage
import filecmp
try:
import json as simplejson
except ImportError:
import simplejson
import bots.botslib as botslib
import bots.botsinit as botsinit
import utilsunit
''' pluging unitinisout.zip'''
class Testinisoutedifact(unittest.TestCase):
    ''' edifact roundtrip tests: read a file, write it back with the same
        grammar; the result must be byte-identical to the original.
        Refactored: the read/write boilerplate and the eightfold copy-paste
        in testedifact04 are replaced by a helper and loops (same files,
        same comparisons).
    '''
    def _roundtrip(self, messagetype, infile, outfile):
        ''' parse infile and serialize the parsed tree to outfile using the
            same edifact grammar. '''
        inn = inmessage.edifromfile(editype='edifact',messagetype=messagetype,filename=infile)
        out = outmessage.outmessage_init(editype='edifact',messagetype=messagetype,filename=outfile,divtext='',topartner='') #make outmessage object
        out.root = inn.root
        out.writeall()
    def testedifact02(self):
        infile ='botssys/infile/unitinisout/org/inisout02.edi'
        outfile='botssys/infile/unitinisout/output/inisout02.edi'
        self._roundtrip('orderswithenvelope',infile,outfile)
        self.failUnless(filecmp.cmp('bots/' + outfile,'bots/' + infile))
    def testedifact03(self):
        #NOTE: takes quite long
        infile ='botssys/infile/unitinisout/org/inisout03.edi'
        outfile='botssys/infile/unitinisout/output/inisout03.edi'
        self._roundtrip('invoicwithenvelope',infile,outfile)
        self.failUnless(filecmp.cmp('bots/' + outfile,'bots/' + infile))
    def testedifact04(self):
        ''' eight variants of the same interchange (files 040601..040608)
            must all serialize to identical output. '''
        for seqnr in range(1,9):
            utilsunit.readwrite(editype='edifact',
                                messagetype='orderswithenvelope',
                                filenamein='botssys/infile/unitinisout/0406edifact/04060%s.edi' % seqnr,
                                filenameout='botssys/infile/unitinisout/output/04060%s.edi' % seqnr)
        #every output must equal the first one
        for seqnr in range(2,9):
            self.failUnless(filecmp.cmp('bots/botssys/infile/unitinisout/output/040601.edi',
                                        'bots/botssys/infile/unitinisout/output/04060%s.edi' % seqnr))
class Testinisoutinh(unittest.TestCase):
    ''' roundtrip tests for fixed-format (inhouse) and idoc files: read, write
        back with the same grammar, result must be byte-identical. '''
    def testinh01(self):
        #fixed-format invoic
        source = 'botssys/infile/unitinisout/org/inisout01.inh'
        dest = 'botssys/infile/unitinisout/output/inisout01.inh'
        reader = inmessage.edifromfile(editype='fixed',messagetype='invoicfixed',filename=source)
        writer = outmessage.outmessage_init(editype='fixed',messagetype='invoicfixed',filename=dest,divtext='',topartner='') #make outmessage object
        writer.root = reader.root
        writer.writeall()
        self.failUnless(filecmp.cmp('bots/' + dest,'bots/' + source))
    def testidoc01(self):
        #idoc WP_PLU02
        source = 'botssys/infile/unitinisout/org/inisout01.idoc'
        dest = 'botssys/infile/unitinisout/output/inisout01.idoc'
        reader = inmessage.edifromfile(editype='idoc',messagetype='WP_PLU02',filename=source)
        writer = outmessage.outmessage_init(editype='idoc',messagetype='WP_PLU02',filename=dest,divtext='',topartner='') #make outmessage object
        writer.root = reader.root
        writer.writeall()
        self.failUnless(filecmp.cmp('bots/' + dest,'bots/' + source))
class Testinisoutx12(unittest.TestCase):
    ''' x12 roundtrip tests. Only the first 103 positions of the ISA segment
        are compared exactly; the tail apparently differs per run (presumably
        date/time or control fields - verify against the x12 envelope code). '''
    def testx12_01(self):
        source = 'botssys/infile/unitinisout/org/inisout01.x12'
        dest = 'botssys/infile/unitinisout/output/inisout01.x12'
        reader = inmessage.edifromfile(editype='x12',messagetype='850withenvelope',filename=source)
        writer = outmessage.outmessage_init(editype='x12',messagetype='850withenvelope',filename=dest,divtext='',topartner='') #make outmessage object
        writer.root = reader.root
        writer.writeall()
        expected_lines = utilsunit.readfilelines('bots/' + source)
        result_lines = utilsunit.readfilelines('bots/' + dest)
        self.assertEqual(expected_lines[0][:103],result_lines[0][:103],'first part of ISA')
        #all segments after the ISA must match exactly
        for expected,actual in zip(expected_lines[1:],result_lines[1:]):
            self.assertEqual(expected,actual,'Cmplines')
    def testx12_02(self):
        #output written without CR/LF after each record: compare as one string
        source = 'botssys/infile/unitinisout/org/inisout02.x12'
        dest = 'botssys/infile/unitinisout/output/inisout02.x12'
        reader = inmessage.edifromfile(editype='x12',messagetype='850withenvelope',filename=source)
        writer = outmessage.outmessage_init(add_crlfafterrecord_sep='',editype='x12',messagetype='850withenvelope',filename=dest,divtext='',topartner='') #make outmessage object
        writer.root = reader.root
        writer.writeall()
        expected = utilsunit.readfile('bots/' + source)
        result = utilsunit.readfile('bots/' + dest)
        self.assertEqual(expected[:103],result[:103],'first part of ISA')
        #source has 2 extra chars (record separator) after the ISA segment
        self.assertEqual(expected[105:],result[103:],'rest of message')
class Testinisoutcsv(unittest.TestCase):
    ''' csv roundtrip tests: read, write back, compare with original. '''
    def testcsv001(self):
        source = 'botssys/infile/unitinisout/org/inisout01.csv'
        dest = 'botssys/infile/unitinisout/output/inisout01.csv'
        utilsunit.readwrite(editype='csv',messagetype='invoic',filenamein=source,filenameout=dest)
        self.failUnless(filecmp.cmp('bots/' + dest,'bots/' + source))
    def testcsv003(self):
        #utf charset
        source = 'botssys/infile/unitinisout/org/inisout03.csv'
        dest = 'botssys/infile/unitinisout/output/inisout03.csv'
        utilsunit.readwrite(editype='csv',messagetype='invoic',filenamein=source,filenameout=dest)
        self.failUnless(filecmp.cmp('bots/' + dest,'bots/' + source))
        #NOTE: a utf file *with BOM* (inisout04.csv) fails this roundtrip: the
        #BOM is not removed by python, so that test remains disabled:
        #~ #utilsunit.readwrite(editype='csv',
        #~ #                    messagetype='invoic',
        #~ #                    filenamein='botssys/infile/unitinisout/inisout04.csv',
        #~ #                    filenameout='botssys/infile/unitinisout/output/inisout04.csv')
        #~ #self.failUnless(filecmp.cmp('botssys/infile/unitinisout/output/inisout04.csv','botssys/infile/unitinisout/inisout04.csv'))
if __name__ == '__main__':
    #initialize the bots environment (config, charsets, logging)
    botsinit.generalinit('config')
    #~ botslib.initbotscharsets()
    botsinit.initenginelogging()
    #start from an empty output directory so the filecmp checks compare
    #freshly written files only
    shutil.rmtree('bots/botssys/infile/unitinisout/output',ignore_errors=True) #remove whole output directory
    os.mkdir('bots/botssys/infile/unitinisout/output')
    unittest.main()
| Python |
import unittest
import bots.botslib as botslib
import bots.botsinit as botsinit
import bots.inmessage as inmessage
import bots.outmessage as outmessage
from bots.botsconfig import *
import utilsunit
''' plugin unitformats '''
#python 2.6 treats -0 different. in outmessage this is adapted, for inmessage: python 2.6 does this correct
testdummy={MPATH:'dummy for tests'}
class TestFormatFieldVariableOutmessage(unittest.TestCase):
    def setUp(self):
        #fresh edifact outmessage per test; the tests call its _formatfield
        #method directly and tweak ta_info (decimal sign, length counting)
        self.edi = outmessage.outmessage_init(messagetype='edifact',editype='edifact')
    def test_out_formatfield_var_R(self):
        ''' format 'R' in variable-length syntax: numeric value with the
            decimals kept exactly as received (no padding/rounding of the
            decimal part). Covers normalization, max/min length, the decimal
            sign from ta_info, invalid values and exponent notation.
        '''
        self.edi.ta_info['lengthnumericbare']=True
        self.edi.ta_info['decimaal']='.'
        #field spec: [name, M/C, max length, format, ..., nr decimals, min length, bots-format]
        #(meaning of the 5th element (True) not visible here - confirm against grammar code)
        tfield1 = ['TEST1','M',3,'R',True,0, 0, 'R']
        #valid values: normalization of sign, blanks and leading zeroes
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '1', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '1', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '1', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '0','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0','neg.zero stays neg.zero')
        self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
        self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '123','numeric field at max')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '1','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '0.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-1.23','numeric field at max with minus and decimal sign')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '1','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+123',tfield1,testdummy), '123','strips leading zeroes if possobel')
        #decimal sign is taken from ta_info
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), '1,23','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        #invalid values must raise OutMessageError
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1.234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middel of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',', where ',' is not traid sep.
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.001',tfield1,testdummy) #bots adds 0 before dec, thus too big
        #test filling up to min length (min length 5)
        tfield2 = ['TEST1', 'M', 8, 'R', True, 0, 5,'R']
        self.assertEqual(self.edi._formatfield('12345',tfield2,testdummy), '12345','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '0.1000','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '00001','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('123',tfield2,testdummy), '00123','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '0000.1','add leading zeroes')
        #exponent notation is converted to plain notation
        self.assertEqual(self.edi._formatfield('12E+3',tfield2,testdummy), '12000','Exponent notation is possible')
        self.assertEqual(self.edi._formatfield('12E3',tfield2,testdummy), '12000','Exponent notation is possible->to std notation')
        self.assertEqual(self.edi._formatfield('12e+3',tfield2,testdummy), '12000','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('12e3',tfield2,testdummy), '12000','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('12345E+3',tfield2,testdummy), '12345000','do not count + and E')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        #negative-exponent cases are disabled below; tfield3 is only used by them - TODO confirm/enable
        tfield3 = ['TEST1', 'M', 8, 'R', True, 3, 5,'R']
        #~ print '\n>>>',self.edi._formatfield('12E-3',tfield3,testdummy)
        #~ self.assertEqual(self.edi._formatfield('12E-3',tfield3,testdummy), '00.012','Exponent notation is possible')
        #~ self.assertEqual(self.edi._formatfield('12e-3',tfield2,testdummy), '00.012','Exponent notation is possible; e->E')
        #~ self.assertEqual(self.edi._formatfield('12345678E-3',tfield2,testdummy), '12345.678','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('12345678E-7',tfield2,testdummy), '1.2345678','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('123456E-7',tfield2,testdummy), '0.0123456','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('1234567E-7',tfield2,testdummy), '0.1234567','do not count + and E')
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E-8',tfield2,testdummy) #gets 0.12345678, is too big
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        tfield4 = ['TEST1', 'M', 80, 'R', True, 3, 0,'R']
        self.assertEqual(self.edi._formatfield('12345678901234560',tfield4,testdummy), '12345678901234560','lot of digits')
        #with lengthnumericbare=False the sign/decimal sign count towards the length
        self.edi.ta_info['lengthnumericbare']=False
        self.assertEqual(self.edi._formatfield('-1.45',tfield2,testdummy), '-1.45','just large enough')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-12345678',tfield2,testdummy) #field too large
    def test_out_formatfield_var_N(self):
        ''' format 'N' in variable-length syntax: numeric value with a fixed
            number of decimals; values are padded/rounded to exactly that many
            decimals. Covers normalization, rounding, min length padding,
            invalid values and exponent notation.
        '''
        self.edi.ta_info['decimaal']='.'
        self.edi.ta_info['lengthnumericbare']=True
        #field spec: [name, M/C, max length, format, ..., nr decimals, min length, bots-format]
        tfield1 = ['TEST1','M',5,'N',True,2, 0, 'N']
        #valid values: always formatted with 2 decimals
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '1.00', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '1.00', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '1.00', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '0.00','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0.00','neg.zero stays neg.zero')
        self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('-0.001',tfield1,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
        self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '123.00','numeric field at max')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '1.00','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '0.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('123.1049',tfield1,testdummy), '123.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-1.23','numeric field at max with minus and decimal sign')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '1.00','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+123',tfield1,testdummy), '123.00','strips leading zeroes if possobel')
        #decimal sign is taken from ta_info
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), '1,23','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        #invalid values must raise OutMessageError
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1234.56',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middel of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',', where ',' is not traid sep.
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
        #test filling up to min length; 0 decimals: value is rounded to an integer
        tfield23 = ['TEST1', 'M', 8, 'N', True, 0, 5,'N']
        #~ print self.edi._formatfield('12345.5',tfield23,testdummy)
        self.assertEqual(self.edi._formatfield('12345.5',tfield23,testdummy), '12346','just large enough')
        #2 decimals, min length 5: rounding plus zero-padding
        tfield2 = ['TEST1', 'M', 8, 'N', True, 2, 5,'N']
        self.assertEqual(self.edi._formatfield('123.45',tfield2,testdummy), '123.45','just large enough')
        self.assertEqual(self.edi._formatfield('123.4549',tfield2,testdummy), '123.45','just large enough')
        #~ print self.edi._formatfield('123.455',tfield2,testdummy)
        self.assertEqual(self.edi._formatfield('123.455',tfield2,testdummy), '123.46','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '000.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '001.00','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('12',tfield2,testdummy), '012.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '000.10','add leading zeroes')
        #test exp; bots tries to convert to normal
        self.assertEqual(self.edi._formatfield('178E+3',tfield2,testdummy), '178000.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178E+3',tfield2,testdummy), '-178000.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-3',tfield2,testdummy), '-000.18','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-5',tfield2,testdummy), '-000.00','add leading zeroes')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'178E+4',tfield2,testdummy) #too big with exp
        #very long values within a large max length
        tfield4 = ['TEST1', 'M', 80, 'N', True, 3, 0,'N']
        self.assertEqual(self.edi._formatfield('12345678901234560',tfield4,testdummy), '12345678901234560.000','lot of digits')
        self.assertEqual(self.edi._formatfield('1234567890123456789012345',tfield4,testdummy), '1234567890123456789012345.000','lot of digits')
    def test_out_formatfield_var_I(self):
        ''' format 'I' in variable-length syntax: implied-decimal numeric; the
            value is written without a decimal sign, shifted by the number of
            implied decimals (2 here: '1' -> '100'). Covers normalization,
            rounding, min length padding, invalid values and exponents.
        '''
        self.edi.ta_info['lengthnumericbare']=True
        self.edi.ta_info['decimaal']='.'
        #field spec: [name, M/C, max length, format, ..., nr decimals, min length, bots-format]
        tfield1 = ['TEST1','M',5,'I',True,2, 0, 'I']
        #valid values: decimal point removed, 2 implied decimals
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '100', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '100', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '100', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '0','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0','neg.zero stays neg.zero')
        self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0','')
        self.assertEqual(self.edi._formatfield('-0.001',tfield1,testdummy), '-0','')
        self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-12','no zero before dec,sign is OK') #TODO: puts ) in front
        self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '12300','numeric field at max')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '100','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('123.1049',tfield1,testdummy), '12310','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-123','numeric field at max with minus and decimal sign')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '100','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+123',tfield1,testdummy), '12300','strips leading zeroes if possobel')
        #decimal sign setting does not show in the output (implied decimals)
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), '123','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        #invalid values must raise OutMessageError
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1234.56',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middel of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',', where ',' is not traid sep.
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
        self.assertEqual(self.edi._formatfield('+13',tfield1,testdummy), '1300','other dec.sig, replace')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
        #test filling up to min length (min length 5, 2 implied decimals)
        tfield2 = ['TEST1', 'M', 8, 'I', True, 2, 5,'I']
        self.assertEqual(self.edi._formatfield('123.45',tfield2,testdummy), '12345','just large enough')
        self.assertEqual(self.edi._formatfield('123.4549',tfield2,testdummy), '12345','just large enough')
        self.assertEqual(self.edi._formatfield('123.455',tfield2,testdummy), '12346','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '00010','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '00100','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('12',tfield2,testdummy), '01200','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '00010','add leading zeroes')
        #test exp; bots tries to convert to normal
        self.assertEqual(self.edi._formatfield('178E+3',tfield2,testdummy), '17800000','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178E+3',tfield2,testdummy), '-17800000','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-3',tfield2,testdummy), '-00018','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-5',tfield2,testdummy), '-00000','add leading zeroes')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'178E+4',tfield2,testdummy) #too big with exp
        #very long values within a large max length (3 implied decimals)
        tfield4 = ['TEST1', 'M', 80, 'I', True, 3, 0,'I']
        self.assertEqual(self.edi._formatfield('123456789012340',tfield4,testdummy), '123456789012340000','lot of digits')
def test_out_formatfield_var_D(self):
tfield1 = ['TEST1', 'M', 20, 'D', True, 0, 0,'D']
# length decimals minlength
self.assertEqual(self.edi._formatfield('20071001',tfield1,testdummy), '20071001','basic')
self.assertEqual(self.edi._formatfield('071001',tfield1,testdummy), '071001','basic')
self.assertEqual(self.edi._formatfield('99991001',tfield1,testdummy), '99991001','max year')
self.assertEqual(self.edi._formatfield('00011001',tfield1,testdummy), '00011001','min year')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'2007093112',tfield1,testdummy) #too long
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'20070931',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-0070931',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'70931',tfield1,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0931',tfield1,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0931BC',tfield1,testdummy) #alfanum
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'OOOOBC',tfield1,testdummy) #alfanum
def test_out_formatfield_var_T(self):
tfield1 = ['TEST1', 'M', 10, 'T', True, 0, 0,'T']
# length decimals minlength
self.assertEqual(self.edi._formatfield('2359',tfield1,testdummy), '2359','basic')
self.assertEqual(self.edi._formatfield('0000',tfield1,testdummy), '0000','basic')
self.assertEqual(self.edi._formatfield('000000',tfield1,testdummy), '000000','basic')
self.assertEqual(self.edi._formatfield('230000',tfield1,testdummy), '230000','basic')
self.assertEqual(self.edi._formatfield('235959',tfield1,testdummy), '235959','basic')
self.assertEqual(self.edi._formatfield('123456',tfield1,testdummy), '123456','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678',tfield1,testdummy) #no valid time
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'240001',tfield1,testdummy) #no valid time
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'126101',tfield1,testdummy) #no valid time
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'120062',tfield1,testdummy) #no valid time - python allows 61 secnds?
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'240000',tfield1,testdummy) #no valid time
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'250001',tfield1,testdummy) #no valid time
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-12000',tfield1,testdummy) #no valid time
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'120',tfield1,testdummy) #no valid time
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0931233',tfield1,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'11PM',tfield1,testdummy) #alfanum
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'TIME',tfield1,testdummy) #alfanum
tfield2 = ['TEST1', 'M', 4, 'T', True, 0, 4,'T']
# length decimals minlength
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'230001',tfield2,testdummy) #time too long
tfield3 = ['TEST1', 'M', 6, 'T', True, 0, 6,'T']
# length decimals minlength
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'2300',tfield3,testdummy) #time too short
def test_out_formatfield_var_A(self):
tfield1 = ['TEST1', 'M', 5, 'A', True, 0, 0,'A']
# length decimals minlength
self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '','basic')
self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), 'a b','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'abcdef',tfield1,testdummy) #no valid date
tfield1 = ['TEST1', 'M', 5, 'A', True, 0, 2,'A']
# length decimals minlength
self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), 'a b','basic')
self.assertEqual(self.edi._formatfield('aa',tfield1,testdummy), 'aa','basic')
self.assertEqual(self.edi._formatfield('aaa',tfield1,testdummy), 'aaa','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'a',tfield1,testdummy) #field too small
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,' ',tfield1,testdummy) #field too small
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'',tfield1,testdummy) #field too small
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'abcdef',tfield1,testdummy) #no valid date
class TestFormatFieldFixedOutmessage(unittest.TestCase):
    def setUp(self):
        """Build a fresh fixed-format outmessage per test; tests mutate its ta_info freely without leaking state."""
        self.edi = outmessage.outmessage_init(editype='fixed',messagetype='ordersfixed')
    def test_out_formatfield_fixedR(self):
        """Format 'R' (keep decimals as given) in fixed records: numbers are zero-padded on the left to the fixed length.

        NOTE(review): statement order matters below — self.edi.ta_info['decimaal'] is
        switched to ',' and back mid-test to check decimal-sign replacement.
        """
        self.edi.ta_info['lengthnumericbare']=False
        self.edi.ta_info['decimaal']='.'
        # field layout: [name, M/C, length, format, mandatory, decimals, minlength, base format]
        tfield1 = ['TEST1','M',3,'R',True,0, 3, 'R']
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '000','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '001', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '001', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '001', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '000','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-00','neg.zero stays neg.zero')
        tfield3 = ['TEST1','M',5,'R',True,2, 3, 'R']
        self.assertEqual(self.edi._formatfield('-0.00',tfield3,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('0.10',tfield3,testdummy), '0.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '123','numeric field at max')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '001','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '001','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+123',tfield1,testdummy), '123','strips leading zeroes if possobel')
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.2',tfield1,testdummy), '1,2','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.12',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-.12',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1.234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',' where ',' is not the thousands separator
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'E' without valid exponent digits after it here
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.001',tfield1,testdummy) #bots adds 0 before dec, thus too big
        # test filling up to min length
        tfield2 = ['TEST1', 'M', 8, 'R', True, 0, 8,'R']
        self.assertEqual(self.edi._formatfield('12345',tfield2,testdummy), '00012345','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '000.1000','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '00000001','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('123',tfield2,testdummy), '00000123','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '000000.1','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-1.23',tfield2,testdummy), '-0001.23','numeric field at max with minus and decimal sign')
        # exponent notation is normalised to plain notation, then padded
        self.assertEqual(self.edi._formatfield('12E+3',tfield2,testdummy), '00012000','Exponent notation is possible')
        self.assertEqual(self.edi._formatfield('12E3',tfield2,testdummy), '00012000','Exponent notation is possible->to std notation')
        self.assertEqual(self.edi._formatfield('12e+3',tfield2,testdummy), '00012000','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('12e3',tfield2,testdummy), '00012000','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('4567E+3',tfield2,testdummy), '04567000','do not count + and E')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        #~ #print '>>',self.edi._formatfield('12E-3',tfield2,testdummy)
        #~ self.assertEqual(self.edi._formatfield('12E-3',tfield2,testdummy), '0000.012','Exponent notation is possible')
        #~ self.assertEqual(self.edi._formatfield('12e-3',tfield2,testdummy), '0000.012','Exponent notation is possible; e->E')
        #~ self.assertEqual(self.edi._formatfield('1234567E-3',tfield2,testdummy), '1234.567','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('1234567E-6',tfield2,testdummy), '1.234567','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('123456E-6',tfield2,testdummy), '0.123456','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('-12345E-5',tfield2,testdummy), '-0.12345','do not count + and E')
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E-8',tfield2,testdummy) #gets 0.12345678, is too big
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        tfield4 = ['TEST1', 'M', 30, 'R', True, 3, 30,'R']
        self.assertEqual(self.edi._formatfield('12345678901234560',tfield4,testdummy), '000000000000012345678901234560','lot of digits')
        tfield5 = ['TEST1','M',4,'R',True,2, 4, 'R']
        self.assertEqual(self.edi._formatfield('0.00',tfield5,testdummy), '0.00','lot of digits')
        tfield6 = ['TEST1','M',5,'R',True,2, 5, 'R']
        self.assertEqual(self.edi._formatfield('12.45',tfield6,testdummy), '12.45','lot of digits')
    def test_out_formatfield_fixedRL(self):
        """Format 'RL' in fixed records: like 'R' but left-aligned — padded with trailing spaces, not leading zeroes.

        Expected strings below contain significant trailing spaces (field width padding).
        """
        self.edi.ta_info['lengthnumericbare']=False
        self.edi.ta_info['decimaal']='.'
        # field layout: [name, M/C, length, format, mandatory, decimals, minlength, base format]
        tfield1 = ['TEST1','M',3,'RL',True,0, 3, 'R']
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '0  ','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '1  ', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '1  ', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '1  ', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '0  ','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0 ','neg.zero stays neg.zero')
        tfield3 = ['TEST1','M',5,'RL',True,2, 3, 'R']
        self.assertEqual(self.edi._formatfield('-0.00',tfield3,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('0.10',tfield3,testdummy), '0.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '123','numeric field at max')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '1  ','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '1  ','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+123',tfield1,testdummy), '123','strips leading zeroes if possobel')
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.2',tfield1,testdummy), '1,2','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.12',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-.12',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1.234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',' where ',' is not the thousands separator
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'E' without valid exponent digits after it here
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.001',tfield1,testdummy) #bots adds 0 before dec, thus too big
        # test filling up to min length
        tfield2 = ['TEST1', 'M', 8, 'RL', True, 0, 8,'R']
        self.assertEqual(self.edi._formatfield('12345',tfield2,testdummy), '12345   ','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '0.1000  ','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '1       ','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('123',tfield2,testdummy), '123     ','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '0.1     ','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-1.23',tfield2,testdummy), '-1.23   ','numeric field at max with minus and decimal sign')
        # exponent notation is normalised to plain notation, then padded
        self.assertEqual(self.edi._formatfield('12E+3',tfield2,testdummy), '12000   ','Exponent notation is possible')
        self.assertEqual(self.edi._formatfield('12E3',tfield2,testdummy), '12000   ','Exponent notation is possible->to std notation')
        self.assertEqual(self.edi._formatfield('12e+3',tfield2,testdummy), '12000   ','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('12e3',tfield2,testdummy), '12000   ','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('4567E+3',tfield2,testdummy), '4567000 ','do not count + and E')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        #~ #print '>>',self.edi._formatfield('12E-3',tfield2,testdummy)
        #~ self.assertEqual(self.edi._formatfield('12E-3',tfield2,testdummy), '0000.012','Exponent notation is possible')
        #~ self.assertEqual(self.edi._formatfield('12e-3',tfield2,testdummy), '0000.012','Exponent notation is possible; e->E')
        #~ self.assertEqual(self.edi._formatfield('1234567E-3',tfield2,testdummy), '1234.567','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('1234567E-6',tfield2,testdummy), '1.234567','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('123456E-6',tfield2,testdummy), '0.123456','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('-12345E-5',tfield2,testdummy), '-0.12345','do not count + and E')
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E-8',tfield2,testdummy) #gets 0.12345678, is too big
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        tfield4 = ['TEST1', 'M', 30, 'RL', True, 3, 30,'R']
        self.assertEqual(self.edi._formatfield('12345678901234560',tfield4,testdummy), '12345678901234560             ','lot of digits')
        tfield5 = ['TEST1','M',4,'RL',True,2, 4, 'N']
        self.assertEqual(self.edi._formatfield('0.00',tfield5,testdummy), '0.00','lot of digits')
        tfield6 = ['TEST1','M',5,'RL',True,2, 5, 'N']
        self.assertEqual(self.edi._formatfield('12.45',tfield6,testdummy), '12.45','lot of digits')
    def test_out_formatfield_fixedRR(self):
        """Format 'RR' in fixed records: like 'R' but right-aligned — padded with leading spaces, not zeroes.

        Expected strings below contain significant leading spaces (field width padding).
        """
        self.edi.ta_info['lengthnumericbare']=False
        self.edi.ta_info['decimaal']='.'
        # field layout: [name, M/C, length, format, mandatory, decimals, minlength, base format]
        tfield1 = ['TEST1','M',3,'RR',True,0, 3, 'R']
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '  0','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '  1', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '  1', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '  1', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '  0','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), ' -0','neg.zero stays neg.zero')
        tfield3 = ['TEST1','M',5,'RR',True,2, 3, 'R']
        self.assertEqual(self.edi._formatfield('-0.00',tfield3,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('0.10',tfield3,testdummy), '0.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '123','numeric field at max')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '  1','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '  1','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+123',tfield1,testdummy), '123','strips leading zeroes if possobel')
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.2',tfield1,testdummy), '1,2','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '.12',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '-.12',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '-1.234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '1-3',tfield1,testdummy) #'-' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '1,3',tfield1,testdummy) #',' where ',' is not the thousands separator
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '1E3',tfield1,testdummy) #'E' without valid exponent digits after it here
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '13+',tfield1,testdummy) #'+' at end
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '1+3',tfield1,testdummy) #'+' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '0.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '-',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield, '.001',tfield1,testdummy) #bots adds 0 before dec, thus too big
        # test filling up to min length
        tfield2 = ['TEST1', 'M', 8, 'RR', True, 0, 8,'R']
        self.assertEqual(self.edi._formatfield('12345',tfield2,testdummy), '   12345','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '  0.1000','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '       1','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('123',tfield2,testdummy), '     123','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '     0.1','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-1.23',tfield2,testdummy), '   -1.23','numeric field at max with minus and decimal sign')
        # exponent notation is normalised to plain notation, then padded
        self.assertEqual(self.edi._formatfield('12E+3',tfield2,testdummy), '   12000','Exponent notation is possible')
        self.assertEqual(self.edi._formatfield('12E3',tfield2,testdummy), '   12000','Exponent notation is possible->to std notation')
        self.assertEqual(self.edi._formatfield('12e+3',tfield2,testdummy), '   12000','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('12e3',tfield2,testdummy), '   12000','Exponent notation is possible; e->E')
        self.assertEqual(self.edi._formatfield('4567E+3',tfield2,testdummy), ' 4567000','do not count + and E')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        #~ #print '>>',self.edi._formatfield('12E-3',tfield2,testdummy)
        #~ self.assertEqual(self.edi._formatfield('12E-3',tfield2,testdummy), '0000.012','Exponent notation is possible')
        #~ self.assertEqual(self.edi._formatfield('12e-3',tfield2,testdummy), '0000.012','Exponent notation is possible; e->E')
        #~ self.assertEqual(self.edi._formatfield('1234567E-3',tfield2,testdummy), '1234.567','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('1234567E-6',tfield2,testdummy), '1.234567','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('123456E-6',tfield2,testdummy), '0.123456','do not count + and E')
        #~ self.assertEqual(self.edi._formatfield('-12345E-5',tfield2,testdummy), '-0.12345','do not count + and E')
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E-8',tfield2,testdummy) #gets 0.12345678, is too big
        #~ self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'12345678E+3',tfield2,testdummy) #too big with exp
        tfield4 = ['TEST1', 'M', 30, 'RR', True, 3, 30,'R']
        self.assertEqual(self.edi._formatfield('12345678901234560',tfield4,testdummy), '             12345678901234560','lot of digits')
        tfield5 = ['TEST1','M',4,'RR',True,2, 4, 'N']
        self.assertEqual(self.edi._formatfield('0.00',tfield5,testdummy), '0.00','lot of digits')
        tfield6 = ['TEST1','M',5,'RR',True,2, 5, 'N']
        self.assertEqual(self.edi._formatfield('12.45',tfield6,testdummy), '12.45','lot of digits')
    def test_out_formatfield_fixedN(self):
        """Format 'N' (fixed number of decimals) in fixed records: value is rounded/extended to the
        declared decimals, then zero-padded on the left to the fixed length.

        NOTE(review): ta_info['decimaal'] is switched to ',' and back mid-test — order matters.
        """
        self.edi.ta_info['decimaal']='.'
        self.edi.ta_info['lengthnumericbare']=False
        # field layout: [name, M/C, length, format, mandatory, decimals, minlength, base format]
        tfield1 = ['TEST1','M',5,'N',True,2, 5, 'N']
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '00.00','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '01.00', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '01.00', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '01.00', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '00.00','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0.00','neg.zero stays neg.zero')
        self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('-0.001',tfield1,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '01.00','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '00.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('12.1049',tfield1,testdummy), '12.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-1.23','numeric field at max with minus and decimal sign')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '01.00','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+13',tfield1,testdummy), '13.00','strips leading zeroes if possobel')
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), '01,23','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123.1049',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1234.56',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',' where ',' is not the thousands separator
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'E' without valid exponent digits after it here
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
        # test filling up to min length
        tfield2 = ['TEST1', 'M', 8, 'N', True, 2, 8,'N']
        self.assertEqual(self.edi._formatfield('123.45',tfield2,testdummy), '00123.45','just large enough')
        self.assertEqual(self.edi._formatfield('123.4549',tfield2,testdummy), '00123.45','just large enough')
        self.assertEqual(self.edi._formatfield('123.455',tfield2,testdummy), '00123.46','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '00000.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '00001.00','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('12',tfield2,testdummy), '00012.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '00000.10','add leading zeroes')
        # test exp; bots tries to convert to normal notation
        self.assertEqual(self.edi._formatfield('78E+3',tfield2,testdummy), '78000.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-8E+3',tfield2,testdummy), '-8000.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-3',tfield2,testdummy), '-0000.18','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-5',tfield2,testdummy), '-0000.00','add leading zeroes')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'178E+4',tfield2,testdummy) #too big with exp
        tfield4 = ['TEST1', 'M', 30, 'N', True, 3, 30,'N']
        self.assertEqual(self.edi._formatfield('1234567890123456',tfield4,testdummy), '00000000001234567890123456.000','lot of digits')
        # test N format with zero decimals: value is truncated/rounded to an integer
        tfield7 = ['TEST1', 'M', 5, 'N', True, 0, 5, 'N']
        self.assertEqual(self.edi._formatfield('12345',tfield7,testdummy), '12345','')
        self.assertEqual(self.edi._formatfield('1.234',tfield7,testdummy), '00001','')
        self.assertEqual(self.edi._formatfield('123.4',tfield7,testdummy), '00123','')
        self.assertEqual(self.edi._formatfield('0.0',tfield7,testdummy), '00000','')
    def test_out_formatfield_fixedNL(self):
        """Format 'NL' in fixed records: like 'N' (fixed decimals) but left-aligned — padded with
        trailing spaces instead of leading zeroes.

        Expected strings below contain significant trailing spaces (field width padding).
        """
        self.edi.ta_info['decimaal']='.'
        self.edi.ta_info['lengthnumericbare']=False
        # field layout: [name, M/C, length, format, mandatory, decimals, minlength, base format]
        tfield1 = ['TEST1','M',5,'NL',True,2, 5, 'N']
        self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '0.00 ','empty string')
        self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '1.00 ', 'basic')
        self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '1.00 ', 'basic')
        self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '1.00 ', 'basic')
        self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '0.00 ','zero stays zero')
        self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0.00','neg.zero stays neg.zero')
        self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('-0.001',tfield1,testdummy), '-0.00','')
        self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
        self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '1.00 ','leading zeroes are removed')
        self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '0.10 ','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('12.1049',tfield1,testdummy), '12.10','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-1.23','numeric field at max with minus and decimal sign')
        self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '1.00 ','strips leading zeroes if possobel')
        self.assertEqual(self.edi._formatfield('+13',tfield1,testdummy), '13.00','strips leading zeroes if possobel')
        self.edi.ta_info['decimaal']=','
        self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), '1,23 ','other dec.sig, replace')
        self.edi.ta_info['decimaal']='.'
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123.1049',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1234.56',tfield1,testdummy) #field too large
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middle of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',' where ',' is not the thousands separator
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'E' without valid exponent digits after it here
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234.100',tfield1,testdummy) #field too big
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
        # test filling up to min length
        tfield2 = ['TEST1', 'M', 8, 'NL', True, 2, 8,'N']
        self.assertEqual(self.edi._formatfield('123.45',tfield2,testdummy), '123.45  ','just large enough')
        self.assertEqual(self.edi._formatfield('123.4549',tfield2,testdummy), '123.45  ','just large enough')
        self.assertEqual(self.edi._formatfield('123.455',tfield2,testdummy), '123.46  ','just large enough')
        self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '0.10    ','keep zeroes after last dec.digit')
        self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '1.00    ','keep leading zeroes')
        self.assertEqual(self.edi._formatfield('12',tfield2,testdummy), '12.00   ','add leading zeroes')
        self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '0.10    ','add leading zeroes')
        # test exp; bots tries to convert to normal notation
        self.assertEqual(self.edi._formatfield('78E+3',tfield2,testdummy), '78000.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-8E+3',tfield2,testdummy), '-8000.00','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-3',tfield2,testdummy), '-0.18   ','add leading zeroes')
        self.assertEqual(self.edi._formatfield('-178e-5',tfield2,testdummy), '-0.00   ','add leading zeroes')
        self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'178E+4',tfield2,testdummy) #too big with exp
        tfield4 = ['TEST1', 'M', 30, 'NL', True, 3, 30,'N']
        self.assertEqual(self.edi._formatfield('1234567890123456',tfield4,testdummy), '1234567890123456.000          ','lot of digits')
        # test NL format with zero decimals: value is reduced to an integer, then left-aligned
        tfield7 = ['TEST1', 'M', 5, 'NL', True, 0, 5, 'N']
        self.assertEqual(self.edi._formatfield('12345',tfield7,testdummy), '12345','')
        self.assertEqual(self.edi._formatfield('1.234',tfield7,testdummy), '1    ','')
        self.assertEqual(self.edi._formatfield('123.4',tfield7,testdummy), '123  ','')
        self.assertEqual(self.edi._formatfield('0.0',tfield7,testdummy), '0    ','')
def test_out_formatfield_fixedNR(self):
self.edi.ta_info['decimaal']='.'
self.edi.ta_info['lengthnumericbare']=False
tfield1 = ['TEST1','M',5,'NR',True,2, 5, 'N']
# length decimals minlength format
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), ' 0.00','empty string')
self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), ' 1.00', 'basic')
self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), ' 1.00', 'basic')
self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), ' 1.00', 'basic')
self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), ' 0.00','zero stays zero')
self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0.00','neg.zero stays neg.zero')
self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0.00','')
self.assertEqual(self.edi._formatfield('-0.001',tfield1,testdummy), '-0.00','')
self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), ' 1.00','leading zeroes are removed')
self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), ' 0.10','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('12.1049',tfield1,testdummy), '12.10','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-1.23','numeric field at max with minus and decimal sign')
self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), ' 1.00','strips leading zeroes if possobel')
self.assertEqual(self.edi._formatfield('+13',tfield1,testdummy), '13.00','strips leading zeroes if possobel')
self.edi.ta_info['decimaal']=','
self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), ' 1,23','other dec.sig, replace')
self.edi.ta_info['decimaal']='.'
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123.1049',tfield1,testdummy) #field too large
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123',tfield1,testdummy) #field too large
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1234.56',tfield1,testdummy) #field too large
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middel of number
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',', where ',' is not traid sep.
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'+' in middle of number (no exp)
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234.100',tfield1,testdummy) #field too big
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
# #test filling up to min length
tfield2 = ['TEST1', 'M', 8, 'NR', True, 2, 8,'N']
self.assertEqual(self.edi._formatfield('123.45',tfield2,testdummy), ' 123.45','just large enough')
self.assertEqual(self.edi._formatfield('123.4549',tfield2,testdummy), ' 123.45','just large enough')
self.assertEqual(self.edi._formatfield('123.455',tfield2,testdummy), ' 123.46','just large enough')
self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), ' 0.10','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), ' 1.00','keep leading zeroes')
self.assertEqual(self.edi._formatfield('12',tfield2,testdummy), ' 12.00','add leading zeroes')
self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), ' 0.10','add leading zeroes')
#test exp; bots tries to convert to normal
self.assertEqual(self.edi._formatfield('78E+3',tfield2,testdummy), '78000.00','add leading zeroes')
self.assertEqual(self.edi._formatfield('-8E+3',tfield2,testdummy), '-8000.00','add leading zeroes')
self.assertEqual(self.edi._formatfield('-178e-3',tfield2,testdummy), ' -0.18','add leading zeroes')
self.assertEqual(self.edi._formatfield('-178e-5',tfield2,testdummy), ' -0.00','add leading zeroes')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'178E+4',tfield2,testdummy) #too big with exp
tfield4 = ['TEST1', 'M', 30, 'NR', True, 3, 30,'N']
self.assertEqual(self.edi._formatfield('1234567890123456',tfield4,testdummy), ' 1234567890123456.000','lot of digits')
#test N format, zero decimals
tfield7 = ['TEST1', 'M', 5, 'NR', True, 0, 5, 'N']
# length decimals minlength format
self.assertEqual(self.edi._formatfield('12345',tfield7,testdummy), '12345','')
self.assertEqual(self.edi._formatfield('1.234',tfield7,testdummy), ' 1','')
self.assertEqual(self.edi._formatfield('123.4',tfield7,testdummy), ' 123','')
self.assertEqual(self.edi._formatfield('0.0',tfield7,testdummy), ' 0','')
def test_out_formatfield_fixedI(self):
self.edi.ta_info['lengthnumericbare']=False
self.edi.ta_info['decimaal']='.'
tfield1 = ['TEST1','M',5,'I',True,2, 5, 'I']
# length decimals minlength format
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '00000','empty string is initialised as 00000')
self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '00100', 'basic')
self.assertEqual(self.edi._formatfield(' 1',tfield1,testdummy), '00100', 'basic')
self.assertEqual(self.edi._formatfield('1 ',tfield1,testdummy), '00100', 'basic')
self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '00000','zero stays zero')
self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0000','neg.zero stays neg.zero')
self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0000','')
self.assertEqual(self.edi._formatfield('-0.001',tfield1,testdummy), '-0000','')
self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0012','no zero before dec,sign is OK') #TODO: puts ) in front
self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '12300','numeric field at max')
self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '00100','leading zeroes are removed')
self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '00010','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('123.1049',tfield1,testdummy), '12310','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-0123','numeric field at max with minus and decimal sign')
self.assertEqual(self.edi._formatfield('0001',tfield1,testdummy), '00100','strips leading zeroes if possobel')
self.assertEqual(self.edi._formatfield('+123',tfield1,testdummy), '12300','strips leading zeroes if possobel')
self.edi.ta_info['decimaal']=','
self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), '00123','other dec.sig, replace')
self.edi.ta_info['decimaal']='.'
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1234.56',tfield1,testdummy) #field too large
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middel of number
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'123-',tfield1,testdummy) #'-' at end of number
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1,3',tfield1,testdummy) #',', where ',' is not traid sep.
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number (no exp)
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1E3',tfield1,testdummy) #'+' in middle of number (no exp)
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' at end
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1234.100',tfield1,testdummy) #field too big
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'.',tfield1,testdummy) #no num
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-',tfield1,testdummy) #no num
#~ #test filling up to min length
tfield2 = ['TEST1', 'M', 8, 'I', True, 2, 8,'I']
self.assertEqual(self.edi._formatfield('123.45',tfield2,testdummy), '00012345','just large enough')
self.assertEqual(self.edi._formatfield('123.4549',tfield2,testdummy), '00012345','just large enough')
self.assertEqual(self.edi._formatfield('123.455',tfield2,testdummy), '00012346','just large enough')
self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '00000010','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '00000100','keep leading zeroes')
self.assertEqual(self.edi._formatfield('12',tfield2,testdummy), '00001200','add leading zeroes')
self.assertEqual(self.edi._formatfield('.1',tfield2,testdummy), '00000010','add leading zeroes')
#test exp; bots tries to convert to normal
self.assertEqual(self.edi._formatfield('178E+3',tfield2,testdummy), '17800000','add leading zeroes')
self.assertEqual(self.edi._formatfield('-17E+3',tfield2,testdummy), '-1700000','add leading zeroes')
self.assertEqual(self.edi._formatfield('-178e-3',tfield2,testdummy), '-0000018','add leading zeroes')
self.assertEqual(self.edi._formatfield('-178e-5',tfield2,testdummy), '-0000000','add leading zeroes')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'178E+4',tfield2,testdummy) #too big with exp
tfield4 = ['TEST1', 'M', 80, 'I', True, 3, 0,'I']
self.assertEqual(self.edi._formatfield('123456789012340',tfield4,testdummy), '123456789012340000','lot of digits')
def test_out_formatfield_fixedD(self):
tfield1 = ['TEST1', 'M', 8, 'D', True, 0, 8,'D']
# length decimals minlength
self.assertEqual(self.edi._formatfield('20071001',tfield1,testdummy), '20071001','basic')
self.assertEqual(self.edi._formatfield('99991001',tfield1,testdummy), '99991001','max year')
self.assertEqual(self.edi._formatfield('00011001',tfield1,testdummy), '00011001','min year')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'2007093112',tfield1,testdummy) #too long
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'20070931',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-0070931',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'70931',tfield1,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0931',tfield1,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0931BC',tfield1,testdummy) #alfanum
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'OOOOBC',tfield1,testdummy) #alfanum
tfield2 = ['TEST1', 'M', 6, 'D', True, 0, 6,'D']
# length decimals minlength
self.assertEqual(self.edi._formatfield('071001',tfield2,testdummy), '071001','basic')
def test_out_formatfield_fixedT(self):
tfield1 = ['TEST1', 'M', 4, 'T', True, 0, 4,'T']
# length decimals minlength
self.assertEqual(self.edi._formatfield('2359',tfield1,testdummy), '2359','basic')
self.assertEqual(self.edi._formatfield('0000',tfield1,testdummy), '0000','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'2401',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1261',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1262',tfield1,testdummy) #no valid date - python allows 61 secnds?
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'2400',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'2501',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-1200',tfield1,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'120',tfield1,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'093123',tfield1,testdummy) #too long
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'11PM',tfield1,testdummy) #alfanum
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'TIME',tfield1,testdummy) #alfanum
tfield2 = ['TEST1', 'M', 6, 'T', True, 0, 6,'T']
# length decimals minlength
self.assertEqual(self.edi._formatfield('000000',tfield2,testdummy), '000000','basic')
self.assertEqual(self.edi._formatfield('230000',tfield2,testdummy), '230000','basic')
self.assertEqual(self.edi._formatfield('235959',tfield2,testdummy), '235959','basic')
self.assertEqual(self.edi._formatfield('123456',tfield2,testdummy), '123456','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'240001',tfield2,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'126101',tfield2,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'120062',tfield2,testdummy) #no valid date - python allows 61 secnds?
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'240000',tfield2,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'250001',tfield2,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'-12000',tfield2,testdummy) #no valid date
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'120',tfield2,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'0931233',tfield2,testdummy) #too short
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'1100PM',tfield2,testdummy) #alfanum
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'11TIME',tfield2,testdummy) #alfanum
def test_out_formatfield_fixedA(self):
tfield1 = ['TEST1', 'M', 5, 'A', True, 0, 5,'A']
# length decimals minlength
self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), 'ab ','basic')
self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), 'a b ','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'abcdef',tfield1,testdummy) #no valid date
tfield1 = ['TEST1', 'M', 5, 'A', True, 0, 5,'A']
# length decimals minlength
self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), 'ab ','basic')
self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), 'a b ','basic')
self.assertEqual(self.edi._formatfield('a',tfield1,testdummy), 'a ','basic')
self.assertEqual(self.edi._formatfield(' ',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield(' ',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield(' ',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), ' ','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'abcdef',tfield1,testdummy) #no valid date
def test_out_formatfield_fixedAR(self):
tfield1 = ['TEST1', 'M', 5, 'AR', True, 0, 5,'A']
# length decimals minlength
self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), ' ab ','basic')
self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), ' a b','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'abcdef',tfield1,testdummy) #no valid date
tfield1 = ['TEST1', 'M', 5, 'AR', True, 0, 5,'A']
# length decimals minlength
self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), ' ab ','basic')
self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), ' a b','basic')
self.assertEqual(self.edi._formatfield('a',tfield1,testdummy), ' a','basic')
self.assertEqual(self.edi._formatfield(' ',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield(' ',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield(' ',tfield1,testdummy), ' ','basic')
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), ' ','basic')
self.assertRaises(botslib.OutMessageError,self.edi._formatfield,'abcdef',tfield1,testdummy) #no valid date
class TestFormatFieldInmessage(unittest.TestCase):
#both var and fixed fields are tested. Is not much difference (white-box testing)
def setUp(self):
#need to have a inmessage-object for tests. Read is a edifile and a grammar.
self.edi = inmessage.edifromfile(frompartner='',
topartner='',
filename='botssys/infile/unitformats/formats01.edi',
messagetype='edifact',
testindicator='0',
editype='edifact',
charset='UNOA',
alt='')
def testformatfieldR(self):
self.edi.ta_info['lengthnumericbare']=True
tfield1 = ['TEST1','M',3,'N',True,0, 0, 'R']
# length decimals minlength format
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '0', 'empty numeric string is accepted, is zero')
self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '1', 'basic')
self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '0','zero stays zero')
self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0','neg.zero stays neg.zero')
self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0.00','')
self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '123','numeric field at max')
self.assertEqual(self.edi._formatfield('001',tfield1,testdummy), '1','leading zeroes are removed')
self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '0.10','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-1.23','numeric field at max with minus and decimal sign')
self.edi.ta_info['decimaal']=','
self.assertEqual(self.edi._formatfield('1,23-',tfield1,testdummy), '-1.23','other dec.sig, replace')
self.edi.ta_info['decimaal']='.'
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'0001',tfield1,testdummy) #leading zeroes; field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-1.234',tfield1,testdummy) #field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middel of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1,3',tfield1,testdummy) #',', where ',' is not traid sep.
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' in middle of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'0.100',tfield1,testdummy) #field too big
#test field to short
tfield2 = ['TEST1', 'M', 8, 'N', True, 0, 5,'R']
self.assertEqual(self.edi._formatfield('12345',tfield2,testdummy), '12345','just large enough')
self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '0.1000','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('00001',tfield2,testdummy), '1','remove leading zeroes')
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1235',tfield2,testdummy) #field too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-12.34',tfield2,testdummy) #field too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-',tfield2,testdummy) #field too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'.',tfield2,testdummy) #field too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-.',tfield2,testdummy) #field too short
#WARN: dubious tests. This is Bots filosophy: be flexible in input, be right in output.
self.assertEqual(self.edi._formatfield('123-',tfield1,testdummy), '-123','numeric field minus at end')
self.assertEqual(self.edi._formatfield('.001',tfield1,testdummy), '0.001','if no zero before dec.sign, length>max.length')
self.assertEqual(self.edi._formatfield('+13',tfield1,testdummy), '13','plus is allowed') #WARN: if plus used, plus is countd in length!!
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12E+3',tfield2,testdummy) #field too large
tfield4 = ['TEST1', 'M', 8, 'N', True, 3, 0,'R']
self.assertEqual(self.edi._formatfield('123.4561',tfield4,testdummy), '123.4561','no checking to many digits incoming') #should round here?
tfield4 = ['TEST1', 'M', 80, 'N', True, 3, 0,'R']
self.assertEqual(self.edi._formatfield('12345678901234560',tfield4,testdummy), '12345678901234560','lot of digits')
self.edi.ta_info['lengthnumericbare']=False
self.assertEqual(self.edi._formatfield('-1.45',tfield2,testdummy), '-1.45','just large enough')
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-12345678',tfield2,testdummy) #field too large
def testformatfieldN(self):
self.edi.ta_info['lengthnumericbare']=True
tfield1 = ['TEST1', 'M', 3, 'R', True, 2, 0,'N']
# length decimals minlength
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'',tfield1,testdummy) #empty string
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1',tfield1,testdummy) #empty string
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'0',tfield1,testdummy) #empty string
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-0',tfield1,testdummy) #empty string
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'01.00',tfield1,testdummy) #empty string
self.assertEqual(self.edi._formatfield('1.00',tfield1,testdummy), '1.00', 'basic')
self.assertEqual(self.edi._formatfield('0.00',tfield1,testdummy), '0.00','zero stays zero')
self.assertEqual(self.edi._formatfield('-0.00',tfield1,testdummy), '-0.00','neg.zero stays neg.zero')
self.assertEqual(self.edi._formatfield('-.12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
self.assertEqual(self.edi._formatfield('1.23',tfield1,testdummy), '1.23','numeric field at max')
self.assertEqual(self.edi._formatfield('0.10',tfield1,testdummy), '0.10','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('-1.23',tfield1,testdummy), '-1.23','numeric field at max with minus and decimal sign')
self.edi.ta_info['decimaal']=','
self.assertEqual(self.edi._formatfield('1,23-',tfield1,testdummy), '-1.23','other dec.sig, replace')
self.edi.ta_info['decimaal']='.'
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1234',tfield1,testdummy) #field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'0001',tfield1,testdummy) #leading zeroes; field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-1.234',tfield1,testdummy) #field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1<3',tfield1,testdummy) #wrong char
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1-3',tfield1,testdummy) #'-' in middel of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1,3',tfield1,testdummy) #',', where ',' is not traid sep.
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1+3',tfield1,testdummy) #'+' in middle of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'13+',tfield1,testdummy) #'+' in middle of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'0.100',tfield1,testdummy) #field too big
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12E+3',tfield1,testdummy) #no exp
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'.',tfield1,testdummy) #no exp
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-',tfield1,testdummy) #no exp
#test field to short
tfield2 = ['TEST1', 'M', 8, 'R', True, 4, 5,'N']
self.assertEqual(self.edi._formatfield('1.2345',tfield2,testdummy), '1.2345','just large enough')
self.assertEqual(self.edi._formatfield('0.1000',tfield2,testdummy), '0.1000','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('001.1234',tfield2,testdummy), '1.1234','remove leading zeroes')
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1235',tfield2,testdummy) #field too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-12.34',tfield2,testdummy) #field too short
#WARN: dubious tests. This is Bots filosophy: be flexible in input, be right in output.
self.assertEqual(self.edi._formatfield('1234.1234-',tfield2,testdummy), '-1234.1234','numeric field - minus at end')
self.assertEqual(self.edi._formatfield('.01',tfield1,testdummy), '0.01','if no zero before dec.sign, length>max.length')
self.assertEqual(self.edi._formatfield('+13.1234',tfield2,testdummy), '13.1234','plus is allowed') #WARN: if plus used, plus is counted in length!!
tfield3 = ['TEST1', 'M', 18, 'R', True, 0, 0,'N']
tfield4 = ['TEST1', 'M', 8, 'R', True, 3, 0,'N']
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'123.4561',tfield4,testdummy) #to many digits
def testformatfieldI(self):
self.edi.ta_info['lengthnumericbare']=True
tfield1 = ['TEST1', 'M', 5, 'I', True, 2, 0,'I']
# length decimals minlength
self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '0.00', 'empty numeric is accepted, is zero')
self.assertEqual(self.edi._formatfield('123',tfield1,testdummy), '1.23', 'basic')
self.assertEqual(self.edi._formatfield('1',tfield1,testdummy), '0.01', 'basic')
self.assertEqual(self.edi._formatfield('0',tfield1,testdummy), '0.00','zero stays zero')
self.assertEqual(self.edi._formatfield('-0',tfield1,testdummy), '-0.00','neg.zero stays neg.zero')
self.assertEqual(self.edi._formatfield('-000',tfield1,testdummy), '-0.00','')
self.assertEqual(self.edi._formatfield('-12',tfield1,testdummy), '-0.12','no zero before dec,sign is OK')
self.assertEqual(self.edi._formatfield('12345',tfield1,testdummy), '123.45','numeric field at max')
self.assertEqual(self.edi._formatfield('00001',tfield1,testdummy), '0.01','leading zeroes are removed')
self.assertEqual(self.edi._formatfield('010',tfield1,testdummy), '0.10','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('-99123',tfield1,testdummy), '-991.23','numeric field at max with minus and decimal sign')
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'123456',tfield1,testdummy) #field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'000100',tfield1,testdummy) #field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'000001',tfield1,testdummy) #leading zeroes; field too large
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12<3',tfield1,testdummy) #wrong char
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12-3',tfield1,testdummy) #'-' in middel of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12,3',tfield1,testdummy) #','.
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12+3',tfield1,testdummy) #'+' in middle of number
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'123+',tfield1,testdummy) #'+'
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12E+3',tfield1,testdummy) #'+'
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-',tfield1,testdummy) #only -
#~ #test field to short
tfield2 = ['TEST1', 'M', 8, 'I', True, 2, 5,'I']
self.assertEqual(self.edi._formatfield('12345',tfield2,testdummy), '123.45','just large enough')
self.assertEqual(self.edi._formatfield('10000',tfield2,testdummy), '100.00','keep zeroes after last dec.digit')
self.assertEqual(self.edi._formatfield('00100',tfield2,testdummy), '1.00','remove leading zeroes')
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'1235',tfield2,testdummy) #field too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-1234',tfield2,testdummy) #field too short
tfield3 = ['TEST1', 'M', 18, 'I', True, 0, 0,'I']
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'12E+3',tfield3,testdummy) #no exponent
#~ #WARN: dubious tests. This is Bots filosophy: be flexible in input, be right in output.
self.assertEqual(self.edi._formatfield('123-',tfield1,testdummy), '-1.23','numeric field minus at end')
self.assertEqual(self.edi._formatfield('+13',tfield1,testdummy), '0.13','plus is allowed') #WARN: if plus used, plus is countd in length!!
def testformatfieldD(self):
tfield1 = ['TEST1', 'M', 20, 'D', True, 0, 0,'D']
# length decimals minlength
self.assertEqual(self.edi._formatfield('20071001',tfield1,testdummy), '20071001','basic')
self.assertEqual(self.edi._formatfield('071001',tfield1,testdummy), '071001','basic')
self.assertEqual(self.edi._formatfield('99991001',tfield1,testdummy), '99991001','max year')
self.assertEqual(self.edi._formatfield('00011001',tfield1,testdummy), '00011001','min year')
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'2007093112',tfield1,testdummy) #too long
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'20070931',tfield1,testdummy) #no valid date
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-0070931',tfield1,testdummy) #no valid date
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'70931',tfield1,testdummy) #too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'0931',tfield1,testdummy) #too short
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'0931BC',tfield1,testdummy) #alfanum
self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'OOOOBC',tfield1,testdummy) #alfanum
def testformatfieldT(self):
    ''' Incoming field format 'T' (time): HHMM / HHMMSS (and 7/8 chars, presumably
        with fractional seconds - TODO confirm), plus invalid-time errors. '''
    tfield1 = ['TEST1', 'M', 10, 'T', True, 0, 0,'T']
    #                    length          decimals minlength
    self.assertEqual(self.edi._formatfield('2359',tfield1,testdummy), '2359','basic')
    self.assertEqual(self.edi._formatfield('0000',tfield1,testdummy), '0000','basic')
    self.assertEqual(self.edi._formatfield('000000',tfield1,testdummy), '000000','basic')
    self.assertEqual(self.edi._formatfield('230000',tfield1,testdummy), '230000','basic')
    self.assertEqual(self.edi._formatfield('235959',tfield1,testdummy), '235959','basic')
    self.assertEqual(self.edi._formatfield('123456',tfield1,testdummy), '123456','basic')
    self.assertEqual(self.edi._formatfield('0931233',tfield1,testdummy), '0931233','basic')
    self.assertEqual(self.edi._formatfield('09312334',tfield1,testdummy), '09312334','basic')
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'240001',tfield1,testdummy) #no valid time (hour 24)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'126101',tfield1,testdummy) #no valid time (minute 61)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'120062',tfield1,testdummy) #no valid time (second 62) - python itself allows 61 seconds (leap second)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'240000',tfield1,testdummy) #no valid time (hour 24)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'250001',tfield1,testdummy) #no valid time (hour 25)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'-12000',tfield1,testdummy) #no valid time (negative)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'120',tfield1,testdummy) #too short
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'11PM',tfield1,testdummy) #alfanum
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'TIME',tfield1,testdummy) #alfanum
def testformatfieldA(self):
    ''' Incoming field format 'A' (alfanumeric): surrounding spaces are stripped,
        then length/minlength are checked.
        NOTE(review): FORMAT is 'T' but BFORMAT (last element) is 'A' -
        presumably only BFORMAT matters for _formatfield; confirm. '''
    tfield1 = ['TEST1', 'M', 5, 'T', True, 0, 0,'A']
    #                    length         decimals minlength
    self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
    self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '','basic')
    self.assertEqual(self.edi._formatfield('',tfield1,testdummy), '','basic')
    self.assertEqual(self.edi._formatfield(' ab',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield(' ab',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), 'a b','basic')
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'abcdef',tfield1,testdummy) #too long
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'ab ',tfield1,testdummy) #too long (space-padded beyond max length - TODO confirm padding width)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,' ab',tfield1,testdummy) #too long (space-padded beyond max length - TODO confirm padding width)
    tfield1 = ['TEST1', 'M', 5, 'T', True, 0, 2,'A']
    #                    length         decimals minlength=2
    self.assertEqual(self.edi._formatfield('abcde',tfield1,testdummy), 'abcde','basic')
    self.assertEqual(self.edi._formatfield(' ab',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield(' ab',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield('ab ',tfield1,testdummy), 'ab','basic')
    self.assertEqual(self.edi._formatfield('a b',tfield1,testdummy), 'a b','basic')
    self.assertEqual(self.edi._formatfield(' ',tfield1,testdummy), '','basic')
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'a',tfield1,testdummy) #too short (minlength 2)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'abcdef',tfield1,testdummy) #too long
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'ab ',tfield1,testdummy) #too long (space-padded beyond max length - TODO confirm padding width)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,' ab',tfield1,testdummy) #too long (space-padded beyond max length - TODO confirm padding width)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,' ',tfield1,testdummy) #strips to empty: too short (minlength 2)
    self.assertRaises(botslib.InMessageFieldError,self.edi._formatfield,'',tfield1,testdummy) #empty: too short (minlength 2)
def testEdifact0402(self):
    ''' Run unit-format tests over prepared edifact files.
        Files named *F.edi must fail (raise); files named *T.edi must parse OK. '''
    # old format tests are run
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040201F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040202F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040203F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040204F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040205F.edi')
    #040206-040209 raise the more general InMessageError (message-level, not field-level)
    self.assertRaises(botslib.InMessageError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040206F.edi')
    self.assertRaises(botslib.InMessageError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040207F.edi')
    self.assertRaises(botslib.InMessageError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040208F.edi')
    self.assertRaises(botslib.InMessageError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040209F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040210F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040211F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040212F.edi')
    self.failUnless(inmessage.edifromfile(editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040214T.edi'), 'standaard test')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040215F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040217F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040218F.edi')
    self.assertRaises(botslib.InMessageFieldError,inmessage.edifromfile,editype='edifact',
                        messagetype='edifact',filename='botssys/infile/unitformats/040219F.edi')
if __name__ == '__main__':
    # standalone run: initialise bots configuration and engine logging, then run the unit tests
    botsinit.generalinit('config')
    #~ botslib.initbotscharsets()
    botsinit.initenginelogging()
    unittest.main()
| Python |
import sys
import copy
import glob
import os

import bots.inmessage as inmessage
import bots.outmessage as outmessage
def comparenode(node1,node2org):
    ''' Compare two message trees; print the first difference found.
        Returns True when equal, False otherwise.
        node2org is deep-copied first because the recursive compare
        destructively removes matched children from node2. '''
    node2 = copy.deepcopy(node2org)
    if node1.record is not None and node2.record is None:
        print 'node2 is "None"'
        return False
    if node1.record is None and node2.record is not None:
        print 'node1 is "None"'
        return False
    return comparenodecore(node1,node2)
def comparenodecore(node1,node2):
    ''' Recursive part of comparenode: compare records and (unordered) children.
        NOTE: removes matched children from node2, so callers must pass a copy.
        Prints the first difference; returns True when the (sub)trees are equal. '''
    if node1.record is None and node2.record is None:
        pass    # both records empty (eg both empty roots): equal at this level
    else:
        # records must contain the same keys with the same values; checked both ways
        for key,value in node1.record.items():
            if key not in node2.record:
                print 'key not in node2', key,value
                return False
            elif node2.record[key]!=value:
                print 'unequal attr', key,value,node2.record[key]
                return False
        for key,value in node2.record.items():
            if key not in node1.record:
                print 'key not in node1', key,value
                return False
            elif node1.record[key]!=value:
                print 'unequal attr', key,value,node1.record[key]
                return False
    if len(node1.children) != len(node2.children):
        print 'number of children not equal'
        return False
    # children are matched regardless of order: find a node2-child with the same
    # BOTSID; the first such candidate must compare equal (no backtracking).
    for child1 in node1.children:
        for i,child2 in enumerate(node2.children):
            if child1.record['BOTSID'] == child2.record['BOTSID']:
                if comparenodecore(child1,child2) != True:
                    return False
                del node2.children[i:i+1]   # remove matched child so it cannot match twice
                break
        else:
            print 'Found no matching record in node2 for',child1.record
            return False
    return True
def readfilelines(bestand):
    ''' Return the contents of file <bestand> as a list of lines (universal newlines). '''
    # BUGFIX: use a context manager so the file handle is not leaked when
    # readlines() raises. Mode 'rU' is needed under python 2 for universal
    # newlines; the 'U' flag was removed in python 3.11, where plain 'r'
    # already gives universal-newline behaviour.
    mode = 'rU' if sys.version_info[0] == 2 else 'r'
    with open(bestand, mode) as filehandle:
        return filehandle.readlines()
def readfile(bestand):
    ''' Return the full contents of file <bestand> as one string (universal newlines). '''
    # BUGFIX: context manager prevents a handle leak when read() raises.
    # 'rU' is required under python 2; the flag was removed in python 3.11,
    # where mode 'r' already uses universal newlines.
    mode = 'rU' if sys.version_info[0] == 2 else 'r'
    with open(bestand, mode) as filehandle:
        return filehandle.read()
def readwrite(filenamein='', filenameout='', **args):
    ''' Translate edi file <filenamein> and write the resulting tree to <filenameout>.
        Extra keyword arguments (editype, messagetype, ...) are passed through. '''
    incoming = inmessage.edifromfile(filename=filenamein, **args)
    # build the outmessage object, graft the parsed tree onto it, and write
    outgoing = outmessage.outmessage_init(filename=filenameout, divtext='', topartner='', **args)
    outgoing.root = incoming.root
    outgoing.writeall()
def getdirbysize(path):
    ''' Return the files matching glob pattern <path>, sorted by file size (ascending). '''
    filenames = getdir(path)
    filenames.sort(key=os.path.getsize)
    return filenames
def getdir(path):
    ''' Return the plain files matching glob pattern <path>; directories are excluded. '''
    matches = glob.glob(path)
    return [name for name in matches if os.path.isfile(name)]
| Python |
import os
import sys
import copy
import inmessage
import outmessage
import botslib
import botsinit
import botsglobal
from botsconfig import *
#buggy
#in usersys/grammars/xmlnocheck should be a file xmlnocheck
#usage: c:\python25\python bots-xml2botsgrammar.py botssys/infile/test.xml botssys/infile/resultgrammar.py -cconfig
def treewalker(node, mpath):
    ''' Depth-first generator over the descendants of <node>.
        Yields (childnode, path) pairs where path is a snapshot (copy) of the
        list of {'BOTSID': ...} dicts leading up to - and including - <node>.
        <mpath> is mutated while walking (append on entry, pop on exit). '''
    mpath.append({'BOTSID': node.record['BOTSID']})
    for child in node.children:
        yield child, mpath[:]
        for pair in treewalker(child, mpath):
            yield pair
    mpath.pop()
def writefields(tree, node, mpath):
    ''' Put all non-BOTSID fields of <node> into <tree> at <mpath>,
        using the placeholder value u'dummy' for every field.
        <mpath> itself is left untouched (a deep copy is modified). '''
    target = copy.deepcopy(mpath)
    for fieldname in node.record.keys():
        if fieldname != 'BOTSID':
            target[-1][fieldname] = u'dummy'
    tree.put(*target)
def tree2grammar(node, structure, recorddefs):
    ''' Recursively derive the grammar 'structure' and 'recorddefs' from
        normalised tree <node>; both output arguments are extended in place. '''
    botsid = node.record['BOTSID']
    structure.append({ID: botsid, MIN: 0, MAX: 99999, LEVEL: []})
    fieldlist = [[fieldname, 'C', 256, 'AN'] for fieldname in node.record.keys()]
    if botsid in recorddefs:
        # record seen before: merge, keeping each field definition only once
        recorddefs[botsid] = removedoublesfromlist(recorddefs[botsid] + fieldlist)
    else:
        recorddefs[botsid] = fieldlist
    for child in node.children:
        tree2grammar(child, structure[-1][LEVEL], recorddefs)
def recorddefs2string(recorddefs,sortedstructurelist):
    ''' Render <recorddefs> as python source text, in the record order given by
        <sortedstructurelist>. Per record the field order is: BOTSID first
        (its requirement is forced to 'M'), then xml-attribute fields (names
        containing '__'), then all remaining fields.
        NOTE: mutates recorddefs in place (BOTSID field set to mandatory). '''
    recorddefsstring = "{\n"
    for i in sortedstructurelist:
        recorddefsstring += " '%s':\n [\n"%i
        # first pass: the BOTSID field only (forced mandatory); break after it
        for field in recorddefs[i]:
            if field[0]=='BOTSID':
                field[1]='M'
                recorddefsstring += " %s,\n"%field
                break
        # second pass: xml attribute fields ('parent__attribute' names)
        for field in recorddefs[i]:
            if '__' in field[0]:
                recorddefsstring += " %s,\n"%field
        # third pass: the ordinary fields
        for field in recorddefs[i]:
            if field[0]!='BOTSID' and '__' not in field[0]:
                recorddefsstring += " %s,\n"%field
        recorddefsstring += " ],\n"
    recorddefsstring += " }\n"
    return recorddefsstring
def structure2string(structure,level=0):
    ''' Render grammar <structure> (list of record dicts) as indented python
        source text; <level> controls the indentation of nested LEVELs. '''
    structurestring = ""
    for i in structure:
        structurestring += level*" " + "{ID:'%s',MIN:%s,MAX:%s"%(i[ID],i[MIN],i[MAX])
        recursivestructurestring = structure2string(i[LEVEL],level+1)
        if recursivestructurestring:
            # record has children: emit a LEVEL list and close afterwards
            structurestring += ",LEVEL:[\n" + recursivestructurestring + level*" " + "]},\n"
        else:
            structurestring += "},\n"
    return structurestring
def structure2list(structure):
    ''' Flatten grammar <structure> into a list of unique record IDs,
        in first-seen (depth-first) order. '''
    return removedoublesfromlist(structure2listcore(structure))
def removedoublesfromlist(orglist):
    ''' Return a new list with duplicates removed, keeping first-seen order.
        Uses list membership (not a set) because elements may be unhashable
        (field definitions are lists). '''
    unique = []
    for element in orglist:
        if element not in unique:
            unique.append(element)
    return unique
def structure2listcore(structure):
    ''' Collect all record IDs from <structure>, depth-first, duplicates included. '''
    collected = []
    for record in structure:
        collected.append(record[ID])
        collected.extend(structure2listcore(record[LEVEL]))
    return collected
def showusage():
    ''' Print command line usage and exit with status 0. '''
    print
    print 'Usage:'
    print ' %s -c<directory> <xml_file> <xml_grammar_file>'%os.path.basename(sys.argv[0])
    print
    print ' Creates a grammar from an xml file.'
    print ' Options:'
    print " -c<directory> directory for configuration files (default: config)."
    print ' <xml_file> name of the xml file to read'
    print ' <xml_grammar_file> name of the grammar file to write'
    print
    sys.exit(0)
def start():
    ''' Command line entry point: convert an xml file into a bots grammar file.
        Steps: parse arguments; read the xml as an 'xmlnocheck' message;
        normalise it into an out-tree; derive structure + recorddefs;
        write the grammar file. '''
    #********command line arguments**************************
    edifile =''
    grammarfile = ''
    configdir = 'config'
    for arg in sys.argv[1:]:
        if not arg:
            continue
        if arg.startswith('-c'):
            configdir = arg[2:]
            if not configdir:
                print ' !!Indicated Bots should use specific .ini file but no file name was given.'
                showusage()
        elif arg in ["?", "/?"] or arg.startswith('-'):
            showusage()
        else:
            # first positional argument is the xml input, second the grammar output
            if not edifile:
                edifile = arg
            else:
                grammarfile = arg
    if not (edifile and grammarfile):
        print ' !!Both edifile and grammarfile are required.'
        showusage()
    #********end handling command line arguments**************************
    editype='xmlnocheck'
    messagetype='xmlnocheckxxxtemporaryforxml2grammar'
    mpath = []
    botsinit.generalinit(configdir)
    os.chdir(botsglobal.ini.get('directories','botspath'))
    botsinit.initenginelogging()
    #the xml file is parsed as an xmlnocheck message....so a (temp) xmlnocheck grammar is needed....without content... this file is not removed....
    tmpgrammarfile = botslib.join(botsglobal.ini.get('directories','usersysabs'),'grammars',editype,messagetype+'.py')
    f = open(tmpgrammarfile,'w')
    f.close()
    inn = inmessage.edifromfile(editype=editype,messagetype=messagetype,filename=edifile)
    #~ inn.root.display()
    out = outmessage.outmessage_init(editype=editype,messagetype=messagetype,filename='botssys/infile/unitnode/output/inisout03.edi',divtext='',topartner='') #make outmessage object
    #handle the root record separately: treewalker only yields children
    rootmpath = [{'BOTSID':inn.root.record['BOTSID']}]
    out.put(*rootmpath)
    writefields(out,inn.root,rootmpath)
    #walk tree; write results to out-tree
    for node,mpath in treewalker(inn.root,mpath):
        mpath.append({'BOTSID':node.record['BOTSID']})
        if out.get(*mpath) is None:   #add each record to the out-tree only once
            out.put(*mpath)
        writefields(out,node,mpath)
    #~ out.root.display()
    #out-tree is finished; represents 'normalised' tree suited for writing as a grammar
    structure = []
    recorddefs = {}
    tree2grammar(out.root,structure,recorddefs)
    #~ for key,value in recorddefs.items():
        #~ print key,value
        #~ print '\n'
    sortedstructurelist = structure2list(structure)
    recorddefsstring = recorddefs2string(recorddefs,sortedstructurelist)
    structurestring = structure2string(structure)
    #write grammar file
    grammar = open(grammarfile,'wb')
    grammar.write('#grammar automatically generated by bots open source edi translator.')
    grammar.write('\n')
    grammar.write('from bots.botsconfig import *')
    grammar.write('\n\n')
    grammar.write('syntax = {}')
    grammar.write('\n\n')
    grammar.write('structure = [\n%s]\n'%(structurestring))
    grammar.write('\n\n')
    grammar.write('recorddefs = %s'%(recorddefsstring))
    grammar.write('\n\n')
    grammar.close()
    print 'grammar file is written',grammarfile
if __name__ == '__main__':
    # script entry point: all work is done in start()
    start()
| Python |
#constants/definitions for Bots
#to be used as: from bots.botsconfig import *

#values for 'statust' (technical status) in ta (transaction):
OPEN = 0    #Bots always closes a transaction; a remaining OPEN is a severe error
ERROR = 1   #error in transaction.
OK = 2      #successful, result is 'safe'. But processing has stopped: next step gave an error, or no next steps defined
DONE = 3    #successful, and result is picked up by next step

#values for 'status' (position in the processing chain) in ta:
PROCESS = 1
DISCARD = 3
EXTERNIN = 200  #transaction is OK; file is exported; out of reach
RAWIN = 210     #the file as received, unprocessed; eg mail is in email-format (headers, body, attachments)
MIMEIN = 215    #mime is checked and read; mime-info (sender, receiver) is in db-ta
FILEIN = 220    #received edifile; ready for further use
SET_FOR_PROCESSING = 230
TRANSLATE = 300 #file to be translated
PARSED = 310    #the edifile is lexed and parsed
SPLITUP = 320   #the edimessages in the PARSED edifile have been split up
TRANSLATED = 330 #edimessage is result of translation
MERGED = 400    #is enveloped
FILEOUT = 500   #edifile ready to be 'send' (just the edi-file)
RAWOUT = 510    #file in send format eg email format (including headers, body, attachments)
EXTERNOUT = 520 #transaction is complete; file is exported; out of reach

#grammar.structure: indices into a structure record:
ID = 0
MIN = 1
MAX = 2
COUNT = 3
LEVEL = 4
MPATH = 5
FIELDS = 6
QUERIES = 7
SUBTRANSLATION = 8
BOTSIDnr = 9

#grammar.recorddefs: indices into a field definition, eg: record[FIELDS][ID] == 'C124.0034'
#ID = 0 is already defined above
MANDATORY = 1
LENGTH = 2
SUBFIELDS = 2   #for composites
FORMAT = 3      #format as written in the grammar file
ISFIELD = 4
DECIMALS = 5
MINLENGTH = 6
BFORMAT = 7     #internal bots format; formats in grammar are converted to bformat

#modules inmessage, outmessage: indices into a field of a record in self.records:
#ID = 0 is already defined above
VALUE = 1
POS = 2
LIN = 3
SFIELD = 4      #boolean: True: is subfield, False: field or first element of composite
#MPATH = 5 is already defined above; only set for the first field (=recordID)
FIXEDLINE = 6   #for fixed records; temporary storage of the fixed record
FORMATFROMGRAMMAR = 7 #to store the FORMAT a field has in the grammar
| Python |
"""
sef2bots.py
Command line params: sourcefile.sef targetfile.py
Optional command line params: -seq, -nostruct, -norecords
Converts a SEF grammar into a Bots grammar. If targetfile exists (and is writeable),
it will be overwritten.
If -seq is specified, field names in record definitions will be
sequential (TAG01, TAG02, ..., where TAG is the record tag) instead of
the 'normal' field names.
If -nostruct is specified, the Bots grammar variable 'structure' will be left
out; if -norecords is specified, the 'recorddefs' variable will be left out.
Parses the .SETS, .SEGS, .COMS and .ELMS sections. Any other sections are ignored.
(Mostly) assumes correct SEF syntax. May well break on some syntax errors.
If there are multiple .SETS sections, only the last one is processed. If there are
multiple message definitions in the (last) .SETS section, only the last one is
processed.
If there are multiple definitions of a segment, only the first one is taken
into account.
If there are multiple definitions of a field, only the last one is taken into
account.
Skips ^ and ignores .!$-&*@ in segment/field refs.
Also ignores syntax rules and dependency notes.
Changes seg max of '>1' to 99999 and elm maxlength to 99999 if 0 or > 99999
If you don't like that, change the 'constants' MAXMAX and/or MAXLEN below
"""
MAXMAX = 99999 # for dealing with segs/groups with max '>1'
MAXLEN = 99999 # for overly large elm maxlengths
TAB = ' '
import sys
import os
import copy
import atexit
import traceback
def showusage(scriptname):
    ''' Print command line usage and exit with status 0. '''
    print "Usage: python %s [-seq] [-nostruct] [-norecords] sourcefile targetfile" % scriptname
    print " Convert SEF grammar in <sourcefile> into Bots grammar in <targetfile>."
    print " Option -seq : use sequential numbered fields in record definitions instead of field names/ID's."
    print " Option -nostruct : the 'structure' will not be written."
    print " Option -norecords : the 'records' will not be written."
    print
    sys.exit(0)
class SEFError(Exception):
    """ Raised when the SEF source cannot be parsed as expected. """
class StructComp(object):
    """ One component of the Bots grammar variable 'structure':
        a segment or a segment group (with nested components in self.sub). """
    def __init__(self, tag, min, max, sub = None):
        # NOTE: parameter names 'min'/'max' shadow builtins; kept for interface compatibility.
        self.id = tag
        self.min = min
        self.max = max
        self.sub = sub if sub else []
    def tostring(self, tablevel = 0):
        """ Render this component (and its children) as python grammar source. """
        indent = tablevel * TAB
        text = indent + "{ID: '%s', MIN: %d, MAX: %d" % (self.id, self.min, self.max)
        if self.sub:
            inner = ",\n".join(child.tostring(tablevel + 1) for child in self.sub)
            text += ", LEVEL: [\n" + inner + ",\n" + indent + "]"
        text += "}"
        return text
class RecDef(object):
    """ A record/segment definition; rendered into the Bots grammar
        variable 'recorddefs' (self.sub holds the FieldDef entries). """
    def __init__(self, tag, sub = None):
        self.id = tag
        self.sub = sub if sub else []
    def tostring(self, useseq = False, tablevel = 0):
        """ Render this record and its fields as python grammar source. """
        indent = tablevel * TAB
        body = "\n".join(field.tostring(useseq, tablevel + 1) for field in self.sub)
        return indent + "'%s': [\n" % (self.id) + body + "\n" + indent + "],"
class FieldDef(object):
    """ A composite or non-composite field; composites carry their
        subfields in self.sub. Lengths are kept as strings. """
    def __init__(self, tag, req = 'C', minlen = '', maxlen = '', type = 'AN', sub = None, freq = 1, seq = None):
        self.id = tag
        self.req = req          # 'M' (mandatory) or 'C' (conditional)
        self.minlen = minlen
        self.maxlen = maxlen
        self.type = type
        self.sub = sub if sub else []
        self.freq = freq        # repeat count
        self.seq = seq          # sequential name, used with option -seq
    def tostring(self, useseq = False, tablevel = 0):
        """ Render this field as python grammar source; sequential name when useseq. """
        fldname = self.seq if useseq else self.id
        indent = tablevel * TAB
        if self.sub:
            # composite: emit the subfields as a nested list
            inner = "\n".join(subfield.tostring(useseq, tablevel + 1) for subfield in self.sub)
            return indent + "['%s', '%s', [\n" % (fldname, self.req) + inner + "\n" + indent + "]],"
        if self.minlen.strip() == '1':
            # min length 1 is the default: emit max length only
            return indent + "['%s', '%s', %s, '%s']" % (fldname, self.req, self.maxlen, self.type) + ","
        return indent + "['%s', '%s', (%s, %s), '%s']" % (fldname, self.req, self.minlen, self.maxlen, self.type) + ","
def split2(line, seps):
    """
    Split <line> at the first occurrence of any character in <seps>.
    Return pair (text_before_separator, separator_plus_rest_of_line).
    If no separator occurs, return pair ('', <line>).
    """
    for position, character in enumerate(line):
        if character in seps:
            return line[:position], line[position:]
    return '', line
def do_set(line):
    """
    Convert one .SETS definition line ('name=^[SEG...]...') into a Bots
    grammar 'structure'. Returns the *contents* of the structure, as a string.
    """
    spec = line.split('=')[1].lstrip('^')
    comps = readcomps(spec)
    # the first component is the message root; the rest become its children
    root = comps[0]
    root.sub = comps[1:]
    return root.tostring()
def readcomps(line):
    """ Parse every component from a .SETS line; return them in a (possibly nested) list. """
    comps = []
    while line:
        component, line = readcomp(line)
        comps.append(component)
    return comps
def displaystructure(comps,tablevel=0):
    ''' Debug helper: print the parsed component tree with indentation. '''
    for i in comps:
        print tablevel*TAB, i.id,i.min,i.max
        if i.sub:
            displaystructure(i.sub,tablevel+1)
def readcomp(line):
    """
    Parse one component: a segment (starts with '[') or a segment group
    (starts with '{'). Returns pair (component, rest_of_line);
    (None, '') when nothing is left on the line.
    """
    discard, line = split2(line, "[{")
    if not line:
        return None, ''
    opener = line[0]
    if opener == '[':
        return readseg(line)
    if opener == '{':
        return readgroup(line)
    # defensive: split2 should leave the line starting with '[' or '{'
    raise SEFError("readcomp() - unexpected character at start of: %s" % line)
def readseg(line):
    """ Read a single segment ref '[tag,req,max]'. Return pair (StructComp, rest_of_line).
        NOTE(review): assumes 1-3 comma-separated components; with more than 3,
        tag/req stay unbound and a NameError follows - malformed SEF is not guarded.
    """
    discard, line = line.split('[', 1)
    segstr, line = line.split(']', 1)
    components = segstr.split(',')
    num = len(components)
    maxstr = ''
    if num == 3:
        tag, req, maxstr = components
    elif num == 2:
        tag, req = components
    elif num == 1:
        tag, req = components[0], 'C'
    # mandatory segment: at least one occurrence; NOTE: 'min'/'max' shadow builtins
    if req == 'M':
        min = 1
    else:
        min = 0
    if tag[0] in ".!$-&":   #skip modifier character before the tag
        tag = tag[1:]
    if '*' in tag:          #drop mask/modifier suffix
        tag = tag.split('*')[0]
    if '@' in tag:          #drop dependency-note suffix
        tag = tag.split('@')[0]
    if tag.upper() == 'LS':
        print "LS segment found"
    if not maxstr:
        max = 1
    elif maxstr == '>1':    #unbounded repeat: replaced by large constant (see module docstring)
        max = MAXMAX
        print "Changed max for seg '%s' to %d (orig. %s)" % (tag, MAXMAX, maxstr)
    else:
        max = int(maxstr)
    return StructComp(tag, min, max), line
def readgroup(line):
    """ Read a segment group '{tag:max[...]...}'. Return pair (group_StructComp, rest_of_line).
        The group's first segment becomes the group header: its tag and min
        are copied onto the group, and it is removed from the group's sub list. """
    discard, line = line.split('{', 1)
    tag, line = split2(line, ':+-[{')
    maxstr = ''
    if line[0] == ':': # next element can be group.max
        maxstr, line = split2(line[1:], '+-[{')
    discard, line = split2(line, "[{")
    group = StructComp(tag, 0, 0) # dummy values for group. This is later on adjusted
    # read components until the closing '}' (or end of line)
    done = False
    while not done:
        if not line or line[0] == '}':
            done = True
        else:
            comp, line = readcomp(line)
            group.sub.append(comp)
    if group.sub:
        header = group.sub[0]
        group.id = header.id #use right tag for header segment
        if header.min > group.min:
            group.min = header.min
        group.sub = group.sub[1:]
    if not maxstr:
        group.max = 1
    else:
        if maxstr != '>1':
            group.max = int(maxstr)
        else:
            #unbounded repeat: replaced by large constant (see module docstring)
            group.max = MAXMAX
            if tag:
                oldtag = tag
            else:
                oldtag = group.id
            print "Changed max for group '%s' to %d (orig. %s)" % (oldtag, MAXMAX, maxstr)
    return group, line[1:]   #line[1:]: skip the closing '}'
def comdef(line, issegdef = False):
    """
    Parse a .SEGS or .COMS definition line (syntactically identical;
    defaults to composite).
    Returns a RecDef for a segment, a FieldDef for a composite.
    """
    tag, spec = line.split('=')
    record = RecDef(tag) if issegdef else FieldDef(tag)
    record.sub = getfields(spec)[0]
    return record
def getfields(line, isgroup = False):
    """ Recursively read field refs (and repeated field groups) from a
        seg/com definition. Returns pair (fieldlist, rest_of_line).
        NOTE(review): assumes well-formed SEF. An unexpected leading character
        falls off the end and returns None; an empty line inside a group
        raises IndexError on line[0]. """
    if isgroup and line[0] == '}':   #end of a repeated field group
        return [], line[1:]
    if not isgroup and not line:
        return [], ''
    if not isgroup and line[0] in ",+":
        return [], line[1:]
    if line[0] == '[':
        field, line = getfield(line[1:])
        # a field with freq>1 is expanded to freq fields; the copies become conditional
        multifield = [field]
        for i in range(1, field.freq):
            extrafield = copy.deepcopy(field)
            extrafield.req = 'C'
            multifield.append(extrafield)
        fields, line = getfields(line, isgroup)
        return multifield + fields, line
    if line[0] == '{':
        # repeated group of fields '{mult[...]...}': expanded to mult copies
        multstr, line = split2(line[1:], "[{")
        if not multstr:
            mult = 1
        else:
            mult = int(multstr)
        group, line = getfields(line, True)
        repgroup = []
        for i in range(mult):
            repgroup += copy.deepcopy(group)
        fields, line = getfields(line, isgroup)
        return repgroup + fields, line
def getfield(line):
    """ Parse a single field reference up to (and excluding) the closing ']'.
        Returns pair (FieldDef, rest_of_line); rest is '' when no ']' occurs. """
    head, closer, rest = line.partition(']')
    return fielddef(head), rest
def fielddef(line):
    """
    Parse a field reference: tag, requirement ('M' else 'C'), min/max length
    overrides (';min:max') and frequency/repeat count.
    Return FieldDef.
    """
    if line[0] in ".!$-&":      #skip modifier character before the tag
        line = line[1:]
    if ',' not in line:
        req, freq = 'C', 1
    else:
        splits = line.split(',')
        num = len(splits)
        if num == 3:            #tag,req,freq
            line, req, freq = splits
            freq = int(freq)
        elif num == 2:          #tag,req
            (line, req), freq = splits, 1
        else:                   #malformed: keep the tag, use defaults
            line, req, freq = splits[0], 'C', 1
    if req != 'M':              #anything that is not mandatory is conditional
        req = 'C'
    if ';' not in line:
        lenstr = ''
    else:
        line, lenstr = line.split(';')
    if '@' in line:             #drop dependency-note suffix
        line, discard = line.split('@', 1)
    if not lenstr:
        minlen = maxlen = ''
    elif ':' in lenstr:
        minlen, maxlen = lenstr.split(':')
    else:
        # BUGFIX: a single length override ';n' (no ':') used to leave maxlen
        # unbound, raising NameError at the FieldDef call below. Treat it as
        # the min length; maxlen stays '' and is later filled in from the
        # .ELMS definition (see getelmsinfo).
        minlen, maxlen = lenstr, ''
    return FieldDef(line, req = req, minlen = minlen, maxlen = maxlen, freq = freq)
def elmdef(line):
    """ Read an .ELMS definition 'tag=type,minlen,maxlen'; return FieldDef.
        A max length that is 0, non-numeric or larger than MAXLEN is replaced
        by MAXLEN (see module docstring). """
    tag, spec = line.split('=')
    type, minlenstr, maxlenstr = spec.split(',')   #NOTE: 'type' shadows the builtin
    try:
        maxlen = int(maxlenstr)
    except ValueError:
        maxlen = 0
    if maxlen == 0 or maxlen > MAXLEN:
        print "Changed max length for elm '%s' to %d (orig. %s)" % (tag, MAXLEN, maxlenstr)
        maxlenstr = str(MAXLEN)
    elm = FieldDef(tag, minlen = minlenstr, maxlen = maxlenstr, type = type)
    return elm
def getelmsinfo(elms, coms):
    """
    Copy types and lengths from the elm definitions into the composite defs,
    and rename repeated subfields to 'id#n' so names stay unique.
    Raises SEFError when a subfield has no elm definition.
    """
    for comid in coms:
        com = coms[comid]
        counters = {}   #occurrences of each subfield id seen so far
        sfids = [sf.id for sf in com.sub]
        for i, sfield in enumerate(com.sub):
            sfield.seq = "%02d" % (i + 1)   #sequential name, used with option -seq
            if sfield.id not in elms:
                raise SEFError("getelmsinfo() - no subfield definition found for element '%s'" % sfield.id)
            elm = elms[sfield.id]
            #lengths given on the field ref override the elm definition
            if not sfield.minlen:
                sfield.minlen = elm.minlen
            if not sfield.maxlen:
                sfield.maxlen = elm.maxlen
            sfield.type = elm.type
            if sfield.id not in counters:
                counters[sfield.id] = 1
            else:
                counters[sfield.id] += 1
            #rename when this id occurred before, or occurs again further on
            if counters[sfield.id] > 1 or sfield.id in sfids[i + 1:]:
                sfield.id += "#%d" % counters[sfield.id]
def getfieldsinfo(elms, coms, segs):
    """
    Copy types and lengths from elm defs and com defs into the seg defs;
    rename repeated fields to 'id#n'; prefix composite subfields with their
    parent's name; finally insert the mandatory BOTSID element at the front.
    Raises SEFError when a field has neither an elm nor a com definition.
    """
    for seg in segs:
        counters = {}   #occurrences of each field id seen so far
        fids = [f.id for f in seg.sub]
        for i, field in enumerate(seg.sub):
            field.seq = "%s%02d" % (seg.id, i + 1)   #sequential name, eg 'SEG01'
            iscomposite = False
            if field.id in elms:
                elm = elms[field.id]
                field.type = elm.type
                #lengths given on the field ref override the elm definition
                if not field.minlen:
                    field.minlen = elm.minlen
                if not field.maxlen:
                    field.maxlen = elm.maxlen
            elif field.id in coms:
                iscomposite = True
                com = coms[field.id]
                field.sub = copy.deepcopy(com.sub)
            else:
                raise SEFError("getfieldsinfo() - no field definition found for element '%s'" % field.id)
            if not field.id in counters:
                counters[field.id] = 1
            else:
                counters[field.id] += 1
            #rename when this id occurred before, or occurs again further on
            if counters[field.id] > 1 or field.id in fids[i + 1:]:
                field.id += "#%d" % counters[field.id]
            if iscomposite:
                #subfields get their parent's (possibly renamed) name as prefix
                for sfield in field.sub:
                    sfield.id = field.id + '.' + sfield.id
                    sfield.seq = field.seq + '.' + sfield.seq
        #every bots record starts with a mandatory BOTSID field
        seg.sub.insert(0, FieldDef('BOTSID', req = 'M', minlen = "1", maxlen = "3", type = "AN", seq = 'BOTSID'))
def convertfile(infile, outfile, useseq, nostruct, norecords,edifactversionID):
    """ Read the SEF grammar from <infile> and write the Bots grammar to <outfile>.
        useseq: use sequential field names; nostruct: do not write 'structure';
        norecords: do not generate 'recorddefs' (import them from
        records<edifactversionID> instead). """
    struct = ""
    segdefs, segdict, comdefs, elmdefs = [], {}, {}, {}
    # segdict just keeps a list of segs already found, so they don't get re-defined
    in_sets = in_segs = in_coms = in_elms = False
    #*******reading sef grammar***********************
    for line in infile:
        line = line.strip('\n')
        if line:
            if line[0] == '*': # a comment, skip
                pass
            elif line[0] == '.':   #section header: switch parsing mode
                line = line.upper()
                in_sets = in_segs = in_coms = in_elms = False
                if line == '.SETS':
                    in_sets = True
                elif line == '.SEGS':
                    in_segs = True
                elif line == '.COMS':
                    in_coms = True
                elif line == '.ELMS':
                    in_elms = True
            else:
                if in_sets:
                    struct = do_set(line)   #only the last .SETS definition survives
                elif not norecords: #if records need to be written
                    if in_segs:
                        seg = comdef(line, issegdef = True)
                        # if multiple defs for this seg, only do first one
                        if seg.id not in segdict:
                            segdict[seg.id] = 1
                            segdefs.append(seg)
                    elif in_coms:
                        com = comdef(line)
                        comdefs[com.id] = com
                    elif in_elms:
                        elm = elmdef(line)
                        elmdefs[elm.id] = elm
    #*****writing bots grammar **************
    outfile.write('from bots.botsconfig import *\n')
    if not nostruct: #if structure is written: the syntax import is needed
        outfile.write('from edifactsyntax3 import syntax\n')
    if norecords: #records are not generated: import them from the shared records module
        outfile.write('from records%s import recorddefs\n\n'%edifactversionID)
    #****************************************
    if not nostruct:
        outfile.write("\nstructure = [\n%s\n]\n" % struct)
    if not norecords:
        getelmsinfo(elmdefs, comdefs)
        getfieldsinfo(elmdefs, comdefs, segdefs)
        outfile.write("\nrecorddefs = {\n%s\n}\n" % "\n".join([seg.tostring(useseq) for seg in segdefs]))
def start(args):
    ''' Command line entry: parse the options in <args>, open the in/out files
        and run the conversion. Exits via showusage() on argument errors. '''
    useseq, nostruct, norecords, infilename, outfilename = False, False, False, None, None
    for arg in args:
        if not arg:
            continue
        if arg in ["-h", "--help", "?", "/?", "-?"]:
            showusage(args[0].split(os.sep)[-1])
        if arg == "-seq":
            useseq = True
        elif arg == "-nostruct":
            nostruct = True
        elif arg == "-norecords":
            norecords = True
        elif not infilename:
            infilename = arg
        elif not outfilename:
            outfilename = arg
        else:
            showusage(args[0].split(os.sep)[-1])
    if not infilename or not outfilename:
        showusage(args[0].split(os.sep)[-1])
    #************************************
    infile = open(infilename, 'r')
    outfile = open(outfilename, 'w')
    # version id is taken from the output filename after a 6-char prefix,
    # eg 'recordD96A.py' -> 'D96A' - assumes that naming convention; TODO confirm
    edifactversionID = os.path.splitext(os.path.basename(outfilename))[0][6:]
    print ' Convert sef->bots "%s".'%(outfilename)
    convertfile(infile, outfile, useseq, nostruct, norecords,edifactversionID)
    infile.close()
    outfile.close()
if __name__ == "__main__":
    # top-level CLI boundary: any error is reported with a full traceback
    try:
        start(sys.argv[1:])
    except:
        traceback.print_exc()
    else:
        print "Done"
| Python |
#!/usr/bin/env python
import sys
import os
import logging
from logging.handlers import TimedRotatingFileHandler
from django.core.handlers.wsgi import WSGIHandler
from django.utils.translation import ugettext as _
import cherrypy
from cherrypy import wsgiserver
import botslib
import botsglobal
import botsinit
def showusage():
    ''' Print command line usage and exit with status 0. '''
    usage = '''
    This is "%(name)s", a part of Bots open source edi translator - http://bots.sourceforge.net.
    The %(name)s is the web server for bots; the interface (bots-monitor) can be accessed in a browser, eg 'http://localhost:8080'.
    Usage:
        %(name)s -c<directory>
    Options:
        -c<directory> directory for configuration files (default: config).
    '''%{'name':os.path.basename(sys.argv[0])}
    print usage
    sys.exit(0)
def start():
    ''' Entry point of the bots web server.
        Reads the -c<configdir> command line option, initialises bots configuration
        and logging, then runs a cherrypy WSGI server that dispatches to the django
        application ('/') and to static files ('/media'). Blocks until interrupted.
    '''
    #NOTE bots is always on PYTHONPATH!!! - otherwise it will not start.
    #***command line arguments**************************
    configdir = 'config'
    for arg in sys.argv[1:]:
        if not arg:
            continue
        if arg.startswith('-c'):
            configdir = arg[2:]
            if not configdir:
                print 'Configuration directory indicated, but no directory name.'
                sys.exit(1)
        elif arg in ["?", "/?"] or arg.startswith('-'):
            showusage()
        else:
            # any other (positional) argument is also treated as a usage error
            showusage()
    #***init general: find locating of bots, configfiles, init paths etc.***********************
    botsinit.generalinit(configdir)
    #***initialise logging. This logging only contains the logging from bots-webserver, not from cherrypy.
    botsglobal.logger = logging.getLogger('bots-webserver')
    botsglobal.logger.setLevel(logging.DEBUG)
    h = TimedRotatingFileHandler(botslib.join(botsglobal.ini.get('directories','logging'),'webserver.log'), backupCount=10)
    fileformat = logging.Formatter("%(asctime)s %(levelname)-8s: %(message)s",'%Y%m%d %H:%M:%S')
    h.setFormatter(fileformat)
    botsglobal.logger.addHandler(h)
    #***init cherrypy as webserver*********************************************
    #global configuration for cherrypy
    cherrypy.config.update({'global': {'log.screen': False, 'server.environment': botsglobal.ini.get('webserver','environment','production')}})
    #cherrypy handling of static files
    conf = {'/': {'tools.staticdir.on' : True,'tools.staticdir.dir' : 'media' ,'tools.staticdir.root': botsglobal.ini.get('directories','botspath')}}
    servestaticfiles = cherrypy.tree.mount(None, '/media', conf)    #None: no cherrypy application (as this only serves static files)
    #cherrypy handling of django
    servedjango = WSGIHandler()     #was: servedjango = AdminMediaHandler(WSGIHandler()) but django does not need the AdminMediaHandler in this setup. is much faster.
    #cherrypy uses a dispatcher in order to handle the serving of static files and django.
    dispatcher = wsgiserver.WSGIPathInfoDispatcher({'/': servedjango, '/media': servestaticfiles})
    botswebserver = wsgiserver.CherryPyWSGIServer(bind_addr=('0.0.0.0', botsglobal.ini.getint('webserver','port',8080)), wsgi_app=dispatcher, server_name=botsglobal.ini.get('webserver','name','bots-webserver'))
    botsglobal.logger.info(_(u'Bots web-server started.'))
    #handle ssl: cherrypy < 3.2 always uses pyOpenssl. cherrypy >= 3.2 uses python buildin ssl (python >= 2.6 has buildin support for ssl).
    ssl_certificate = botsglobal.ini.get('webserver','ssl_certificate',None)
    ssl_private_key = botsglobal.ini.get('webserver','ssl_private_key',None)
    if ssl_certificate and ssl_private_key:
        # NOTE(review): string comparison of version numbers ('3.10.0' < '3.2.0') - verify against supported cherrypy versions.
        if cherrypy.__version__ >= '3.2.0':
            adapter_class = wsgiserver.get_ssl_adapter_class('builtin')
            botswebserver.ssl_adapter = adapter_class(ssl_certificate,ssl_private_key)
        else:
            #but: pyOpenssl should be there!
            botswebserver.ssl_certificate = ssl_certificate
            botswebserver.ssl_private_key = ssl_private_key
        botsglobal.logger.info(_(u'Bots web-server uses ssl (https).'))
    else:
        botsglobal.logger.info(_(u'Bots web-server uses plain http (no ssl).'))
    #***start the cherrypy webserver; serve until keyboard interrupt.
    try:
        botswebserver.start()
    except KeyboardInterrupt:
        botswebserver.stop()

if __name__=='__main__':
    start()
| Python |
# Django settings for bots project.
# NOTE(review): uses the single DATABASE_* settings style (pre-Django-1.2) - confirm the targeted Django version.
import os
import bots
#*******settings for bots error reports**********************************
MANAGERS = (    #bots will send error reports to the MANAGERS
    ('name_manager', 'manager@domain.org'),
)
#~ EMAIL_HOST = 'smtp.gmail.com'            #Default: 'localhost'
#~ EMAIL_PORT = '587'                       #Default: 25
#~ EMAIL_USE_TLS = True                     #Default: False
#~ EMAIL_HOST_USER = 'user@gmail.com'       #Default: ''. Username to use for the SMTP server defined in EMAIL_HOST. If empty, Django won't attempt authentication.
#~ EMAIL_HOST_PASSWORD = ''                 #Default: ''. PASSWORD to use for the SMTP server defined in EMAIL_HOST. If empty, Django won't attempt authentication.
#~ SERVER_EMAIL = 'user@gmail.com'          #Sender of bots error reports. Default: 'root@localhost'
#~ EMAIL_SUBJECT_PREFIX = ''                #This is prepended on email subject.
#*********path settings*************************advised is not to change these values!!
PROJECT_PATH = os.path.abspath(os.path.dirname(bots.__file__))
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_PATH + '/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
#~ FILE_UPLOAD_TEMP_DIR = os.path.join(PROJECT_PATH, 'botssys/pluginsuploaded')     #set in bots.ini
ROOT_URLCONF = 'bots.urls'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/logout/'
#~ LOGOUT_REDIRECT_URL =       #??not such parameter; is set in urls
TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, 'templates'),
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
#*********database settings*************************
#django-admin syncdb --pythonpath='/home/hje/botsup' --settings='bots.config.settings'
#SQLITE:
DATABASE_ENGINE = 'sqlite3'     # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = os.path.join(PROJECT_PATH, 'botssys/sqlitedb/botsdb')   #path to database; if relative path: interpreted relative to bots root directory
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
DATABASE_OPTIONS = {}
#~ #MySQL:
#~ DATABASE_ENGINE = 'mysql'
#~ DATABASE_NAME = 'botsdb'
#~ DATABASE_USER = 'bots'
#~ DATABASE_PASSWORD = 'botsbots'
#~ DATABASE_HOST = '192.168.0.7'
#~ DATABASE_PORT = '3306'
#~ DATABASE_OPTIONS = {'use_unicode':True,'charset':'utf8',"init_command": 'SET storage_engine=INNODB'}
#PostgreSQL:
#~ DATABASE_ENGINE = 'postgresql_psycopg2'
#~ DATABASE_NAME = 'botsdb'
#~ DATABASE_USER = 'bots'
#~ DATABASE_PASSWORD = 'botsbots'
#~ DATABASE_HOST = '192.168.0.7'
#~ DATABASE_PORT = '5432'
#~ DATABASE_OPTIONS = {}
#*********sessions, cookies, log out time*************************
SESSION_EXPIRE_AT_BROWSER_CLOSE = True      #True: always log in when browser is closed
SESSION_COOKIE_AGE = 3600                   #seconds a user needs to login when no activity
SESSION_SAVE_EVERY_REQUEST = True           #if True: SESSION_COOKIE_AGE is interpreted as: since last activity
#*********localization*************************
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
DATE_FORMAT = "Y-m-d"
DATETIME_FORMAT = "Y-m-d G:i"
TIME_FORMAT = "G:i"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
#~ LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'en'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
#*************************************************************************
#*********other django setting. please consult django docs.***************
#set in bots.ini
#~ DEBUG = True
#~ TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
# Make this unique, and don't share it with anybody.
# NOTE(review): this default SECRET_KEY ships with the distribution; it should be changed per installation.
SECRET_KEY = 'm@-u37qiujmeqfbu$daaaaz)sp^7an4u@h=wfx9dd$$$zl2i*x9#awojdc'
ADMINS = (
    ('bots', 'your_email@domain.com'),
)
#save uploaded file (=plugin) always to file. no path for temp storage is used, so system default is used.
FILE_UPLOAD_HANDLERS = (
    "django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    #'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'bots.persistfilters.FilterPersistMiddleware',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'bots',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
)
| Python |
import time
import django
import models
import viewlib
import botslib
import botsglobal
# Bare attribute access; a no-op at runtime - presumably only forces django.contrib.admin.widgets to be imported. TODO confirm it can be removed.
django.contrib.admin.widgets.AdminSplitDateTime
# Shorthand for the widget used by all hidden form fields below.
HiddenInput = django.forms.widgets.HiddenInput
# First entry of every dropdown: no filter selected.
DEFAULT_ENTRY = ('',"---------")
# Static choice lists; the dynamic ones (routes, partners, channels, ...) are built per request by the get* functions below.
editypelist=[DEFAULT_ENTRY] + sorted(models.EDITYPES)
confirmtypelist=[DEFAULT_ENTRY] + models.CONFIRMTYPE
def getroutelist():
    ''' Dropdown choices: empty entry plus every distinct route id.
        (needed because idroute is not the primary key of the routes table) '''
    idroutes = models.routes.objects.values_list('idroute', flat=True).order_by('idroute').distinct()
    return [DEFAULT_ENTRY] + [(idroute, idroute) for idroute in idroutes]
def getinmessagetypes():
    ''' Dropdown choices: empty entry plus every distinct source messagetype of the translations. '''
    messagetypes = models.translate.objects.values_list('frommessagetype', flat=True).order_by('frommessagetype').distinct()
    return [DEFAULT_ENTRY] + [(messagetype, messagetype) for messagetype in messagetypes]
def getoutmessagetypes():
    ''' Dropdown choices: empty entry plus every distinct target messagetype of the translations. '''
    messagetypes = models.translate.objects.values_list('tomessagetype', flat=True).order_by('tomessagetype').distinct()
    return [DEFAULT_ENTRY] + [(messagetype, messagetype) for messagetype in messagetypes]
def getallmessagetypes():
    ''' Dropdown choices: empty entry plus every messagetype that occurs as either source or target of a translation, sorted and de-duplicated. '''
    tolist = list(models.translate.objects.values_list('tomessagetype', flat=True).all())
    fromlist = list(models.translate.objects.values_list('frommessagetype', flat=True).all())
    return [DEFAULT_ENTRY] + [(messagetype, messagetype) for messagetype in sorted(set(tolist + fromlist))]
def getpartners():
    ''' Dropdown choices: empty entry plus every active partner (partner groups excluded), ordered by partner id. '''
    partners = models.partner.objects.values_list('idpartner', flat=True).filter(isgroup=False,active=True).order_by('idpartner')
    return [DEFAULT_ENTRY] + [(partner, partner) for partner in partners]
def getfromchannels():
    ''' Dropdown choices: empty entry plus every incoming channel id, ordered. '''
    channels = models.channel.objects.values_list('idchannel', flat=True).filter(inorout='in').order_by('idchannel')
    return [DEFAULT_ENTRY] + [(channel, channel) for channel in channels]
def gettochannels():
    ''' Dropdown choices: empty entry plus every outgoing channel id, ordered. '''
    channels = models.channel.objects.values_list('idchannel', flat=True).filter(inorout='out').order_by('idchannel')
    return [DEFAULT_ENTRY] + [(channel, channel) for channel in channels]
class Select(django.forms.Form):
    ''' Base filter form for the monitor screens: a date range plus hidden paging/sorting state.
        Field declaration order is significant: it determines rendering order. '''
    datefrom = django.forms.DateTimeField(initial=viewlib.datetimefrom)
    dateuntil = django.forms.DateTimeField(initial=viewlib.datetimeuntil)
    page = django.forms.IntegerField(required=False,initial=1,widget=HiddenInput())
    sortedby = django.forms.CharField(initial='ts',widget=HiddenInput())
    sortedasc = django.forms.BooleanField(initial=False,required=False,widget=HiddenInput())
class View(django.forms.Form):
    ''' Base form for result-list screens; carries the same state as Select, but all fields are optional and hidden. '''
    datefrom = django.forms.DateTimeField(required=False,initial=viewlib.datetimefrom,widget=HiddenInput())
    dateuntil = django.forms.DateTimeField(required=False,initial=viewlib.datetimeuntil,widget=HiddenInput())
    page = django.forms.IntegerField(required=False,initial=1,widget=HiddenInput())
    sortedby = django.forms.CharField(required=False,initial='ts',widget=HiddenInput())
    sortedasc = django.forms.BooleanField(required=False,initial=False,widget=HiddenInput())
class SelectReports(Select):
    ''' Filter form for the reports screen. '''
    template = 'bots/selectform.html'
    action = '/reports/'
    status = django.forms.ChoiceField([DEFAULT_ENTRY,('1',"Error"),('0',"Done")],required=False,initial='')
class ViewReports(View):
    ''' Hidden-state form for the reports result list. '''
    template = 'bots/reports.html'
    action = '/reports/'
    status = django.forms.IntegerField(required=False,initial='',widget=HiddenInput())
class SelectIncoming(Select):
    ''' Filter form for the incoming-files screen.
        Dynamic dropdowns (routes, partners, messagetypes) start empty and are filled in __init__. '''
    template = 'bots/selectform.html'
    action = '/incoming/'
    statust = django.forms.ChoiceField([DEFAULT_ENTRY,('1',"Error"),('3',"Done")],required=False,initial='')
    idroute = django.forms.ChoiceField([],required=False,initial='')
    frompartner = django.forms.ChoiceField([],required=False)
    topartner = django.forms.ChoiceField([],required=False)
    ineditype = django.forms.ChoiceField(editypelist,required=False)
    inmessagetype = django.forms.ChoiceField([],required=False)
    outeditype = django.forms.ChoiceField(editypelist,required=False)
    outmessagetype = django.forms.ChoiceField([],required=False)
    lastrun = django.forms.BooleanField(required=False,initial=False)
    def __init__(self, *args, **kwargs):
        # choices are queried at form creation time, so newly added routes/partners/messagetypes appear without a restart
        super(SelectIncoming, self).__init__(*args, **kwargs)
        self.fields['idroute'].choices = getroutelist()
        self.fields['inmessagetype'].choices = getinmessagetypes()
        self.fields['outmessagetype'].choices = getoutmessagetypes()
        self.fields['frompartner'].choices = getpartners()
        self.fields['topartner'].choices = getpartners()
class ViewIncoming(View):
    ''' Hidden-state form for the incoming-files result list. '''
    template = 'bots/incoming.html'
    action = '/incoming/'
    statust = django.forms.IntegerField(required=False,initial='',widget=HiddenInput())
    idroute = django.forms.CharField(required=False,widget=HiddenInput())
    frompartner = django.forms.CharField(required=False,widget=HiddenInput())
    topartner = django.forms.CharField(required=False,widget=HiddenInput())
    ineditype = django.forms.CharField(required=False,widget=HiddenInput())
    inmessagetype = django.forms.CharField(required=False,widget=HiddenInput())
    outeditype = django.forms.CharField(required=False,widget=HiddenInput())
    outmessagetype = django.forms.CharField(required=False,widget=HiddenInput())
    lastrun = django.forms.BooleanField(required=False,initial=False,widget=HiddenInput())
    botskey = django.forms.CharField(required=False,widget=HiddenInput())
class SelectDocument(Select):
    ''' Filter form for the document screen; dynamic dropdowns are filled in __init__. '''
    template = 'bots/selectform.html'
    action = '/document/'
    idroute = django.forms.ChoiceField([],required=False,initial='')
    frompartner = django.forms.ChoiceField([],required=False)
    topartner = django.forms.ChoiceField([],required=False)
    editype = django.forms.ChoiceField(editypelist,required=False)
    messagetype = django.forms.ChoiceField(required=False)
    lastrun = django.forms.BooleanField(required=False,initial=False)
    botskey = django.forms.CharField(required=False,label='Document number',max_length=35)
    def __init__(self, *args, **kwargs):
        # fill the dynamic dropdowns at form creation time
        super(SelectDocument, self).__init__(*args, **kwargs)
        self.fields['idroute'].choices = getroutelist()
        self.fields['messagetype'].choices = getoutmessagetypes()
        self.fields['frompartner'].choices = getpartners()
        self.fields['topartner'].choices = getpartners()
class ViewDocument(View):
    ''' Hidden-state form for the document result list. '''
    template = 'bots/document.html'
    action = '/document/'
    idroute = django.forms.CharField(required=False,widget=HiddenInput())
    frompartner = django.forms.CharField(required=False,widget=HiddenInput())
    topartner = django.forms.CharField(required=False,widget=HiddenInput())
    editype = django.forms.CharField(required=False,widget=HiddenInput())
    messagetype = django.forms.CharField(required=False,widget=HiddenInput())
    lastrun = django.forms.BooleanField(required=False,initial=False,widget=HiddenInput())
    botskey = django.forms.CharField(required=False,widget=HiddenInput())
class SelectOutgoing(Select):
    ''' Filter form for the outgoing-files screen; dynamic dropdowns are filled in __init__. '''
    template = 'bots/selectform.html'
    action = '/outgoing/'
    idroute = django.forms.ChoiceField([],required=False,initial='')
    frompartner = django.forms.ChoiceField([],required=False)
    topartner = django.forms.ChoiceField([],required=False)
    editype = django.forms.ChoiceField(editypelist,required=False)
    messagetype = django.forms.ChoiceField(required=False)
    lastrun = django.forms.BooleanField(required=False,initial=False)
    def __init__(self, *args, **kwargs):
        # fill the dynamic dropdowns at form creation time
        super(SelectOutgoing, self).__init__(*args, **kwargs)
        self.fields['idroute'].choices = getroutelist()
        self.fields['messagetype'].choices = getoutmessagetypes()
        self.fields['frompartner'].choices = getpartners()
        self.fields['topartner'].choices = getpartners()
class ViewOutgoing(View):
    ''' Hidden-state form for the outgoing-files result list. '''
    template = 'bots/outgoing.html'
    action = '/outgoing/'
    idroute = django.forms.CharField(required=False,widget=HiddenInput())
    frompartner = django.forms.CharField(required=False,widget=HiddenInput())
    topartner = django.forms.CharField(required=False,widget=HiddenInput())
    editype = django.forms.CharField(required=False,widget=HiddenInput())
    messagetype = django.forms.CharField(required=False,widget=HiddenInput())
    lastrun = django.forms.BooleanField(required=False,initial=False,widget=HiddenInput())
class SelectProcess(Select):
    ''' Filter form for the process-errors screen; the route dropdown is filled in __init__. '''
    template = 'bots/selectform.html'
    action = '/process/'
    idroute = django.forms.ChoiceField([],required=False,initial='')
    lastrun = django.forms.BooleanField(required=False,initial=False)
    def __init__(self, *args, **kwargs):
        super(SelectProcess, self).__init__(*args, **kwargs)
        self.fields['idroute'].choices = getroutelist()
class ViewProcess(View):
    ''' Hidden-state form for the process-errors result list. '''
    template = 'bots/process.html'
    action = '/process/'
    idroute = django.forms.CharField(required=False,widget=HiddenInput())
    lastrun = django.forms.BooleanField(required=False,initial=False,widget=HiddenInput())
class SelectConfirm(Select):
    ''' Filter form for the confirmations (acknowledgements) screen; dynamic dropdowns are filled in __init__. '''
    template = 'bots/selectform.html'
    action = '/confirm/'
    confirmtype = django.forms.ChoiceField(confirmtypelist,required=False,initial='0')
    confirmed = django.forms.ChoiceField([('0',"All runs"),('1',"Current run"),('2',"Last run")],required=False,initial='0')
    idroute = django.forms.ChoiceField([],required=False,initial='')
    editype = django.forms.ChoiceField(editypelist,required=False)
    messagetype = django.forms.ChoiceField([],required=False)
    frompartner = django.forms.ChoiceField([],required=False)
    topartner = django.forms.ChoiceField([],required=False)
    fromchannel = django.forms.ChoiceField([],required=False)
    tochannel = django.forms.ChoiceField([],required=False)
    def __init__(self, *args, **kwargs):
        # fill the dynamic dropdowns at form creation time
        super(SelectConfirm, self).__init__(*args, **kwargs)
        self.fields['idroute'].choices = getroutelist()
        self.fields['messagetype'].choices = getallmessagetypes()
        self.fields['frompartner'].choices = getpartners()
        self.fields['topartner'].choices = getpartners()
        self.fields['fromchannel'].choices = getfromchannels()
        self.fields['tochannel'].choices = gettochannels()
class ViewConfirm(View):
    ''' Hidden-state form for the confirmations result list. '''
    template = 'bots/confirm.html'
    action = '/confirm/'
    confirmtype = django.forms.CharField(required=False,widget=HiddenInput())
    confirmed = django.forms.CharField(required=False,widget=HiddenInput())
    idroute = django.forms.CharField(required=False,widget=HiddenInput())
    editype = django.forms.CharField(required=False,widget=HiddenInput())
    messagetype = django.forms.CharField(required=False,widget=HiddenInput())
    frompartner = django.forms.CharField(required=False,widget=HiddenInput())
    topartner = django.forms.CharField(required=False,widget=HiddenInput())
    fromchannel = django.forms.CharField(required=False,widget=HiddenInput())
    tochannel = django.forms.CharField(required=False,widget=HiddenInput())
class UploadFileForm(django.forms.Form):
    ''' Form for uploading a plugin (zip file) to be read into bots. '''
    file = django.forms.FileField(label='Plugin to read',required=True,widget=django.forms.widgets.FileInput(attrs={'size':'100'}))
class PlugoutForm(django.forms.Form):
    ''' Form selecting which parts of a bots installation go into a plugin (zip export);
        the default plugin filename is filled in __init__ with the current date. '''
    databaseconfiguration = django.forms.BooleanField(required=False,initial=True,help_text='Routes, channels, translations, partners, etc.')
    umlists = django.forms.BooleanField(required=False,initial=True,label='User maintained code lists',help_text='')
    fileconfiguration = django.forms.BooleanField(required=False,initial=True,help_text='Grammars, mapping scrips, routes scripts, etc. (bots/usersys)')
    infiles = django.forms.BooleanField(required=False,initial=True,help_text='Examples edi file in bots/botssys/infile')
    charset = django.forms.BooleanField(required=False,initial=False,label='(Edifact) files with character sets',help_text='seldom needed.')
    databasetransactions = django.forms.BooleanField(required=False,initial=False,help_text='From the database: Runs, incoming files, outgoing files, documents; only for support purposes, on request.')
    data = django.forms.BooleanField(required=False,initial=False,label='All transaction files',help_text='bots/botssys/data; only for support purposes, on request.')
    logfiles = django.forms.BooleanField(required=False,initial=False,label='Log files',help_text='bots/botssys/logging; only for support purposes, on request.')
    config = django.forms.BooleanField(required=False,initial=False,label='configuration files',help_text='bots/config; only for support purposes, on request.')
    database = django.forms.BooleanField(required=False,initial=False,label='SQLite database',help_text='Only for support purposes, on request.')
    filename = django.forms.CharField(required=True,label='Plugin filename',max_length=250)
    def __init__(self, *args, **kwargs):
        # default plugin name: <botssys>/myplugin_YYYYMMDD.zip
        super(PlugoutForm, self).__init__(*args, **kwargs)
        self.fields['filename'].initial = botslib.join(botsglobal.ini.get('directories','botssys'),'myplugin' + time.strftime('_%Y%m%d') + '.zip')
class DeleteForm(django.forms.Form):
    ''' Form selecting which parts of a bots installation to delete (clean-up screen). '''
    delbackup = django.forms.BooleanField(required=False,label='Delete backups of user scripts',initial=True,help_text='Delete backup files in usersys (purge).')
    deltransactions = django.forms.BooleanField(required=False,label='Delete transactions',initial=True,help_text='Delete runs, reports, incoming, outgoing, data files.')
    delconfiguration = django.forms.BooleanField(required=False,label='Delete configuration',initial=False,help_text='Delete routes, channels, translations, partners etc.')
    delcodelists = django.forms.BooleanField(required=False,label='Delete user code lists',initial=False,help_text='Delete user code lists.')
    deluserscripts = django.forms.BooleanField(required=False,label='Delete all user scripts',initial=False,help_text='Delete all scripts in usersys (grammars, mappings etc) except charsets.')
    delinfile = django.forms.BooleanField(required=False,label='Delete botssys/infiles',initial=False,help_text='Delete files in botssys/infile.')
    deloutfile = django.forms.BooleanField(required=False,label='Delete botssys/outfiles',initial=False,help_text='Delete files in botssys/outfile.')
| Python |
from django.utils.translation import ugettext as _
#bots-modules
import botslib
import node
from botsconfig import *
class Message(object):
    ''' abstract class; represents an edi message.
        is subclassed as outmessage or inmessage object.
        The message content is held as a tree of node objects (self.root); the
        query methods (get, getloop, put, ...) are thin delegations to that tree.
    '''
    def __init__(self):
        self.recordnumber=0     #segment counter. Is not used for UNT of SE record; some editypes want sequential recordnumbering
    def kill(self):
        """ explicitly del big attributes; frees memory early for large messages."""
        if hasattr(self,'ta_info'): del self.ta_info
        if hasattr(self,'root'): del self.root
        if hasattr(self,'defmessage'): del self.defmessage
        if hasattr(self,'records'): del self.records
        if hasattr(self,'rawinput'): del self.rawinput
    @staticmethod
    def display(records):
        '''for debugging lexed records: prints each record; the first field is the record-id, subfields are marked.'''
        for record in records:
            t = 0
            for veld in record:
                if t==0:
                    print '%s (Record-id)'%(veld[VALUE])
                else:
                    if veld[SFIELD]:
                        print '    %s (sub)'%(veld[VALUE])
                    else:
                        print '    %s (veld)'%(veld[VALUE])
                t += 1
    def change(self,where,change):
        ''' query tree (self.root) with where; if found replace with change; return True if change, return False if not changed.'''
        if self.root.record is None:
            raise botslib.MappingRootError(_(u'change($where,$change"): "root" of incoming message is empty; either split messages or use inn.getloop'),where=where,change=change)
        return self.root.change(where,change)
    def delete(self,*mpaths):
        ''' query tree (self.root) with mpath; delete if found. return True if deleted, return False if not deleted.'''
        if self.root.record is None:
            raise botslib.MappingRootError(_(u'delete($mpath): "root" of incoming message is empty; either split messages or use inn.getloop'),mpath=mpaths)
        return self.root.delete(*mpaths)
    def get(self,*mpaths):
        ''' query tree (self.root) with mpath; get value (string); get None if not found.'''
        if self.root.record is None:
            raise botslib.MappingRootError(_(u'get($mpath): "root" of incoming message is empty; either split messages or use inn.getloop'),mpath=mpaths)
        return self.root.get(*mpaths)
    def getnozero(self,*mpaths):
        ''' like get, returns None if value is zero (0) or not numeric.
            Is sometimes useful in mapping.'''
        if self.root.record is None:
            raise botslib.MappingRootError(_(u'get($mpath): "root" of incoming message is empty; either split messages or use inn.getloop'),mpath=mpaths)
        return self.root.getnozero(*mpaths)
    def getcount(self):
        ''' count number of nodes in self.root. Number of nodes is number of records.'''
        return self.root.getcount()
    def getcountoccurrences(self,*mpaths):
        ''' count the number of occurrences of mpath in the tree (iterates getloop and counts).'''
        count = 0
        for value in self.getloop(*mpaths):
            count += 1
        return count
    def getcountsum(self,*mpaths):
        ''' return the sum for all values found in mpath. Eg total number of ordered quantities.'''
        if self.root.record is None:
            raise botslib.MappingRootError(_(u'get($mpath): "root" of incoming message is empty; either split messages or use inn.getloop'),mpath=mpaths)
        return self.root.getcountsum(*mpaths)
    def getloop(self,*mpaths):
        ''' query tree with mpath; generates all the nodes. Is typically used as: for record in inn.get(mpath):
        '''
        if self.root.record:    #self.root is a real root
            for terug in self.root.getloop(*mpaths): #search recursive for rest of mpaths
                yield terug
        else:   #self.root is dummy root; loop over its children instead
            for childnode in self.root.children:
                for terug in childnode.getloop(*mpaths): #search recursive for rest of mpaths
                    yield terug
    def put(self,*mpaths,**kwargs):
        ''' put values (via mpath) in the tree of the outgoing message.'''
        if self.root.record is None and self.root.children:
            raise botslib.MappingRootError(_(u'put($mpath): "root" of outgoing message is empty; use out.putloop'),mpath=mpaths)
        return self.root.put(*mpaths,**kwargs)
    def putloop(self,*mpaths):
        ''' start a repeating segment(group) in the outgoing message; returns the new node.'''
        if not self.root.record:    #no input yet, and start with a putloop(): dummy root
            if len(mpaths) == 1:
                self.root.append(node.Node(mpaths[0]))
                return self.root.children[-1]
            else: #TODO: what if self.root.record is None and len(mpaths) > 1?
                raise botslib.MappingRootError(_(u'putloop($mpath): mpath too long???'),mpath=mpaths)
        return self.root.putloop(*mpaths)
    def sort(self,*mpaths):
        ''' sort the children of the node found via mpath (delegates to the node tree).'''
        if self.root.record is None:
            raise botslib.MappingRootError(_(u'get($mpath): "root" of message is empty; either split messages or use inn.getloop'),mpath=mpaths)
        self.root.sort(*mpaths)
    def normalisetree(self,node):
        ''' The node tree is check, sorted, fields are formatted etc.
            Always use this method before writing output.
        '''
        self._checktree(node,self.defmessage.structure[0])
        #~ node.display()
        self._canonicaltree(node,self.defmessage.structure[0])
    def _checktree(self,tree,structure):
        ''' checks tree with table:
            - all records should be in table at the right place in hierarchy
            - for each record, all fields should be in grammar
            This function checks the root of grammar-structure with root of node tree
        '''
        if tree.record['BOTSID'] == structure[ID]:
            #check tree recursively with structure
            self._checktreecore(tree,structure)
        else:
            raise botslib.MessageError(_(u'Grammar "$grammar" has (root)record "$grammarroot"; found "$root".'),root=tree.record['BOTSID'],grammarroot=structure[ID],grammar=self.defmessage.grammarname)
    def _checktreecore(self,node,structure):
        ''' recursive part of _checktree: checks one node and its children against the grammar structure.
        '''
        deletelist=[]
        self._checkfields(node.record,structure)
        if node.children and not LEVEL in structure:
            # node has children, grammar does not allow any at this level
            if self.ta_info['checkunknownentities']:
                raise botslib.MessageError(_(u'Record "$record" in message has children, but grammar "$grammar" not. Found "$xx".'),record=node.record['BOTSID'],grammar=self.defmessage.grammarname,xx=node.children[0].record['BOTSID'])
            node.children=[]
            return
        for childnode in node.children:     #for every node:
            for structure_record in structure[LEVEL]:   #search in grammar-records
                if childnode.record['BOTSID'] == structure_record[ID]:   #if found right structure_record
                    #check children recursive
                    self._checktreecore(childnode,structure_record)
                    break   #check next mpathnode
            else:   #checked all structure_record in grammar, but nothing found
                if self.ta_info['checkunknownentities']:
                    raise botslib.MessageError(_(u'Record "$record" in message not in structure of grammar "$grammar". Whole record: "$content".'),record=childnode.record['BOTSID'],grammar=self.defmessage.grammarname,content=childnode.record)
                deletelist.append(childnode)
        for child in deletelist:
            # unknown records are silently dropped when checkunknownentities is off
            node.children.remove(child)
    def _checkfields(self,record,structure_record):
        ''' checks for every field in record if field exists in structure_record (from grammar).
            Unknown fields either raise (checkunknownentities) or are deleted.
        '''
        deletelist=[]
        for field in record.keys():   #all fields in record should exist in structure_record
            if field == 'BOTSIDnr':
                continue
            for grammarfield in structure_record[FIELDS]:
                if grammarfield[ISFIELD]:    #if field (no composite)
                    if field == grammarfield[ID]:
                        break
                else:   #if composite
                    for grammarsubfield in grammarfield[SUBFIELDS]:   #loop subfields
                        if field == grammarsubfield[ID]:
                            break
                    else:
                        continue
                    break
            else:
                if self.ta_info['checkunknownentities']:
                    raise botslib.MessageError(_(u'Record: "$mpath" field "$field" does not exist.'),field=field,mpath=structure_record[MPATH])
                deletelist.append(field)
        for field in deletelist:
            del record[field]
    def _canonicaltree(self,node,structure,headerrecordnumber=0):
        ''' For nodes: check min and max occurence; sort the records conform grammar
        '''
        sortednodelist = []
        self._canonicalfields(node.record,structure,headerrecordnumber)    #handle fields of this record
        if LEVEL in structure:
            for structure_record in structure[LEVEL]:  #for structure_record of this level in grammar
                count = 0                              #count number of occurences of record
                for childnode in node.children:        #for every node in mpathtree; SPEED: delete nodes from list when found
                    if childnode.record['BOTSID'] != structure_record[ID] or childnode.record['BOTSIDnr'] != structure_record[BOTSIDnr]:   #if it is not the right NODE":
                        continue
                    count += 1
                    self._canonicaltree(childnode,structure_record,self.recordnumber)         #use rest of index in deeper level
                    sortednodelist.append(childnode)
                if structure_record[MIN] > count:
                    raise botslib.MessageError(_(u'Record "$mpath" mandatory but not present.'),mpath=structure_record[MPATH])
                if structure_record[MAX] < count:
                    raise botslib.MessageError(_(u'Record "$mpath" occurs to often ($count times).'),mpath=structure_record[MPATH],count=count)
        node.children=sortednodelist
        if hasattr(self,'get_queries_from_edi'):
            # subclass hook: extract query fields (partners etc) while walking the tree
            self.get_queries_from_edi(node,structure)
    def _canonicalfields(self,noderecord,structure_record,headerrecordnumber):
        ''' For fields: check M/C; format the fields. Fields are not sorted (a dict can not be sorted).
            Fields are never added.
        '''
        for grammarfield in structure_record[FIELDS]:
            if grammarfield[ISFIELD]:    #if field (no composite)
                value = noderecord.get(grammarfield[ID])
                #~ print '(message)field',noderecord,grammarfield
                if not value:
                    #~ print 'field',grammarfield[ID], 'has no value'
                    if grammarfield[MANDATORY] == 'M':
                        raise botslib.MessageError(_(u'Record "$mpath" field "$field" is mandatory.'),mpath=structure_record[MPATH],field=grammarfield[ID])
                    continue
                #~ print 'field',grammarfield[ID], 'value', value
                noderecord[grammarfield[ID]] = self._formatfield(value,grammarfield,structure_record)
            else:               #if composite
                for grammarsubfield in grammarfield[SUBFIELDS]:   #loop subfields to see if data in composite
                    if noderecord.get(grammarsubfield[ID]):
                        break   #composite has data.
                else:           #composite has no data
                    if grammarfield[MANDATORY]=='M':
                        raise botslib.MessageError(_(u'Record "$mpath" composite "$field" is mandatory.'),mpath=structure_record[MPATH],field=grammarfield[ID])
                    continue
                #there is data in the composite!
                for grammarsubfield in grammarfield[SUBFIELDS]:   #loop subfields
                    value = noderecord.get(grammarsubfield[ID])
                    if not value:
                        if grammarsubfield[MANDATORY]=='M':
                            raise botslib.MessageError(_(u'Record "$mpath" subfield "$field" is mandatory: "$record".'),mpath=structure_record[MPATH],field=grammarsubfield[ID],record=noderecord)
                        continue
                    noderecord[grammarsubfield[ID]] = self._formatfield(value,grammarsubfield,structure_record)
| Python |
import os
import re
import zipfile
from django.utils.translation import ugettext as _
#bots-modules
import botslib
import botsglobal
from botsconfig import *
@botslib.log_session
def preprocess(routedict,function, status=FILEIN,**argv):
    ''' for pre- and postprocessing of files.
        these are NOT translations; translations involve grammars, mapping scripts etc. think of eg:
        - unzipping zipped files.
        - convert excel to csv
        - password protected files.
        Select files from INFILE -> SET_FOR_PROCESSING using criteria
        Then the actual processing function is called.
        The processing function does: SET_FOR_PROCESSING -> PROCESSING -> FILEIN
        If errors occur during processing, no ta are left with status FILEIN !
        preprocess is called right after the in-communication.
        Returns the number of files processed successfully.
    '''
    nr_files = 0
    preprocessnumber = botslib.getpreprocessnumber()
    if not botslib.addinfo(change={'status':preprocessnumber},where={'status':status,'idroute':routedict['idroute'],'fromchannel':routedict['fromchannel']}):    #check if there is something to do
        return 0
    for row in botslib.query(u'''SELECT idta,filename,charset
                                FROM ta
                                WHERE idta>%(rootidta)s
                                AND status=%(status)s
                                AND statust=%(statust)s
                                AND idroute=%(idroute)s
                                AND fromchannel=%(fromchannel)s
                                ''',
                                {'status':preprocessnumber,'statust':OK,'idroute':routedict['idroute'],'fromchannel':routedict['fromchannel'],'rootidta':botslib.get_minta4query()}):
        try:
            botsglobal.logmap.debug(u'Start preprocessing "%s" for file "%s".',function.__name__,row['filename'])
            # NOTE(review): if OldTransaction() or copyta() raises, the except clause below
            # references ta_processing before assignment and masks the original error with a NameError - verify.
            ta_set_for_processing = botslib.OldTransaction(row['idta'])
            ta_processing = ta_set_for_processing.copyta(status=preprocessnumber+1)
            ta_processing.filename=row['filename']
            function(ta_from=ta_processing,endstatus=status,routedict=routedict,**argv)
        except:
            # store the formatted traceback on the failed transaction
            txt=botslib.txtexc()
            ta_processing.failure()
            ta_processing.update(statust=ERROR,errortext=txt)
        else:
            botsglobal.logmap.debug(u'OK preprocessing "%s" for file "%s".',function.__name__,row['filename'])
            ta_set_for_processing.update(statust=DONE)
            ta_processing.update(statust=DONE)
            nr_files += 1
    return nr_files
header = re.compile('(\s*(ISA))|(\s*(UNA.{6})?\s*(U\s*N\s*B)s*.{1}(.{4}).{1}(.{1}))',re.DOTALL)
# group: 1 2 3 4 5 6 7
def mailbag(ta_from,endstatus,**argv):
    ''' split 'mailbag' files to separate files each containing one interchange (ISA-IEA or UNA/UNB-UNZ).
        handles x12 and edifact; these can be mixed.
        recognizes xml files. messagetype 'xml' has a special handling when reading xml-files.
        about auto-detect/mailbag:
        - in US mailbag is used: one file for all received edi messages...appended in one file. I heard that edifact and x12 can be mixed,
            but have actually never seen this.
        - bots needs a 'splitter': one edi-file, more interchanges. it is preferred to split these first.
        - handle multiple UNA in one file, including different charsets.
        - auto-detect: is it x12, edifact, xml, or??
    '''
    edifile = botslib.readdata(filename=ta_from.filename) #read as binary...
    startpos=0
    while (1):
        #each iteration isolates one interchange starting at startpos
        found = header.search(edifile[startpos:])
        if found is None:
            if startpos: #ISA/UNB have been found in file; no new ISA/UNB is found. So all processing is done.
                break
            #guess if this is an xml file.....
            sniffxml = edifile[:25]
            sniffxml = sniffxml.lstrip(' \t\n\r\f\v\xFF\xFE\xEF\xBB\xBF\x00') #to find first ' real' data; some char are because of BOM, UTF-16 etc
            if sniffxml and sniffxml[0]=='<':
                #xml file: pass the whole file on as one 'interchange'; messagetype 'mailbag' triggers special xml handling later
                ta_to=ta_from.copyta(status=endstatus,statust=OK,filename=ta_from.filename,editype='xml',messagetype='mailbag') #make transaction for translated message; gets ta_info of ta_frommes
                #~ ta_tomes.update(status=STATUSTMP,statust=OK,filename=ta_set_for_processing.filename,editype='xml') #update outmessage transaction with ta_info;
                break;
            else:
                raise botslib.InMessageError(_(u'Found no content in mailbag.'))
        elif found.group(1):
            #x12 interchange (ISA ... IEA)
            editype='x12'
            headpos=startpos+ found.start(2)
            count=0
            #ISA header has fixed positions: 4th char is the field separator, 106th the record separator.
            #newlines are skipped while counting, except at position 106 (the record separator itself may be a newline).
            for c in edifile[headpos:headpos+120]: #search first 120 characters to find separators
                if c in '\r\n' and count!=105:
                    continue
                count +=1
                if count==4:
                    field_sep = c
                elif count==106:
                    record_sep = c
                    break
            #NOTE(review): a truncated ISA header (<106 chars) would leave record_sep unbound -> NameError below; assumes a well-formed header
            #~ foundtrailer = re.search(re.escape(record_sep)+'\s*IEA'+re.escape(field_sep)+'.+?'+re.escape(record_sep),edifile[headpos:],re.DOTALL)
            foundtrailer = re.search(re.escape(record_sep)+'\s*I\s*E\s*A\s*'+re.escape(field_sep)+'.+?'+re.escape(record_sep),edifile[headpos:],re.DOTALL)
        elif found.group(3):
            #edifact interchange (optional UNA, then UNB ... UNZ)
            editype='edifact'
            if found.group(4):
                #UNA-segment present: separators are defined by fixed positions within the UNA-string itself
                field_sep = edifile[startpos + found.start(4) + 4]
                record_sep = edifile[startpos + found.start(4) + 8]
                headpos=startpos+ found.start(4)
            else:
                #no UNA-segment: use the edifact default separators
                field_sep = '+'
                record_sep = "'"
                headpos=startpos+ found.start(5)
            foundtrailer = re.search(re.escape(record_sep)+'\s*U\s*N\s*Z\s*'+re.escape(field_sep)+'.+?'+re.escape(record_sep),edifile[headpos:],re.DOTALL)
        if not foundtrailer:
            raise botslib.InMessageError(_(u'Found no valid envelope trailer in mailbag.'))
        endpos = headpos+foundtrailer.end()
        #so: interchange is from headerpos untill endpos
        #~ if header.search(edifile[headpos+25:endpos]): #check if there is another header in the interchange
            #~ raise botslib.InMessageError(u'Error in mailbag format: found no valid envelope trailer.')
        ta_to = ta_from.copyta(status=endstatus) #make transaction for translated message; gets ta_info of ta_frommes
        tofilename = str(ta_to.idta)
        tofile = botslib.opendata(tofilename,'wb')
        tofile.write(edifile[headpos:endpos])
        tofile.close()
        ta_to.update(statust=OK,filename=tofilename,editype=editype,messagetype=editype) #update outmessage transaction with ta_info;
        startpos=endpos
        botsglobal.logger.debug(_(u'        File written: "%s".'),tofilename)
def botsunzip(ta_from,endstatus,password=None,pass_non_zip=False,**argv):
    ''' unzip file;
        editype & messagetype are unchanged.
        parameters:
            password     - optional password for an encrypted zip archive.
            pass_non_zip - if True, a file that is not a zip archive is passed on unchanged
                           instead of raising InMessageError.
        every file inside the archive (directory entries are skipped) becomes a new ta/file.
    '''
    try:
        z = zipfile.ZipFile(botslib.abspathdata(filename=ta_from.filename),mode='r')
    except zipfile.BadZipfile:
        botsglobal.logger.debug(_(u'File is not a zip-file.'))
        if pass_non_zip:        #just pass the file
            botsglobal.logger.debug(_(u'"pass_non_zip" is True, just pass the file.'))
            ta_to = ta_from.copyta(status=endstatus,statust=OK)
            return
        raise botslib.InMessageError(_(u'File is not a zip-file.'))
    try:
        if password:
            z.setpassword(password)
        for f in z.infolist():
            if f.filename.endswith('/'):    #directory entry: contains no data, skip it
                continue
            ta_to = ta_from.copyta(status=endstatus)
            tofilename = str(ta_to.idta)
            tofile = botslib.opendata(tofilename,'wb')
            try:
                tofile.write(z.read(f.filename))
            finally:
                tofile.close()
            ta_to.update(statust=OK,filename=tofilename) #update outmessage transaction with ta_info;
            botsglobal.logger.debug(_(u'        File written: "%s".'),tofilename)
    finally:
        z.close()   #bug fix: the archive handle was never closed (file-handle leak)
def extractpdf(ta_from,endstatus,**argv):
    ''' Try to extract text content of a PDF file to a csv.
        You know this is not a great idea, right? But we'll do the best we can anyway!
        Page and line numbers are added to each row.
        Columns and rows are based on the x and y coordinates of each text element within tolerance allowed.
        Multiple text elements may combine to make one field, some PDFs have every character separated!
        You may need to experiment with x_group and y_group values, but defaults seem ok for most files.
        Output csv is UTF-8 encoded - The csv module doesn't directly support reading and writing Unicode
        If the PDF is just an image, all bets are off. Maybe try OCR, good luck with that!
        Mike Griffin 14/12/2011
    '''
    #third-party pdfminer is imported locally so the module loads even when pdfminer is absent
    from pdfminer.pdfinterp import PDFResourceManager, process_pdf
    from pdfminer.converter import TextConverter
    from pdfminer.layout import LAParams, LTContainer, LTText, LTTextBox
    import csv
    class CsvConverter(TextConverter):
        #pdfminer converter that buckets text elements into rows/columns by their page coordinates
        def __init__(self, *args, **kwargs):
            TextConverter.__init__(self, *args, **kwargs)
        def receive_layout(self, ltpage):
            # recursively get every text element and it's coordinates
            def render(item):
                if isinstance(item, LTContainer):
                    for child in item:
                        render(child)
                elif isinstance(item, LTText):
                    #note: this assignment makes '_' local to render (shadows the gettext alias only here)
                    (_,_,x,y) = item.bbox
                    # group the y values (rows) within group tolerance
                    for v in yv:
                        if y > v-y_group and y < v+y_group:
                            y = v
                    yv.append(y)
                    #key on -y so that iterating sorted keys yields top-of-page lines first
                    line = lines[int(-y)]
                    line[x] = item.get_text().encode('utf-8')
            from collections import defaultdict
            lines = defaultdict(lambda : {})    #maps row-key -> {x-coordinate: text}
            yv = []                             #y values seen so far; used for row grouping
            render(ltpage)
            lineid = 0
            for y in sorted(lines.keys()):
                line = lines[y]
                lineid += 1
                csvdata = [ltpage.pageid,lineid] # first 2 columns are page and line numbers
                # group the x values (fields) within group tolerance
                p = 0
                field_txt=''
                for x in sorted(line.keys()):
                    gap = x - p
                    if p > 0 and gap > x_group:
                        #gap larger than tolerance: previous field is complete
                        csvdata.append(field_txt)
                        field_txt=''
                    field_txt += line[x]
                    p = x
                csvdata.append(field_txt)
                csvout.writerow(csvdata)
            if lineid == 0:
                raise botslib.InMessageError(_(u'PDF text extraction failed, it may contain just image(s)?'))
    #get some optional parameters
    x_group = argv.get('x_group',10) # group text closer than this as one field
    y_group = argv.get('y_group',5) # group lines closer than this as one line
    password = argv.get('password','')
    quotechar = argv.get('quotechar','"')
    field_sep = argv.get('field_sep',',')
    escape = argv.get('escape','\\')
    charset = argv.get('charset','utf-8')
    #csv convention: either escape or doublequote is used, never both
    if not escape:
        doublequote = True
    else:
        doublequote = False
    try:
        pdf_stream = botslib.opendata(ta_from.filename, 'rb')
        ta_to = ta_from.copyta(status=endstatus)
        tofilename = str(ta_to.idta)
        csv_stream = botslib.opendata(tofilename,'wb')
        csvout = csv.writer(csv_stream, quotechar=quotechar, delimiter=field_sep, doublequote=doublequote, escapechar=escape)
        # Process PDF
        rsrcmgr = PDFResourceManager(caching=True)
        device = CsvConverter(rsrcmgr, csv_stream, codec=charset)
        process_pdf(rsrcmgr, device, pdf_stream, pagenos=set(), password=password, caching=True, check_extractable=True)
        device.close()
        pdf_stream.close()
        csv_stream.close()
        ta_to.update(statust=OK,filename=tofilename) #update outmessage transaction with ta_info;
        botsglobal.logger.debug(_(u'        File written: "%s".'),tofilename)
    except:
        #any failure (bad PDF, wrong password, pdfminer error): log traceback and re-raise as bots error
        txt=botslib.txtexc()
        botsglobal.logger.error(_(u'PDF extraction failed, may not be a PDF file? Error:\n%s'),txt)
        raise botslib.InMessageError(_(u'PDF extraction failed, may not be a PDF file? Error:\n$error'),error=txt)
def extractexcel(ta_from,endstatus,**argv):
    ''' extract excel file.
        editype & messagetype are unchanged.
        converts the first sheet of an xls file to a csv file (via xlrd); csv parameters
        (charset, quotechar, field_sep, escape) can be passed in **argv.
    '''
    #***functions used by extractexcel
    #-------------------------------------------------------------------------------
    def read_xls(infilename):
        # Read excel first sheet into a 2-d array
        book = xlrd.open_workbook(infilename)
        sheet = book.sheet_by_index(0)
        #python2-only tuple-parameter lambda: unpacks a (type,value) pair per cell
        formatter = lambda(t,v): format_excelval(book,t,v,False)
        xlsdata = []
        for row in range(sheet.nrows):
            (types, values) = (sheet.row_types(row), sheet.row_values(row))
            xlsdata.append(map(formatter, zip(types, values)))
        return xlsdata
    #-------------------------------------------------------------------------------
    def dump_csv(xlsdata, tofilename):
        # write the 2-d array as a csv file (each cell encoded via utf8ize)
        stream = botslib.opendata(tofilename, 'wb')
        csvout = csv.writer(stream, quotechar=quotechar, delimiter=field_sep, doublequote=doublequote, escapechar=escape)
        csvout.writerows( map(utf8ize, xlsdata) )
        stream.close()
    #-------------------------------------------------------------------------------
    def format_excelval(book, type, value, wanttupledate):
        # Clean up the incoming excel data for some data types
        # note: 'type' is the xlrd cell type code (2=number, 3=date, 5=error - per xlrd docs)
        returnrow = []
        if type == 2:
            #whole numbers come back from xlrd as floats; present them as ints
            if value == int(value):
                value = int(value)
        elif type == 3:
            datetuple = xlrd.xldate_as_tuple(value, book.datemode)
            value = datetuple if wanttupledate else tupledate_to_isodate(datetuple)
        elif type == 5:
            value = xlrd.error_text_from_code[value]
        return value
    #-------------------------------------------------------------------------------
    def tupledate_to_isodate(tupledate):
        # Turns a gregorian (year, month, day, hour, minute, nearest_second) into a
        # standard YYYY-MM-DDTHH:MM:SS ISO date.
        (y,m,d, hh,mm,ss) = tupledate
        nonzero = lambda n: n!=0
        date = "%04d-%02d-%02d" % (y,m,d) if filter(nonzero, (y,m,d)) else ''
        time = "T%02d:%02d:%02d" % (hh,mm,ss) if filter(nonzero, (hh,mm,ss)) or not date else ''
        return date+time
    #-------------------------------------------------------------------------------
    def utf8ize(l):
        # Make string-like things into utf-8, leave other things alone
        return [unicode(s).encode(charset) if hasattr(s,'encode') else s for s in l]
    #***end functions used by extractexcel
    import xlrd
    import csv
    #get parameters for csv-format; defaults are as the csv defaults (in grammar.py)
    charset = argv.get('charset',"utf-8")
    quotechar = argv.get('quotechar',"'")
    field_sep = argv.get('field_sep',':')
    escape = argv.get('escape','')
    #csv convention: either escape or doublequote is used, never both
    if escape:
        doublequote = False
    else:
        doublequote = True
    try:
        infilename = botslib.abspathdata(ta_from.filename)
        xlsdata = read_xls(infilename)
        ta_to = ta_from.copyta(status=endstatus)
        tofilename = str(ta_to.idta)
        dump_csv(xlsdata,tofilename)
        ta_to.update(statust=OK,filename=tofilename) #update outmessage transaction with ta_info;
        botsglobal.logger.debug(_(u'        File written: "%s".'),tofilename)
    except:
        #any failure (not an xls file, xlrd error): log traceback and re-raise as bots error
        txt=botslib.txtexc()
        botsglobal.logger.error(_(u'Excel extraction failed, may not be an Excel file? Error:\n%s'),txt)
        raise botslib.InMessageError(_(u'Excel extraction failed, may not be an Excel file? Error:\n$error'),error=txt)
| Python |
#bots modules
import botslib
import botsglobal
from botsconfig import *
from django.utils.translation import ugettext as _
#comma-separated list of the columns of the 'ta' (transaction) table needed for tracing;
#concatenated into the SELECT clauses of the queries below.
tavars = 'idta,statust,divtext,child,ts,filename,status,idroute,fromchannel,tochannel,frompartner,topartner,frommail,tomail,contenttype,nrmessages,editype,messagetype,errortext,script'
def evaluate(type,stuff2evaluate):
    ''' entry point of the automatic maintenance: dispatches to the normal run
        evaluation or (for the retry commands) to the retry-run evaluation.
        returns 0 if no error, 1 if an error occurred.
    '''
    #errors during evaluation itself should of course not happen; catch them anyway
    retrytypes = ('--retry','--retrycommunication','--automaticretrycommunication')
    runner = evaluateretryrun if type in retrytypes else evaluaterun
    try:
        return runner(type,stuff2evaluate)
    except:
        botsglobal.logger.exception(_(u'Error in automatic maintenance.'))
        return 1    #there has been an error!
def evaluaterun(type,stuff2evaluate):
    ''' traces all received files.
        Writes a filereport for each received file, then a summary report
        for the whole run based on the tallied trace results.
    '''
    #per-status tally of all filereports; input for the run report
    tally = dict.fromkeys((OPEN,ERROR,OK,DONE),0)
    #look at infiles (status EXTERNIN) from this run; trace each to determine its tracestatus.
    for row in botslib.query('''SELECT ''' + tavars + '''
                            FROM ta
                            WHERE idta > %(rootidta)s
                            AND status=%(status)s ''',
                            {'status':EXTERNIN,'rootidta':stuff2evaluate}):
        botsglobal.logger.debug(u'evaluate %s.',row['idta'])
        filetrace = Trace(row,stuff2evaluate)
        tally[filetrace.statusttree] += 1
        insert_filereport(filetrace)
        #drop the reference to the ta-tree so it can be garbage collected
        del filetrace.ta
        del filetrace
    return finish_evaluation(stuff2evaluate,tally,type)
def evaluateretryrun(type,stuff2evaluate):
    ''' like evaluaterun, but for a retry run: re-evaluates every file whose latest
        filereport is not DONE yet; writes a new filereport per file and a run report.
        returns 0 immediately when there was nothing to retry.
    '''
    resultlast={OPEN:0,ERROR:0,OK:0,DONE:0}     #gather results of all filereports for the runreport
    didretry = False
    #select the files for which the most recent filereport is not DONE
    for row in botslib.query('''SELECT idta
                                FROM filereport
                                GROUP BY idta
                                HAVING MAX(statust) != %(statust)s''',
                                {'statust':DONE}):
        didretry = True
        #fetch the corresponding ta-row; for/break/else fetches exactly one row and detects absence
        for tadict in botslib.query('''SELECT ''' + tavars + '''
                                    FROM ta
                                    WHERE idta= %(idta)s ''',
                                    {'idta':row['idta']}):
            break
        else: #there really should be a corresponding ta
            raise botslib.PanicError(_(u'MaintenanceRetry: could not find transaction "$txt".'),txt=row['idta'])
        mytrace = Trace(tadict,stuff2evaluate)
        resultlast[mytrace.statusttree]+=1
        if mytrace.statusttree == DONE:
            #file is processed correctly now: do not repeat the old errortext in the new filereport
            mytrace.errortext = ''
        #~ mytrace.ta.update(tracestatus=mytrace.statusttree)
        #ts for retried filereports is tricky: is this the time the file was originally received? best would be to use ts of prepare...
        #that is quite difficult, so use time of this run
        rootta=botslib.OldTransaction(stuff2evaluate)
        rootta.syn('ts') #get the timestamp of this run
        mytrace.ts = rootta.ts
        insert_filereport(mytrace)
        del mytrace.ta
        del mytrace
    if not didretry:
        return 0 #no error
    return finish_evaluation(stuff2evaluate,resultlast,type)
def insert_filereport(mytrace):
    ''' write one row to the filereport table: the gathered result of tracing one incoming file.
        all values come from the attributes that Trace._gatherfilereportdata() filled in.
    '''
    botslib.change(u'''INSERT INTO filereport (idta,statust,reportidta,retransmit,idroute,fromchannel,ts,
                                    infilename,tochannel,frompartner,topartner,frommail,
                                    tomail,ineditype,inmessagetype,outeditype,outmessagetype,
                                    incontenttype,outcontenttype,nrmessages,outfilename,errortext,
                                    divtext,outidta)
                        VALUES  (%(idta)s,%(statust)s,%(reportidta)s,%(retransmit)s,%(idroute)s,%(fromchannel)s,%(ts)s,
                                    %(infilename)s,%(tochannel)s,%(frompartner)s,%(topartner)s,%(frommail)s,
                                    %(tomail)s,%(ineditype)s,%(inmessagetype)s,%(outeditype)s,%(outmessagetype)s,
                                    %(incontenttype)s,%(outcontenttype)s,%(nrmessages)s,%(outfilename)s,%(errortext)s,
                                    %(divtext)s,%(outidta)s )
                        ''',
                        {'idta':mytrace.idta,'statust':mytrace.statusttree,'reportidta':mytrace.reportidta,
                        'retransmit':mytrace.retransmit,'idroute':mytrace.idroute,'fromchannel':mytrace.fromchannel,
                        'ts':mytrace.ts,'infilename':mytrace.infilename,'tochannel':mytrace.tochannel,
                        'frompartner':mytrace.frompartner,'topartner':mytrace.topartner,'frommail':mytrace.frommail,
                        'tomail':mytrace.tomail,'ineditype':mytrace.ineditype,'inmessagetype':mytrace.inmessagetype,
                        'outeditype':mytrace.outeditype,'outmessagetype':mytrace.outmessagetype,
                        'incontenttype':mytrace.incontenttype,'outcontenttype':mytrace.outcontenttype,
                        'nrmessages':mytrace.nrmessages,'outfilename':mytrace.outfilename,'errortext':mytrace.errortext,
                        'divtext':mytrace.divtext,'outidta':mytrace.outidta})
def finish_evaluation(stuff2evaluate,resultlast,type):
    ''' write the run report row (table 'report') from the tallied filereport results,
        the number of files sent and the number of process errors; then generate the
        textual report. returns the report status: 0 (no error) or 1 (error).
    '''
    #count nr files send
    for row in botslib.query('''SELECT COUNT(*) as count
                                FROM ta
                                WHERE idta > %(rootidta)s
                                AND status=%(status)s
                                AND statust=%(statust)s ''',
                                {'status':EXTERNOUT,'rootidta':stuff2evaluate,'statust':DONE}):
        send = row['count']
    #count process errors
    #NOTE(review): this query uses 'idta >= %(rootidta)s' where the send-count above uses '>';
    #presumably intended so the run's root process-ta itself is included - confirm.
    for row in botslib.query('''SELECT COUNT(*) as count
                                FROM ta
                                WHERE idta >= %(rootidta)s
                                AND status=%(status)s
                                AND statust=%(statust)s''',
                                {'status':PROCESS,'rootidta':stuff2evaluate,'statust':ERROR}):
        processerrors = row['count']
    #generate report (in database)
    rootta=botslib.OldTransaction(stuff2evaluate)
    rootta.syn('ts') #get the timestamp of this run
    LastReceived=resultlast[DONE]+resultlast[OK]+resultlast[OPEN]+resultlast[ERROR]
    #status is True (error) when anything is not DONE or a process error occurred
    status = bool(resultlast[OK]+resultlast[OPEN]+resultlast[ERROR]+processerrors)
    botslib.change(u'''INSERT INTO report (idta,lastopen,lasterror,lastok,lastdone,
                                            send,processerrors,ts,lastreceived,status,type)
                            VALUES  (%(idta)s,
                                    %(lastopen)s,%(lasterror)s,%(lastok)s,%(lastdone)s,
                                    %(send)s,%(processerrors)s,%(ts)s,%(lastreceived)s,%(status)s,%(type)s)
                            ''',
                            {'idta':stuff2evaluate,
                            'lastopen':resultlast[OPEN],'lasterror':resultlast[ERROR],'lastok':resultlast[OK],'lastdone':resultlast[DONE],
                            'send':send,'processerrors':processerrors,'ts':rootta.ts,'lastreceived':LastReceived,'status':status,'type':type[2:]})
    return generate_report(stuff2evaluate) #return report status: 0 (no error) or 1 (error)
def generate_report(stuff2evaluate):
    ''' read back the report-row of this run, build a human-readable summary,
        log it, and email it when errors warrant a report.
        returns the report status: 0 (no error) or 1 (error).
    '''
    #for/break/else: fetch exactly the one report row; raise when it is missing
    for results in botslib.query('''SELECT idta,lastopen,lasterror,lastok,lastdone,
                                            send,processerrors,ts,lastreceived,type,status
                                    FROM report
                                    WHERE idta=%(rootidta)s''',
                                    {'rootidta':stuff2evaluate}):
        break
    else:
        raise botslib.PanicError(_(u'In generate report: could not find report?'))
    subject = _(u'[Bots Error Report] %(time)s')%{'time':str(results['ts'])[:16]}
    reporttext = _(u'Bots Report; type: %(type)s, time: %(time)s\n')%{'type':results['type'],'time':str(results['ts'])[:19]}
    reporttext += _(u'    %d files received/processed in run.\n')%(results['lastreceived'])
    if results['lastdone']:
        reporttext += _(u'    %d files without errors,\n')%(results['lastdone'])
    if results['lasterror']:
        subject += _(u'; %d file errors')%(results['lasterror'])
        reporttext += _(u'    %d files with errors,\n')%(results['lasterror'])
    if results['lastok']:
        subject += _(u'; %d files stuck')%(results['lastok'])
        reporttext += _(u'    %d files got stuck,\n')%(results['lastok'])
    if results['lastopen']:
        subject += _(u'; %d system errors')%(results['lastopen'])
        reporttext += _(u'    %d system errors,\n')%(results['lastopen'])
    if results['processerrors']:
        subject += _(u'; %d process errors')%(results['processerrors'])
        reporttext += _(u'    %d errors in processes.\n')%(results['processerrors'])
    reporttext += _(u'    %d files send in run.\n')%(results['send'])
    botsglobal.logger.info(reporttext)
    # sendreportifprocesserror allows blocking of email reports for process errors
    if (results['lasterror'] or results['lastopen'] or results['lastok'] or
        (results['processerrors'] and botsglobal.ini.getboolean('settings','sendreportifprocesserror',True))):
        botslib.sendbotserrorreport(subject,reporttext)
    return int(results['status']) #return report status: 0 (no error) or 1 (error)
class Trace(object):
    ''' ediobject-ta's form a tree; the incoming ediobject-ta (status EXTERNIN) is root.
        (yes, this works for merging, strange but inherent).
        tree gets a (one) statust, by walking the tree and evaluating the statust of nodes.
        all nodes are put into a tree of ta-objects;
    '''
    def __init__(self,tadict,stuff2evaluate):
        #tadict: db-row (dict-like) of the root ta; stuff2evaluate: idta of this run (rootidta)
        realdict = dict([(key,tadict[key]) for key in tadict.keys()])   #copy to a plain dict; db-row may not be a real dict
        self.ta=botslib.OldTransaction(**realdict)
        self.rootidta = stuff2evaluate
        self._buildevaluationstructure(self.ta)
        #~ self.display(self.ta)
        self._evaluatestatus()
        self._gatherfilereportdata()
    def display(self,currentta,level=0):
        #debugging aid: print the ta-tree, indented by depth
        print level*'    ',currentta.idta,currentta.statust,currentta.talijst
        for ta in currentta.talijst:
            self.display(ta,level+1)
    def _buildevaluationstructure(self,tacurrent):
        ''' recursive,for each db-ta:
            - fill global talist with the children (and children of children, etc)
        '''
        #gather next steps/ta's for tacurrent;
        if tacurrent.child: #find successor by using child relation ship
            for row in botslib.query('''SELECT ''' + tavars + '''
                                        FROM ta
                                        WHERE idta=%(child)s''',
                                        {'child':tacurrent.child}):
                realdict = dict([(key,row[key]) for key in row.keys()])
                tacurrent.talijst = [botslib.OldTransaction(**realdict)]
        else: #find successor by using parent-relationship; mostly this relation except for merge operations
            talijst = []
            for row in botslib.query('''SELECT ''' + tavars + '''
                                        FROM ta
                                        WHERE idta > %(currentidta)s
                                        AND parent=%(currentidta)s ''', #adding the idta > %(parent)s to selection speeds up a lot.
                                        {'currentidta':tacurrent.idta}):
                realdict = dict([(key,row[key]) for key in row.keys()])
                talijst.append(botslib.OldTransaction(**realdict))
            #filter:
            #one ta might have multiple children; 2 possible reasons for that:
            #1. split up
            #2. error is processing the file; and retried
            #Here case 2 (error/retry) is filtered; it is not interesting to evaluate the older errors!
            #So: if the same filename and different script: use newest idta
            #shortcut: when an error occurs in a split all is turned back.
            #so: split up is OK as a whole or because of retries.
            #so: if split, and different scripts: split is becaue of retries: use newest idta.
            #~ print tacurrent.talijst
            if len(talijst) > 1 and talijst[0].script != talijst[1].script:
                #find higest idta
                highest_ta = talijst[0]
                for ta in talijst[1:]:
                    if ta.idta > highest_ta.idta:
                        highest_ta = ta
                tacurrent.talijst = [highest_ta]
            else:
                tacurrent.talijst = talijst
        #recursive build:
        for child in tacurrent.talijst:
            self._buildevaluationstructure(child)
    def _evaluatestatus(self):
        #determine the single tree status (self.statusttree) for the whole ta-tree;
        #the exceptions raised by _evaluatetreestatus are mapped to a status here.
        self.done = False
        try:
            self.statusttree = self._evaluatetreestatus(self.ta)
            if self.statusttree == OK:
                self.statusttree = ERROR #this is ugly!!
        except botslib.TraceNotPickedUpError:
            self.statusttree = OK
        except: #botslib.TraceError:
            self.statusttree = OPEN
    def _evaluatetreestatus(self,tacurrent):
        ''' recursive, walks tree of ediobject-ta, depth-first
            for each db-ta:
            - get statust of all child-db-ta (recursive); count these statust's
            - evaluate this
            rules for evaluating:
            - typical error-situation: DONE->OK->ERROR
            - Db-ta with statust OK will be picked up next botsrun.
            - if succes on next botsrun: DONE-> DONE-> ERROR
                                            -> DONE
            - one db-ta can have more children; each of these children has to evaluated
            - not possible is: DONE-> ERROR (because there should always be statust OK)
        '''
        statustcount = [0,0,0,0] #count of statust: number of OPEN, ERROR, OK, DONE
        for child in tacurrent.talijst:
            if child.idta > self.rootidta:
                #child was (re)created in the current run
                self.done = True
            statustcount[self._evaluatetreestatus(child)]+=1
        else: #evaluate & return statust of current ta & children;
            #note: for/else with no break in the loop - the else-block always runs (also for an empty talijst)
            if tacurrent.statust==DONE:
                if statustcount[OK]:
                    return OK #at least one of the child-trees is not DONE
                elif statustcount[DONE]:
                    return DONE #all is OK
                elif statustcount[ERROR]:
                    raise botslib.TraceError(_(u'DONE but no child is DONE or OK (idta: $idta).'),idta=tacurrent.idta)
                else: #if no ERROR and has no children: end of trace
                    return DONE
            elif tacurrent.statust==OK:
                if statustcount[ERROR]:
                    return OK #child(ren) ERROR, this is expected
                elif statustcount[DONE]:
                    raise botslib.TraceError(_(u'OK but child is DONE (idta: $idta). Changing setup while errors are pending?'),idta=tacurrent.idta)
                elif statustcount[OK]:
                    raise botslib.TraceError(_(u'OK but child is OK (idta: $idta). Changing setup while errors are pending?'),idta=tacurrent.idta)
                else:
                    raise botslib.TraceNotPickedUpError(_(u'OK but file is not processed further (idta: $idta).'),idta=tacurrent.idta)
            elif tacurrent.statust==ERROR:
                if tacurrent.talijst:
                    raise botslib.TraceError(_(u'ERROR but has child(ren) (idta: $idta). Changing setup while errors are pending?'),idta=tacurrent.idta)
                else:
                    #~ self.errorta += [tacurrent]
                    return ERROR
            else: #tacurrent.statust==OPEN
                raise botslib.TraceError(_(u'Severe error: found statust (idta: $idta).'),idta=tacurrent.idta)
    def _gatherfilereportdata(self):
        ''' Walk the ta-tree again in order to retrieve information/data belonging to incoming file; statust (OK, DONE, ERROR etc) is NOT done here.
            If information is different in different ta's: place '*'
            Start 'root'-ta; a file coming in; status=EXTERNIN. Retrieve as much information from ta's as possible for the filereport.
        '''
        def core(ta):
            #recursive: merge the relevant fields of one ta into the filereport attributes;
            #when a field already has a (different) value, it is replaced by '*' (if 'asterisk' is set)
            if ta.status==MIMEIN:
                self.frommail=ta.frommail
                self.tomail=ta.tomail
                self.incontenttype=ta.contenttype
            elif ta.status==RAWOUT:
                if ta.frommail:
                    if self.frommail:
                        if self.frommail != ta.frommail and asterisk:
                            self.frommail='*'
                    else:
                        self.frommail=ta.frommail
                if ta.tomail:
                    if self.tomail:
                        if self.tomail != ta.tomail and asterisk:
                            self.tomail='*'
                    else:
                        self.tomail=ta.tomail
                if ta.contenttype:
                    if self.outcontenttype:
                        if self.outcontenttype != ta.contenttype and asterisk:
                            self.outcontenttype='*'
                    else:
                        self.outcontenttype=ta.contenttype
                if ta.idta:
                    if self.outidta:
                        if self.outidta != ta.idta and asterisk:
                            self.outidta=0
                    else:
                        self.outidta=ta.idta
            elif ta.status==TRANSLATE:
                #self.ineditype=ta.editype
                if self.ineditype:
                    if self.ineditype!=ta.editype and asterisk:
                        self.ineditype='*'
                else:
                    self.ineditype=ta.editype
            elif ta.status==SPLITUP:
                #each SPLITUP-ta is one message within the incoming file
                self.nrmessages+=1
                if self.inmessagetype:
                    if self.inmessagetype!=ta.messagetype and asterisk:
                        self.inmessagetype='*'
                else:
                    self.inmessagetype=ta.messagetype
            elif ta.status==TRANSLATED:
                #self.outeditype=ta.editype
                if self.outeditype:
                    if self.outeditype!=ta.editype and asterisk:
                        self.outeditype='*'
                else:
                    self.outeditype=ta.editype
                if self.outmessagetype:
                    if self.outmessagetype!=ta.messagetype and asterisk:
                        self.outmessagetype='*'
                else:
                    self.outmessagetype=ta.messagetype
                if self.divtext:
                    if self.divtext!=ta.divtext and asterisk:
                        self.divtext='*'
                else:
                    self.divtext=ta.divtext
            elif ta.status==EXTERNOUT:
                if self.outfilename:
                    if self.outfilename != ta.filename and asterisk:
                        self.outfilename='*'
                else:
                    self.outfilename=ta.filename
                if self.tochannel:
                    if self.tochannel != ta.tochannel and asterisk:
                        self.tochannel='*'
                else:
                    self.tochannel=ta.tochannel
            #partners can be filled in by any ta in the tree
            if ta.frompartner:
                if not self.frompartner:
                    self.frompartner=ta.frompartner
                elif self.frompartner!=ta.frompartner and asterisk:
                    self.frompartner='*'
            if ta.topartner:
                if not self.topartner:
                    self.topartner=ta.topartner
                elif self.topartner!=ta.topartner and asterisk:
                    self.topartner='*'
            if ta.errortext:
                #last errortext found in the tree wins
                self.errortext = ta.errortext
            for child in ta.talijst:
                core(child)
        #end of core function
        #when 'multiplevaluesasterisk' is off, the first value found is kept instead of '*'
        asterisk = botsglobal.ini.getboolean('settings','multiplevaluesasterisk',True)
        #initialise all filereport fields (these become the columns written by insert_filereport)
        self.idta = self.ta.idta
        self.reportidta = self.rootidta
        self.retransmit = 0
        self.idroute = self.ta.idroute
        self.fromchannel = self.ta.fromchannel
        self.ts = self.ta.ts
        self.infilename = self.ta.filename
        self.tochannel = ''
        self.frompartner = ''
        self.topartner = ''
        self.frommail = ''
        self.tomail = ''
        self.ineditype = ''
        self.inmessagetype = ''
        self.outeditype = ''
        self.outmessagetype = ''
        self.incontenttype = ''
        self.outcontenttype = ''
        self.nrmessages = 0
        self.outfilename = ''
        self.outidta = 0
        self.errortext = ''
        self.divtext = ''
        core(self.ta)
| Python |
'''module contains the functions to be called from user scripts'''
try:
import cPickle as pickle
except:
import pickle
import copy
import collections
from django.utils.translation import ugettext as _
#bots-modules
import botslib
import botsglobal
import inmessage
import outmessage
from botsconfig import *
#*******************************************************************************************************************
#****** functions imported from other modules. reason: user scripting uses primary transform functions *************
#*******************************************************************************************************************
from botslib import addinfo,updateinfo,changestatustinfo,checkunique
from envelope import mergemessages
from communication import run
@botslib.log_session
def translate(startstatus=TRANSLATE,endstatus=TRANSLATED,idroute=''):
''' translates edifiles in one or more edimessages.
reads and parses edifiles that have to be translated.
tries to split files into messages (using 'nextmessage' of grammar); if no splitting: edifile is one message.
searches the right translation in translate-table;
runs the mapping-script for the translation;
Function takes db-ta with status=TRANSLATE->PARSED->SPLITUP->TRANSLATED
'''
#select edifiles to translate; fill ta-object
#~ import gc
#~ gc.disable()
for row in botslib.query(u'''SELECT idta,frompartner,topartner,filename,messagetype,testindicator,editype,charset,alt,fromchannel
FROM ta
WHERE idta>%(rootidta)s
AND status=%(status)s
AND statust=%(statust)s
AND idroute=%(idroute)s
''',
{'status':startstatus,'statust':OK,'idroute':idroute,'rootidta':botslib.get_minta4query()}):
try:
ta_fromfile=botslib.OldTransaction(row['idta']) #TRANSLATE ta
ta_parsedfile = ta_fromfile.copyta(status=PARSED) #copy TRANSLATE to PARSED ta
#whole edi-file is read, parsed and made into a inmessage-object:
edifile = inmessage.edifromfile(frompartner=row['frompartner'],
topartner=row['topartner'],
filename=row['filename'],
messagetype=row['messagetype'],
testindicator=row['testindicator'],
editype=row['editype'],
charset=row['charset'],
alt=row['alt'],
fromchannel=row['fromchannel'],
idroute=idroute)
botsglobal.logger.debug(u'start read and parse input file "%s" editype "%s" messagetype "%s".',row['filename'],row['editype'],row['messagetype'])
for inn in edifile.nextmessage(): #for each message in the edifile:
#inn.ta_info: parameters from inmessage.edifromfile(), syntax-information and parse-information
ta_frommes=ta_parsedfile.copyta(status=SPLITUP) #copy PARSED to SPLITUP ta
inn.ta_info['idta_fromfile'] = ta_fromfile.idta #for confirmations in user script; used to give idta of 'confirming message'
ta_frommes.update(**inn.ta_info) #update ta-record SLIPTUP with info from message content and/or grammar
while 1: #whileloop continues as long as there are alt-translations
#************select parameters for translation(script):
for row2 in botslib.query(u'''SELECT tscript,tomessagetype,toeditype
FROM translate
WHERE frommessagetype = %(frommessagetype)s
AND fromeditype = %(fromeditype)s
AND active=%(booll)s
AND alt=%(alt)s
AND (frompartner_id IS NULL OR frompartner_id=%(frompartner)s OR frompartner_id in (SELECT to_partner_id
FROM partnergroup
WHERE from_partner_id=%(frompartner)s ))
AND (topartner_id IS NULL OR topartner_id=%(topartner)s OR topartner_id in (SELECT to_partner_id
FROM partnergroup
WHERE from_partner_id=%(topartner)s ))
ORDER BY alt DESC,
CASE WHEN frompartner_id IS NULL THEN 1 ELSE 0 END, frompartner_id ,
CASE WHEN topartner_id IS NULL THEN 1 ELSE 0 END, topartner_id ''',
{'frommessagetype':inn.ta_info['messagetype'],
'fromeditype':inn.ta_info['editype'],
'alt':inn.ta_info['alt'],
'frompartner':inn.ta_info['frompartner'],
'topartner':inn.ta_info['topartner'],
'booll':True}):
break #escape if found; we need only the first - ORDER BY in the query
else: #no translation record is found
raise botslib.TranslationNotFoundError(_(u'Editype "$editype", messagetype "$messagetype", frompartner "$frompartner", topartner "$topartner", alt "$alt"'),
editype=inn.ta_info['editype'],
messagetype=inn.ta_info['messagetype'],
frompartner=inn.ta_info['frompartner'],
topartner=inn.ta_info['topartner'],
alt=inn.ta_info['alt'])
ta_tomes=ta_frommes.copyta(status=endstatus) #copy SPLITUP to TRANSLATED ta
tofilename = str(ta_tomes.idta)
tscript=row2['tscript']
tomessage = outmessage.outmessage_init(messagetype=row2['tomessagetype'],editype=row2['toeditype'],filename=tofilename,reference=unique('messagecounter'),statust=OK,divtext=tscript) #make outmessage object
#copy ta_info
botsglobal.logger.debug(u'script "%s" translates messagetype "%s" to messagetype "%s".',tscript,inn.ta_info['messagetype'],tomessage.ta_info['messagetype'])
translationscript,scriptfilename = botslib.botsimport('mappings',inn.ta_info['editype'] + '.' + tscript) #get the mapping-script
doalttranslation = botslib.runscript(translationscript,scriptfilename,'main',inn=inn,out=tomessage)
botsglobal.logger.debug(u'script "%s" finished.',tscript)
if 'topartner' not in tomessage.ta_info: #tomessage does not contain values from ta......
tomessage.ta_info['topartner']=inn.ta_info['topartner']
if tomessage.ta_info['statust'] == DONE: #if indicated in user script the message should be discarded
botsglobal.logger.debug(u'No output file because mapping script explicitly indicated this.')
tomessage.ta_info['filename'] = ''
tomessage.ta_info['status'] = DISCARD
else:
botsglobal.logger.debug(u'Start writing output file editype "%s" messagetype "%s".',tomessage.ta_info['editype'],tomessage.ta_info['messagetype'])
tomessage.writeall() #write tomessage (result of translation).
#problem is that not all values ta_tomes are know to to_message....
#~ print 'tomessage.ta_info',tomessage.ta_info
ta_tomes.update(**tomessage.ta_info) #update outmessage transaction with ta_info;
del tomessage
#~ gc.collect()
if not doalttranslation:
break #out of while loop
else:
inn.ta_info['alt'] = doalttranslation
#end of while-loop
#~ print inn.ta_info
ta_frommes.update(statust=DONE,**inn.ta_info) #update db. inn.ta_info could be changed by script. Is this useful?
del inn
#~ gc.collect()
#exceptions file_in-level
except:
#~ edifile.handleconfirm(ta_fromfile,error=True) #only useful if errors are reported in acknowledgement (eg x12 997). Not used now.
txt=botslib.txtexc()
ta_parsedfile.failure()
ta_parsedfile.update(statust=ERROR,errortext=txt)
botsglobal.logger.debug(u'error in translating input file "%s":\n%s',row['filename'],txt)
else:
edifile.handleconfirm(ta_fromfile,error=False)
ta_fromfile.update(statust=DONE)
ta_parsedfile.update(statust=DONE,**edifile.confirminfo)
botsglobal.logger.debug(u'translated input file "%s".',row['filename'])
del edifile
#~ gc.collect()
#~ gc.enable()
#*********************************************************************
#*** utily functions for persist: store things in the bots database.
#*** this is intended as a memory stretching across messages.
#*********************************************************************
def persist_add(domein,botskey,value):
    ''' Store a persistent value in the bots database (persist table).
        The value is pickled (protocol 0, ascii) before storing.
        Raises botslib.PersistError if the pickled value is too long for the
        database column, or if the insert fails (eg the key already exists).
    '''
    content = pickle.dumps(value,0)
    #sqlite has no real column length limit; other engines limit content to 1024
    if botsglobal.settings.DATABASE_ENGINE != 'sqlite3' and len(content)>1024:
        raise botslib.PersistError(_(u'Data too long for domein "$domein", botskey "$botskey", value "$value".'),domein=domein,botskey=botskey,value=value)
    try:
        botslib.change(u'''INSERT INTO persist (domein,botskey,content)
                            VALUES (%(domein)s,%(botskey)s,%(content)s)''',
                            {'domein':domein,'botskey':botskey,'content':content})
    except Exception:   #was a bare except; do not swallow SystemExit/KeyboardInterrupt
        raise botslib.PersistError(_(u'Failed to add for domein "$domein", botskey "$botskey", value "$value".'),domein=domein,botskey=botskey,value=value)
def persist_update(domein,botskey,value):
    ''' Update an existing persistent value in db.
        The value is pickled (protocol 0) before storing.
        NOTE(review): presumably a no-op if the (domein,botskey) row does not
        exist (plain SQL UPDATE) — confirm against botslib.change.
    '''
    content = pickle.dumps(value,0)
    #sqlite has no real column length limit; other engines limit content to 1024
    if botsglobal.settings.DATABASE_ENGINE != 'sqlite3' and len(content)>1024:
        raise botslib.PersistError(_(u'Data too long for domein "$domein", botskey "$botskey", value "$value".'),domein=domein,botskey=botskey,value=value)
    botslib.change(u'''UPDATE persist
                        SET content=%(content)s
                        WHERE domein=%(domein)s
                        AND botskey=%(botskey)s''',
                        {'domein':domein,'botskey':botskey,'content':content})
def persist_add_update(domein,botskey,value):
    ''' Add the persistent value, or update it if it is already there. '''
    try:
        persist_add(domein,botskey,value)
    except botslib.PersistError:    #add failed (eg row already exists): update instead.
        #was a bare except; unexpected errors (programming errors etc) now propagate.
        persist_update(domein,botskey,value)
def persist_delete(domein,botskey):
    ''' Delete a persistent value from db.
        (Old docstring said "store"; this function deletes.)
    '''
    botslib.change(u'''DELETE FROM persist
                        WHERE domein=%(domein)s
                        AND botskey=%(botskey)s''',
                        {'domein':domein,'botskey':botskey})
def persist_lookup(domein,botskey):
    ''' Lookup a persistent value in db.
        Returns the unpickled value, or None if (domein,botskey) is not there.
    '''
    for row in botslib.query(u'''SELECT content
                                FROM persist
                                WHERE domein=%(domein)s
                                AND botskey=%(botskey)s''',
                                {'domein':domein,'botskey':botskey}):
        return pickle.loads(str(row['content']))    #str(): pickle.loads needs a plain (byte)string
    return None
#*********************************************************************
#*** utily functions for codeconversion
#*** 2 types: codeconversion via database tabel ccode, and via file.
#*** 20111116: codeconversion via file is depreciated, will disappear.
#*********************************************************************
#***code conversion via database tabel ccode
def ccode(ccodeid,leftcode,field='rightcode'):
    ''' converts code using a db-table.
        converted value is returned, exception if not there.
        field: which column of the matching ccode row to return.
        NOTE(review): "field" is concatenated into the SQL statement unescaped;
        only pass trusted/hard-coded column names (SQL injection risk otherwise).
    '''
    for row in botslib.query(u'''SELECT ''' +field+ '''
                                FROM ccode
                                WHERE ccodeid_id = %(ccodeid)s
                                AND leftcode = %(leftcode)s''',
                                {'ccodeid':ccodeid,
                                'leftcode':leftcode,
                                }):
        return row[field]   #first matching row wins
    raise botslib.CodeConversionError(_(u'Value "$value" not in code-conversion, user table "$table".'),value=leftcode,table=ccodeid)
codetconversion = ccode     #alias, presumably the old name — kept for compatibility
def safe_ccode(ccodeid,leftcode,field='rightcode'):
    ''' Convert a code via the ccode db-table (like ccode()), but when no
        conversion is found the original code is returned instead of raising.
    '''
    try:
        converted = ccode(ccodeid,leftcode,field)
    except botslib.CodeConversionError:
        return leftcode
    return converted
safecodetconversion = safe_ccode
def reverse_ccode(ccodeid,rightcode,field='leftcode'):
    ''' as ccode but reversed lookup: search on rightcode, return "field"
        (default: leftcode). Raises CodeConversionError if not found.
        NOTE(review): "field" is concatenated into the SQL unescaped; only pass
        trusted/hard-coded column names.
    '''
    for row in botslib.query(u'''SELECT ''' +field+ '''
                                FROM ccode
                                WHERE ccodeid_id = %(ccodeid)s
                                AND rightcode = %(rightcode)s''',
                                {'ccodeid':ccodeid,
                                'rightcode':rightcode,
                                }):
        return row[field]   #first matching row wins
    raise botslib.CodeConversionError(_(u'Value "$value" not in code-conversion, user table "$table".'),value=rightcode,table=ccodeid)
rcodetconversion = reverse_ccode    #alias, presumably the old name — kept for compatibility
def safe_reverse_ccode(ccodeid,rightcode,field='leftcode'):
    ''' as safe_ccode but reversed lookup: if no conversion is found, the
        original code is returned instead of raising.
    '''
    try:
        #bug fix: this called ccode() before, doing a *forward* lookup on the
        #leftcode column instead of the reversed lookup the name promises.
        return reverse_ccode(ccodeid,rightcode,field)
    except botslib.CodeConversionError:
        return rightcode
safercodetconversion = safe_reverse_ccode
def getcodeset(ccodeid,leftcode,field='rightcode'):
    ''' Get a code set: all matching ccode rows for (ccodeid,leftcode) as a
        list; each row holds the selected "field" column (use row[field]).
        NOTE(review): "field" is concatenated into the SQL unescaped; only pass
        trusted/hard-coded column names.
    '''
    return list(botslib.query(u'''SELECT ''' +field+ '''
                                FROM ccode
                                WHERE ccodeid_id = %(ccodeid)s
                                AND leftcode = %(leftcode)s''',
                                {'ccodeid':ccodeid,
                                'leftcode':leftcode,
                                }))
#***code conversion via file. 20111116: depreciated
def safecodeconversion(modulename,value):
    ''' Convert a code using a codelist imported from a user file
        (codeconversions; lookup of the right place/module is in bots.ini).
        Unknown values are returned unchanged.
        Note: codeconversion via file is depreciated (20111116).
    '''
    module,filename = botslib.botsimport('codeconversions',modulename)
    conversions = module.codeconversions
    try:
        return conversions[value]
    except KeyError:
        return value
def codeconversion(modulename,value):
    ''' converts code using a codelist.
        converted value is returned; CodeConversionError if value is not in the codelist.
        codelist is first imported from file in codeconversions (lookup right place/module in bots.ini)
        Note: codeconversion via file is depreciated (20111116).
    '''
    module,filename = botslib.botsimport('codeconversions',modulename)
    try:
        return module.codeconversions[value]
    except KeyError:
        #unlike safecodeconversion: a missing value is an error here
        raise botslib.CodeConversionError(_(u'Value "$value" not in file for codeconversion "$filename".'),value=value,filename=filename)
def safercodeconversion(modulename,value):
    ''' As safecodeconversion, but looks up in the *reversed* codeconversions
        dictionary. The reversed dict is built once and cached on the imported
        module. Unknown values are returned unchanged.
    '''
    module,filename = botslib.botsimport('codeconversions',modulename)
    if not hasattr(module,'botsreversed'+'codeconversions'):
        #first use: build the reversed dict and cache it on the module
        setattr(module,'botsreversed'+'codeconversions',
                dict((entryvalue,entrykey) for entrykey,entryvalue in module.codeconversions.items()))
    try:
        return module.botsreversedcodeconversions[value]
    except KeyError:
        return value
def rcodeconversion(modulename,value):
    ''' as codeconversion but reverses the dictionary first.
        The reversed dict is cached on the imported module (same cache as
        safercodeconversion). Raises CodeConversionError for unknown values.
    '''
    module,filename = botslib.botsimport('codeconversions',modulename)
    if not hasattr(module,'botsreversed'+'codeconversions'):
        #first use: build the reversed dict and cache it on the module
        reversedict = dict((value,key) for key,value in module.codeconversions.items())
        setattr(module,'botsreversed'+'codeconversions',reversedict)
    try:
        return module.botsreversedcodeconversions[value]
    except KeyError:
        raise botslib.CodeConversionError(_(u'Value "$value" not in file for reversed codeconversion "$filename".'),value=value,filename=filename)
#*********************************************************************
#*** utily functions for calculating/generating/checking EAN/GTIN/GLN
#*********************************************************************
def calceancheckdigit(ean):
    ''' Calculate the check digit of an EAN/GTIN/GLN.
        Input: the EAN as a string of digits, *without* check digit.
        Returns the check digit as a one-character string.
        Raises botslib.EanError for non-string or non-numeric input.
    '''
    try:
        if not ean.isdigit():
            raise botslib.EanError(_(u'GTIN "$ean" should be string with only numericals'),ean=ean)
    except AttributeError:
        raise botslib.EanError(_(u'GTIN "$ean" should be string, but is a "$type"'),ean=ean,type=type(ean))
    #digits at odd positions counted from the right weigh 3, even positions weigh 1
    weighted3 = 3 * sum(int(digit) for digit in ean[-1::-2])
    weighted1 = sum(int(digit) for digit in ean[-2::-2])
    return str((1000 - (weighted3 + weighted1)) % 10)
def calceancheckdigit2(ean):
    ''' just for fun: slightly different algoritm for calculating the ean
        checkdigit. same results; is 10% faster.
    '''
    total = 0
    #walk from the rightmost digit; weight alternates 3,1,3,1,...
    for position, digit in enumerate(reversed(ean)):
        total += int(digit) * (3 if position % 2 == 0 else 1)
    return str((1000 - total) % 10)
def checkean(ean):
    ''' input: full EAN including check digit;
        returns True when the check digit is valid, False when it is not.'''
    body = ean[:-1]
    checkdigit = ean[-1]
    return checkdigit == calceancheckdigit(body)
def addeancheckdigit(ean):
    ''' input: EAN without check digit; returns the full EAN with the check digit appended.'''
    checkdigit = calceancheckdigit(ean)
    return ean + checkdigit
#*********************************************************************
#*** div utily functions for mappings
#*********************************************************************
def unique(domein):
    ''' generate unique number within range domein; returned as a string.
        uses db to keep track of last generated number.
        if domein not used before, initialized with 1.
    '''
    return str(botslib.unique(domein))
def inn2out(inn,out):
    ''' copies the whole message tree of the incoming message to the outgoing message.
    '''
    out.root = copy.deepcopy(inn.root)  #deepcopy: later changes to out must not affect inn
def useoneof(*args):
    ''' Return the first argument that is truthy (not None/empty/zero);
        return None when there is no such argument.
    '''
    for candidate in args:
        if candidate:
            return candidate
    return None
def dateformat(date):
    ''' for edifact: return the format qualifier code matching the date string:
        8 chars -> '102', 12 chars -> '203', 16 chars -> '718';
        empty/None or any other length -> None.
    '''
    if not date:
        return None
    return {8: '102', 12: '203', 16: '718'}.get(len(date))
def datemask(value,frommask,tomask):
    ''' value is formatted as described by frommask;
        returned is the value reformatted according to tomask.
        Eg datemask('20120425','CCYYMMDD','DD-MM-CCYY') -> '25-04-2012'
        Characters in tomask that do not occur in frommask are copied literally.
    '''
    if not value:
        return value
    #collect the value-characters per mask-character, in order of appearance
    convdict = collections.defaultdict(list)
    for maskchar,valuechar in zip(frommask,value):  #renamed loop var: old code shadowed parameter "value"
        convdict[maskchar].append(valuechar)
    #rebuild according to tomask; join instead of the old quadratic "+=" loop
    return ''.join(convdict.get(maskchar,[maskchar]).pop(0) for maskchar in tomask)
| Python |
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext as _
'''
django is not excellent in generating db. But they have provided a way to customize the generated database using SQL. see bots/sql/*.
'''
#technical status of a file/ta: how the processing of that step went
STATUST = [
    (0, _(u'Open')),
    (1, _(u'Error')),
    (2, _(u'Stuck')),
    (3, _(u'Done')),
    ]
#processing phase of a file/ta in the translation chain
#note: 230-239 are five 'Set for preprocess'/'Preprocess' pairs
STATUS = [
    (1,_(u'process')),
    (3,_(u'discarded')),
    (200,_(u'FileReceive')),
    (210,_(u'RawInfile')),
    (215,_(u'Mimein')),
    (220,_(u'Infile')),
    (230,_(u'Set for preprocess')),
    (231,_(u'Preprocess')),
    (232,_(u'Set for preprocess')),
    (233,_(u'Preprocess')),
    (234,_(u'Set for preprocess')),
    (235,_(u'Preprocess')),
    (236,_(u'Set for preprocess')),
    (237,_(u'Preprocess')),
    (238,_(u'Set for preprocess')),
    (239,_(u'Preprocess')),
    (300,_(u'Translate')),
    (310,_(u'Parsed')),
    (320,_(u'Splitup')),
    (330,_(u'Translated')),
    (400,_(u'Merged')),
    (500,_(u'Outfile')),
    (510,_(u'RawOutfile')),
    (520,_(u'FileSend')),
    ]
#supported edi-types (used as choices for editype fields)
EDITYPES = [
    ('csv',               _(u'csv')),
    ('database',          _(u'database (old)')),
    ('db',                _(u'db')),
    ('edifact',           _(u'edifact')),
    ('email-confirmation',_(u'email-confirmation')),
    ('fixed',             _(u'fixed')),
    ('idoc',              _(u'idoc')),
    ('json',              _(u'json')),
    ('jsonnocheck',       _(u'jsonnocheck')),
    ('mailbag',           _(u'mailbag')),
    ('raw',               _(u'raw')),
    ('template',          _(u'template')),
    ('templatehtml',      _(u'template-html')),
    ('tradacoms',         _(u'tradacoms')),
    ('xml',               _(u'xml')),
    ('xmlnocheck',        _(u'xmlnocheck')),
    ('x12',               _(u'x12')),
    ]
#direction of a channel
INOROUT = (
    ('in',  _(u'in')),
    ('out', _(u'out')),
    )
#communication protocol of a channel
CHANNELTYPE = (
    ('file',                _(u'file')),
    ('smtp',                _(u'smtp')),
    ('smtps',               _(u'smtps')),
    ('smtpstarttls',        _(u'smtpstarttls')),
    ('pop3',                _(u'pop3')),
    ('pop3s',               _(u'pop3s')),
    ('pop3apop',            _(u'pop3apop')),
    ('imap4',               _(u'imap4')),
    ('imap4s',              _(u'imap4s')),
    ('ftp',                 _(u'ftp')),
    ('ftps',                _(u'ftps (explicit)')),
    ('ftpis',               _(u'ftps (implicit)')),
    ('sftp',                _(u'sftp (ssh)')),
    ('xmlrpc',              _(u'xmlrpc')),
    ('mimefile',            _(u'mimefile')),
    ('communicationscript', _(u'communicationscript')),
    ('db',                  _(u'db')),
    ('database',            _(u'database (old)')),
    ('intercommit',         _(u'intercommit')),
    )
#kinds of edi confirmations that can be asked/sent (see confirmrule)
CONFIRMTYPE = [
    ('ask-email-MDN',_(u'ask an email confirmation (MDN) when sending')),
    ('send-email-MDN',_(u'send an email confirmation (MDN) when receiving')),
    ('ask-x12-997',_(u'ask a x12 confirmation (997) when sending')),
    ('send-x12-997',_(u'send a x12 confirmation (997) when receiving')),
    ('ask-edifact-CONTRL',_(u'ask an edifact confirmation (CONTRL) when sending')),
    ('send-edifact-CONTRL',_(u'send an edifact confirmation (CONTRL) when receiving')),
    ]
#what a confirmrule selects on
RULETYPE = (
    ('all',_(u'all')),
    ('route',_(u'route')),
    ('channel',_(u'channel')),
    ('frompartner',_(u'frompartner')),
    ('topartner',_(u'topartner')),
    ('messagetype',_(u'messagetype')),
    )
#mime-encoding options for email channels
ENCODE_MIME = (
    ('always',_(u'base64')),
    ('never',_(u'never')),
    ('ascii',_(u'base64 if not ascii')),
    )
class StripCharField(models.CharField):
    ''' CharField that strips leading/trailing whitespace before the value is
        sent to the database (django does not do this by default).
    '''
    def get_db_prep_value(self, value, *args, **kwargs):
        ''' Return the field value prepared for the database backend:
            string values are stripped, anything else passes through unchanged.
        '''
        return value.strip() if isinstance(value, basestring) else value
class botsmodel(models.Model):
    ''' Abstract base class for all bots models.
        Overrides delete() because bots does not want django's cascading deletes.
    '''
    class Meta:
        abstract = True
    def delete(self, *args, **kwargs):
        ''' bots does not use cascaded deletes!; so for delete: set references to null'''
        self.clear_nullable_related()
        super(botsmodel, self).delete(*args, **kwargs)
    def clear_nullable_related(self):
        """
        Recursively clears any nullable foreign key fields on related objects.
        Django is hard-wired for cascading deletes, which is very dangerous for
        us. This simulates ON DELETE SET NULL behavior manually.
        """
        for related in self._meta.get_all_related_objects():
            accessor = related.get_accessor_name()      #name of the reverse relation manager
            related_set = getattr(self, accessor)
            if related.field.null:
                related_set.clear()                     #nullable FK: just detach the related objects
            else:
                #FK is not nullable: recurse so related objects detach their own relations
                #NOTE(review): the non-nullable related objects themselves are not deleted here — confirm deliberate
                for related_object in related_set.all():
                    related_object.clear_nullable_related()
#***********************************************************************************
#******** written by webserver ********************************************************
#***********************************************************************************
class confirmrule(botsmodel):
    ''' Rule determining for which partner/route/channel/editype/messagetype an
        edi confirmation (email MDN, x12 997, edifact CONTRL) is asked or sent.
    '''
    #~ id = models.IntegerField(primary_key=True)
    active = models.BooleanField(default=False)
    confirmtype = StripCharField(max_length=35,choices=CONFIRMTYPE)
    ruletype = StripCharField(max_length=35,choices=RULETYPE)
    negativerule = models.BooleanField(default=False)
    frompartner = models.ForeignKey('partner',related_name='cfrompartner',null=True,blank=True)
    topartner = models.ForeignKey('partner',related_name='ctopartner',null=True,blank=True)
    #~ idroute = models.ForeignKey('routes',null=True,blank=True,verbose_name='route')
    idroute = StripCharField(max_length=35,null=True,blank=True,verbose_name=_(u'route'))
    idchannel = models.ForeignKey('channel',null=True,blank=True,verbose_name=_(u'channel'))
    editype = StripCharField(max_length=35,choices=EDITYPES,blank=True)
    messagetype = StripCharField(max_length=35,blank=True)
    rsrv1 = StripCharField(max_length=35,blank=True,null=True)  #added 20100501
    rsrv2 = models.IntegerField(null=True)                      #added 20100501
    def __unicode__(self):
        return unicode(self.confirmtype) + u' ' + unicode(self.ruletype)
    class Meta:
        db_table = 'confirmrule'
        verbose_name = _(u'confirm rule')
        ordering = ['confirmtype','ruletype']
class ccodetrigger(botsmodel):
    ''' Type of user code; each ccode row belongs to one ccodetrigger. '''
    ccodeid = StripCharField(primary_key=True,max_length=35,verbose_name=_(u'type code'))
    ccodeid_desc = StripCharField(max_length=35,null=True,blank=True)
    def __unicode__(self):
        return unicode(self.ccodeid)
    class Meta:
        db_table = 'ccodetrigger'
        verbose_name = _(u'user code type')
        ordering = ['ccodeid']
class ccode(botsmodel):
    ''' User code conversion entry: leftcode <-> rightcode within a code type
        (ccodeid); attr1-attr8 hold extra user data.
    '''
    #~ id = models.IntegerField(primary_key=True) #added 20091221
    ccodeid = models.ForeignKey(ccodetrigger,verbose_name=_(u'type code'))
    leftcode = StripCharField(max_length=35,db_index=True)
    rightcode = StripCharField(max_length=35,db_index=True)
    attr1 = StripCharField(max_length=35,blank=True)
    attr2 = StripCharField(max_length=35,blank=True)
    attr3 = StripCharField(max_length=35,blank=True)
    attr4 = StripCharField(max_length=35,blank=True)
    attr5 = StripCharField(max_length=35,blank=True)
    attr6 = StripCharField(max_length=35,blank=True)
    attr7 = StripCharField(max_length=35,blank=True)
    attr8 = StripCharField(max_length=35,blank=True)
    def __unicode__(self):
        return unicode(self.ccodeid) + u' ' + unicode(self.leftcode) + u' ' + unicode(self.rightcode)
    class Meta:
        db_table = 'ccode'
        verbose_name = _(u'user code')
        unique_together = (('ccodeid','leftcode','rightcode'),)
        ordering = ['ccodeid']
class channel(botsmodel):
    ''' Configuration of one communication channel (in or out).
        Several fields have been repurposed over time (see the trailing comments).
    '''
    idchannel = StripCharField(max_length=35,primary_key=True)
    inorout = StripCharField(max_length=35,choices=INOROUT,verbose_name=_(u'in/out'))
    type = StripCharField(max_length=35,choices=CHANNELTYPE)    #protocol type
    charset = StripCharField(max_length=35,default=u'us-ascii')
    host = StripCharField(max_length=256,blank=True)
    port = models.PositiveIntegerField(default=0,blank=True,null=True)
    username = StripCharField(max_length=35,blank=True)
    secret = StripCharField(max_length=35,blank=True,verbose_name=_(u'password'))
    starttls = models.BooleanField(default=False,verbose_name='No check from-address',help_text=_(u"Do not check if an incoming 'from' email addresses is known.")) #20091027: used as 'no check on "from:" email address'
    apop = models.BooleanField(default=False,verbose_name='No check to-address',help_text=_(u"Do not check if an incoming 'to' email addresses is known.")) #not used anymore (is in 'type' now) #20110104: used as 'no check on "to:" email address'
    remove = models.BooleanField(default=False,help_text=_(u'For in-channels: remove the edi files after successful reading. Note: in production you do want to remove the edi files, else these are read over and over again!'))
    path = StripCharField(max_length=256,blank=True) #different from host - in ftp both are used
    filename = StripCharField(max_length=35,blank=True,help_text=_(u'For "type" ftp and file; read or write this filename. Wildcards allowed, eg "*.edi". Note for out-channels: if no wildcard is used, all edi message are written to one file.'))
    lockname = StripCharField(max_length=35,blank=True,help_text=_(u'When reading or writing edi files in this directory use this file to indicate a directory lock.'))
    syslock = models.BooleanField(default=False,help_text=_(u'Use system file locking for reading & writing edi files on windows, *nix.'))
    parameters = StripCharField(max_length=70,blank=True)
    ftpaccount = StripCharField(max_length=35,blank=True)
    ftpactive = models.BooleanField(default=False)
    ftpbinary = models.BooleanField(default=False)
    askmdn = StripCharField(max_length=17,blank=True,choices=ENCODE_MIME,verbose_name=_(u'mime encoding'),help_text=_(u'Should edi-files be base64-encoded in email. Using base64 for edi (default) is often a good choice.')) #not used anymore 20091019: 20100703: used to indicate mime-encoding
    sendmdn = StripCharField(max_length=17,blank=True) #not used anymore 20091019
    mdnchannel = StripCharField(max_length=35,blank=True) #not used anymore 20091019
    archivepath = StripCharField(max_length=256,blank=True,verbose_name=_(u'Archive path'),help_text=_(u'Write incoming or outgoing edi files to an archive. Use absolute or relative path; relative path is relative to bots directory. Eg: "botssys/archive/mychannel".')) #added 20091028
    desc = models.TextField(max_length=256,null=True,blank=True)
    rsrv1 = StripCharField(max_length=35,blank=True,null=True) #added 20100501
    rsrv2 = models.IntegerField(null=True,blank=True,verbose_name=_(u'Max seconds'),help_text=_(u'Max seconds used for the in-communication time for this channel.')) #added 20100501. 20110906: max communication time.
    class Meta:
        ordering = ['idchannel']
        db_table = 'channel'
    def __unicode__(self):
        return self.idchannel
class partner(botsmodel):
    ''' Trading partner, or a partner group (isgroup=True). '''
    idpartner = StripCharField(max_length=35,primary_key=True,verbose_name=_(u'partner identification'))
    active = models.BooleanField(default=False)
    isgroup = models.BooleanField(default=False)
    name = StripCharField(max_length=256) #only used for user information
    mail = StripCharField(max_length=256,blank=True)
    cc = models.EmailField(max_length=256,blank=True)
    mail2 = models.ManyToManyField(channel, through='chanpar',blank=True)   #per-channel email addresses
    group = models.ManyToManyField("self",db_table='partnergroup',blank=True,symmetrical=False,limit_choices_to = {'isgroup': True})
    rsrv1 = StripCharField(max_length=35,blank=True,null=True) #added 20100501
    rsrv2 = models.IntegerField(null=True) #added 20100501
    class Meta:
        ordering = ['idpartner']
        db_table = 'partner'
    def __unicode__(self):
        return unicode(self.idpartner)
class chanpar(botsmodel):
    ''' Per-partner, per-channel email address (through-table of partner.mail2). '''
    #~ id = models.IntegerField(primary_key=True) #added 20091221
    idpartner = models.ForeignKey(partner,verbose_name=_(u'partner'))
    idchannel = models.ForeignKey(channel,verbose_name=_(u'channel'))
    mail = StripCharField(max_length=256)
    cc = models.EmailField(max_length=256,blank=True) #added 20091111
    askmdn = models.BooleanField(default=False) #not used anymore 20091019
    sendmdn = models.BooleanField(default=False) #not used anymore 20091019
    class Meta:
        unique_together = (("idpartner","idchannel"),)
        db_table = 'chanpar'
        verbose_name = _(u'email address per channel')
        verbose_name_plural = _(u'email address per channel')
    def __unicode__(self):
        return str(self.idpartner) + ' ' + str(self.idchannel) + ' ' + str(self.mail)
class translate(botsmodel):
    ''' Translation rule: which user mapping script (tscript) translates
        fromeditype/frommessagetype to toeditype/tomessagetype, optionally
        restricted to frompartner/topartner and an alternative translation.
    '''
    #~ id = models.IntegerField(primary_key=True)
    active = models.BooleanField(default=False)
    fromeditype = StripCharField(max_length=35,choices=EDITYPES,help_text=_(u'Editype to translate from.'))
    frommessagetype = StripCharField(max_length=35,help_text=_(u'Messagetype to translate from.'))
    alt = StripCharField(max_length=35,null=False,blank=True,verbose_name=_(u'Alternative translation'),help_text=_(u'Do this translation only for this alternative translation.'))
    frompartner = models.ForeignKey(partner,related_name='tfrompartner',null=True,blank=True,help_text=_(u'Do this translation only for this frompartner.'))
    topartner = models.ForeignKey(partner,related_name='ttopartner',null=True,blank=True,help_text=_(u'Do this translation only for this topartner.'))
    tscript = StripCharField(max_length=35,help_text=_(u'User mapping script to use for translation.'))
    toeditype = StripCharField(max_length=35,choices=EDITYPES,help_text=_(u'Editype to translate to.'))
    tomessagetype = StripCharField(max_length=35,help_text=_(u'Messagetype to translate to.'))
    desc = models.TextField(max_length=256,null=True,blank=True)
    rsrv1 = StripCharField(max_length=35,blank=True,null=True) #added 20100501
    rsrv2 = models.IntegerField(null=True) #added 20100501
    class Meta:
        db_table = 'translate'
        verbose_name = _(u'translation')
        ordering = ['fromeditype','frommessagetype']
    def __unicode__(self):
        return unicode(self.fromeditype) + u' ' + unicode(self.frommessagetype) + u' ' + unicode(self.alt) + u' ' + unicode(self.frompartner) + u' ' + unicode(self.topartner)
class routes(botsmodel):
    ''' One part of a route: receives edi files from an in-channel, optionally
        translates them, and passes them to an out-channel. A route can consist
        of multiple parts sharing the same idroute (ordered by seq).
    '''
    #~ id = models.IntegerField(primary_key=True)
    idroute = StripCharField(max_length=35,db_index=True,help_text=_(u'identification of route; one route can consist of multiple parts having the same "idroute".'))
    seq = models.PositiveIntegerField(default=1,help_text=_(u'for routes consisting of multiple parts, "seq" indicates the order these parts are run.'))
    active = models.BooleanField(default=False)
    fromchannel = models.ForeignKey(channel,related_name='rfromchannel',null=True,blank=True,verbose_name=_(u'incoming channel'),limit_choices_to = {'inorout': 'in'})
    fromeditype = StripCharField(max_length=35,choices=EDITYPES,blank=True,help_text=_(u'the editype of the incoming edi files.'))
    frommessagetype = StripCharField(max_length=35,blank=True,help_text=_(u'the messagetype of incoming edi files. For edifact: messagetype=edifact; for x12: messagetype=x12.'))
    tochannel = models.ForeignKey(channel,related_name='rtochannel',null=True,blank=True,verbose_name=_(u'outgoing channel'),limit_choices_to = {'inorout': 'out'})
    toeditype = StripCharField(max_length=35,choices=EDITYPES,blank=True,help_text=_(u'Only edi files with this editype to this outgoing channel.'))
    tomessagetype = StripCharField(max_length=35,blank=True,help_text=_(u'Only edi files of this messagetype to this outgoing channel.'))
    alt = StripCharField(max_length=35,default=u'',blank=True,verbose_name='Alternative translation',help_text=_(u'Only use if there is more than one "translation" for the same editype and messagetype. Advanced use, seldom needed.'))
    frompartner = models.ForeignKey(partner,related_name='rfrompartner',null=True,blank=True,help_text=_(u'The frompartner of the incoming edi files. Seldom needed.'))
    topartner = models.ForeignKey(partner,related_name='rtopartner',null=True,blank=True,help_text=_(u'The topartner of the incoming edi files. Seldom needed.'))
    frompartner_tochannel = models.ForeignKey(partner,related_name='rfrompartner_tochannel',null=True,blank=True,help_text=_(u'Only edi files from this partner/partnergroup for this outgoing channel'))
    topartner_tochannel = models.ForeignKey(partner,related_name='rtopartner_tochannel',null=True,blank=True,help_text=_(u'Only edi files to this partner/partnergroup to this channel'))
    testindicator = StripCharField(max_length=1,blank=True,help_text=_(u'Only edi files with this testindicator to this outgoing channel.'))
    translateind = models.BooleanField(default=True,blank=True,verbose_name='translate',help_text=_(u'Do a translation in this route.'))
    notindefaultrun = models.BooleanField(default=False,blank=True,help_text=_(u'Do not use this route in a normal run. Advanced, related to scheduling specific routes or not.'))
    desc = models.TextField(max_length=256,null=True,blank=True)
    rsrv1 = StripCharField(max_length=35,blank=True,null=True) #added 20100501
    rsrv2 = models.IntegerField(null=True) #added 20100501
    defer = models.BooleanField(default=False,blank=True,help_text=_(u'Set ready for communication, but defer actual communication (this is done in another route)')) #added 20100601
    class Meta:
        db_table = 'routes'
        verbose_name = _(u'route')
        unique_together = (("idroute","seq"),)
        ordering = ['idroute','seq']
    def __unicode__(self):
        return unicode(self.idroute) + u' ' + unicode(self.seq)
#***********************************************************************************
#******** written by engine ********************************************************
#***********************************************************************************
class filereport(botsmodel):
    ''' Per-file result of a run; written by the engine (one row per incoming
        edi file per run/report).
    '''
    #~ id = models.IntegerField(primary_key=True)
    idta = models.IntegerField(db_index=True)
    reportidta = models.IntegerField(db_index=True)
    statust = models.IntegerField(choices=STATUST)
    retransmit = models.IntegerField()
    idroute = StripCharField(max_length=35)
    fromchannel = StripCharField(max_length=35)
    tochannel = StripCharField(max_length=35)
    frompartner = StripCharField(max_length=35)
    topartner = StripCharField(max_length=35)
    frommail = StripCharField(max_length=256)
    tomail = StripCharField(max_length=256)
    ineditype = StripCharField(max_length=35,choices=EDITYPES)
    inmessagetype = StripCharField(max_length=35)
    outeditype = StripCharField(max_length=35,choices=EDITYPES)
    outmessagetype = StripCharField(max_length=35)
    incontenttype = StripCharField(max_length=35)
    outcontenttype = StripCharField(max_length=35)
    nrmessages = models.IntegerField()
    ts = models.DateTimeField(db_index=True) #copied from ta
    infilename = StripCharField(max_length=256)
    inidta = models.IntegerField(null=True) #not used anymore
    outfilename = StripCharField(max_length=256)
    outidta = models.IntegerField()
    divtext = StripCharField(max_length=35)
    errortext = StripCharField(max_length=2048)
    rsrv1 = StripCharField(max_length=35,blank=True,null=True) #added 20100501
    rsrv2 = models.IntegerField(null=True) #added 20100501
    class Meta:
        db_table = 'filereport'
        unique_together = (("idta","reportidta"),)
class mutex(botsmodel):
    ''' Single-row lock table; presumably used by the engine to ensure only one
        run at a time (see custom SQL in bots/sql) — confirm usage in engine.
    '''
    #specific SQL is used (database defaults are used)
    mutexk = models.IntegerField(primary_key=True)
    mutexer = models.IntegerField()
    ts = models.DateTimeField()
    class Meta:
        db_table = 'mutex'
class persist(botsmodel):
    ''' Persistent key-value storage: (domein,botskey) -> pickled content. '''
    #OK, this has gone wrong. There is no primary key here, so django generates this. But there is no ID in the custom sql.
    #Django still uses the ID in sql manager. This leads to an error in snapshot plugin. Disabled this in snapshot function; to fix this really database has to be changed.
    #specific SQL is used (database defaults are used)
    domein = StripCharField(max_length=35)
    botskey = StripCharField(max_length=35)
    content = StripCharField(max_length=1024)
    ts = models.DateTimeField()
    class Meta:
        db_table = 'persist'
        unique_together = (("domein","botskey"),)
class report(botsmodel):
    ''' Aggregated result of one engine run (counts of received/done/open/ok/error etc). '''
    idta = models.IntegerField(primary_key=True) #rename to reportidta
    lastreceived = models.IntegerField()
    lastdone = models.IntegerField()
    lastopen = models.IntegerField()
    lastok = models.IntegerField()
    lasterror = models.IntegerField()
    send = models.IntegerField()
    processerrors = models.IntegerField()
    ts = models.DateTimeField() #copied from (runroot)ta
    type = StripCharField(max_length=35)
    status = models.BooleanField()
    rsrv1 = StripCharField(max_length=35,blank=True,null=True) #added 20100501
    rsrv2 = models.IntegerField(null=True) ##added 20100501
    class Meta:
        db_table = 'report'
#~ #trigger for sqlite to use local time (instead of utc). I can not add this to sqlite specific sql code, as django does not allow complex (begin ... end) sql here.
#~ CREATE TRIGGER uselocaltime AFTER INSERT ON ta
#~ BEGIN
#~ UPDATE ta
#~ SET ts = datetime('now','localtime')
#~ WHERE idta = new.idta ;
#~ END;
class ta(botsmodel):
    ''' Transaction record written by the engine: one row per processing step of
        a file/message (see STATUS/STATUST choices for the processing chain).
    '''
    #specific SQL is used (database defaults are used)
    idta = models.AutoField(primary_key=True)
    statust = models.IntegerField(choices=STATUST)  #technical status (open/error/stuck/done)
    status = models.IntegerField(choices=STATUS)    #processing phase
    parent = models.IntegerField(db_index=True)
    child = models.IntegerField()
    script = models.IntegerField(db_index=True)
    idroute = StripCharField(max_length=35)
    filename = StripCharField(max_length=256)
    frompartner = StripCharField(max_length=35)
    topartner = StripCharField(max_length=35)
    fromchannel = StripCharField(max_length=35)
    tochannel = StripCharField(max_length=35)
    editype = StripCharField(max_length=35)
    messagetype = StripCharField(max_length=35)
    alt = StripCharField(max_length=35)
    divtext = StripCharField(max_length=35)
    merge = models.BooleanField()
    nrmessages = models.IntegerField()
    testindicator = StripCharField(max_length=10) #0:production; 1:test. Length to 1?
    reference = StripCharField(max_length=70)
    frommail = StripCharField(max_length=256)
    tomail = StripCharField(max_length=256)
    charset = StripCharField(max_length=35)
    statuse = models.IntegerField() #obsolete 20091019 but still used by intercommit comm. module
    retransmit = models.BooleanField() #20070831: only retransmit, not rereceive
    contenttype = StripCharField(max_length=35)
    errortext = StripCharField(max_length=2048)
    ts = models.DateTimeField()
    confirmasked = models.BooleanField() #added 20091019; confirmation asked or send
    confirmed = models.BooleanField() #added 20091019; is confirmation received (when asked)
    confirmtype = StripCharField(max_length=35) #added 20091019
    confirmidta = models.IntegerField() #added 20091019
    envelope = StripCharField(max_length=35) #added 20091024
    botskey = StripCharField(max_length=35) #added 20091024
    cc = StripCharField(max_length=512) #added 20091111
    rsrv1 = StripCharField(max_length=35) #added 20100501
    rsrv2 = models.IntegerField(null=True) #added 20100501
    rsrv3 = StripCharField(max_length=35) #added 20100501
    rsrv4 = models.IntegerField(null=True) #added 20100501
    class Meta:
        db_table = 'ta'
class uniek(botsmodel):
    ''' Persistent counter per domain: holds the last number handed out for 'domein'.
        Used by unique()/checkunique()/keeptrackoflastretry() in botslib.
    '''
    #specific SQL is used (database defaults are used)
    domein = StripCharField(max_length=35,primary_key=True)
    nummer = models.IntegerField()
    class Meta:
        db_table = 'uniek'
        verbose_name = _(u'counter')
        ordering = ['domein']
| Python |
#!/usr/bin/env python
import os
import optparse
import subprocess
import sys
here = os.path.dirname(__file__)

def main():
    ''' Compress the admin app's jQuery-based javascript files with the Google
        Closure Compiler. With no file arguments, the standard admin scripts
        are compressed. Requires Java 6+ and the Closure Compiler jar.
    '''
    usage = "usage: %prog [file1..fileN]"
    description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
    parser = optparse.OptionParser(usage, description=description)
    parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
                      help="path to Closure Compiler jar file")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose")
    (options, args) = parser.parse_args()
    compiler = os.path.expanduser(options.compiler)
    if not os.path.exists(compiler):
        sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)
    if not args:
        if options.verbose:
            sys.stdout.write("No filenames given; defaulting to admin scripts\n")
        args = [os.path.join(here, f) for f in [
            "actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
    for arg in args:
        if not arg.endswith(".js"):
            arg = arg + ".js"
        to_compress = os.path.expanduser(arg)
        if os.path.exists(to_compress):
            # Derive the output name from the *expanded* path, so a "~" in the
            # argument does not end up literally in the output filename.
            to_compress_min = "%s.min.js" % "".join(to_compress.rsplit(".js"))
            # Pass the command as an argument list: safe for paths containing
            # spaces (the previous cmd.split() broke such paths apart).
            cmd = ["java", "-jar", compiler,
                   "--js", to_compress, "--js_output_file", to_compress_min]
            if options.verbose:
                sys.stdout.write("Running: %s\n" % " ".join(cmd))
            subprocess.call(cmd)
        else:
            sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
import os
import optparse
import subprocess
import sys
here = os.path.dirname(__file__)

def main():
    ''' Compress the admin app's jQuery-based javascript files with the Google
        Closure Compiler. With no file arguments, the standard admin scripts
        are compressed. Requires Java 6+ and the Closure Compiler jar.
    '''
    usage = "usage: %prog [file1..fileN]"
    description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
    parser = optparse.OptionParser(usage, description=description)
    parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
                      help="path to Closure Compiler jar file")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose")
    (options, args) = parser.parse_args()
    compiler = os.path.expanduser(options.compiler)
    if not os.path.exists(compiler):
        sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)
    if not args:
        if options.verbose:
            sys.stdout.write("No filenames given; defaulting to admin scripts\n")
        args = [os.path.join(here, f) for f in [
            "actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
    for arg in args:
        if not arg.endswith(".js"):
            arg = arg + ".js"
        to_compress = os.path.expanduser(arg)
        if os.path.exists(to_compress):
            # Derive the output name from the *expanded* path, so a "~" in the
            # argument does not end up literally in the output filename.
            to_compress_min = "%s.min.js" % "".join(to_compress.rsplit(".js"))
            # Pass the command as an argument list: safe for paths containing
            # spaces (the previous cmd.split() broke such paths apart).
            cmd = ["java", "-jar", compiler,
                   "--js", to_compress, "--js_output_file", to_compress_min]
            if options.verbose:
                sys.stdout.write("Running: %s\n" % " ".join(cmd))
            subprocess.call(cmd)
        else:
            sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)

if __name__ == '__main__':
    main()
| Python |
''' Base library for bots. Botslib should not import from other Bots-modules.'''
import sys
import os
import codecs
import functools
import platform
import socket #to set a time-out for connections
import string
import subprocess
import traceback
import urllib
import urlparse
import django
from django.utils.translation import ugettext as _
#Bots-modules
from botsconfig import *
import botsglobal #as botsglobal
def botsinfo():
    ''' Assemble a list of (label, value) tuples describing this bots
        installation, its environment and its database configuration.
        Used for display/diagnostic purposes.
    '''
    info = []
    info.append((_(u'server name'), botsglobal.ini.get('webserver','name','bots-webserver')))
    info.append((_(u'served at port'), botsglobal.ini.getint('webserver','port',8080)))
    info.append((_(u'platform'), platform.platform()))
    info.append((_(u'machine'), platform.machine()))
    info.append((_(u'python version'), sys.version))
    info.append((_(u'django version'), django.VERSION))
    info.append((_(u'bots version'), botsglobal.version))
    info.append((_(u'bots installation path'), botsglobal.ini.get('directories','botspath')))
    info.append((_(u'config path'), botsglobal.ini.get('directories','config')))
    info.append((_(u'botssys path'), botsglobal.ini.get('directories','botssys')))
    info.append((_(u'usersys path'), botsglobal.ini.get('directories','usersysabs')))
    info.append((u'DATABASE_ENGINE', botsglobal.settings.DATABASE_ENGINE))
    info.append((u'DATABASE_NAME', botsglobal.settings.DATABASE_NAME))
    info.append((u'DATABASE_USER', botsglobal.settings.DATABASE_USER))
    info.append((u'DATABASE_HOST', botsglobal.settings.DATABASE_HOST))
    info.append((u'DATABASE_PORT', botsglobal.settings.DATABASE_PORT))
    info.append((u'DATABASE_OPTIONS', botsglobal.settings.DATABASE_OPTIONS))
    return info
#**********************************************************/**
#**************getters/setters for some globals***********************/**
#**********************************************************/**
def get_minta4query():
    ''' get the first idta for queries etc.'''
    return botsglobal.minta4query
def set_minta4query():
    ''' set minta4query to the root-idta of the current run (no-op when already set). '''
    if botsglobal.minta4query: #if already set, do nothing
        return
    else:
        botsglobal.minta4query = _Transaction.processlist[1] #set root-idta of current run
def set_minta4query_retry():
    ''' set minta4query for a retry run: start from the first idta with an error. '''
    botsglobal.minta4query = get_idta_last_error()
    return botsglobal.minta4query
def get_idta_last_error():
    ''' return the smallest reportidta of incoming files that are not completely DONE; 0 if none. '''
    for row in query('''SELECT idta
                        FROM filereport
                        GROUP BY idta
                        HAVING MAX(statust) != %(statust)s''',
                        {'statust':DONE}):
        #found incoming file with error
        for row2 in query('''SELECT min(reportidta) as min
                            FROM filereport
                            WHERE idta = %(idta)s ''',
                            {'idta':row['idta']}):
            return row2['min']
    return 0 #if no error found.
def set_minta4query_crashrecovery():
    ''' set/return rootidta of last run - that is supposed to crashed'''
    for row in query('''SELECT max(idta) as max
                        FROM ta
                        WHERE script= 0
                        '''):
        if row['max'] is None:
            return 0
        botsglobal.minta4query = row['max']
        return botsglobal.minta4query
    return 0 #query yielded no rows at all
def getlastrun():
    return _Transaction.processlist[1] #get root-idta of last run
def setrouteid(routeid):
    #remember the id of the route currently running; read back via getrouteid()
    botsglobal.routeid = routeid
def getrouteid():
    return botsglobal.routeid
def setpreprocessnumber(statusnumber):
    botsglobal.preprocessnumber = statusnumber
def getpreprocessnumber():
    ''' return the current preprocess number and advance it by 2 for the next caller. '''
    terug = botsglobal.preprocessnumber
    botsglobal.preprocessnumber +=2
    return terug
#**********************************************************/**
#***************** class Transaction *********************/**
#**********************************************************/**
class _Transaction(object):
    ''' abstract class for db-ta.
        This class is used for communication with db-ta.
        Concrete subclasses: OldTransaction (wraps an existing row), NewTransaction (inserts a row).
    '''
    #filtering values fo db handling (to avoid unknown fields in db.
    filterlist=['statust','status','divtext','parent','child','script','frompartner','topartner','fromchannel','tochannel','editype','messagetype','merge',
                'testindicator','reference','frommail','tomail','contenttype','errortext','filename','charset','alt','idroute','nrmessages','retransmit',
                'confirmasked','confirmed','confirmtype','confirmidta','envelope','botskey','cc']
    processlist=[0] #stack for bots-processes. last one is the current process; starts with 1 element in list: root
    def update(self,**ta_info):
        ''' Updates db-ta with named-parameters/dict.
            Use a filter to update only valid fields in db-ta
        '''
        setstring = ','.join([key+'=%('+key+')s' for key in ta_info if key in _Transaction.filterlist])
        if not setstring: #nothing to update
            return
        ta_info['selfid'] = self.idta #always set this...I'm not sure if this is needed...take no chances
        cursor = botsglobal.db.cursor()
        cursor.execute(u'''UPDATE ta
                            SET '''+setstring+ '''
                            WHERE idta=%(selfid)s''',
                        ta_info)
        botsglobal.db.commit()
        cursor.close()
    def delete(self):
        '''Deletes current transaction '''
        cursor = botsglobal.db.cursor()
        cursor.execute(u'''DELETE FROM ta
                            WHERE idta=%(selfid)s''',
                        {'selfid':self.idta})
        botsglobal.db.commit()
        cursor.close()
    def failure(self):
        '''Failure: deletes all children of transaction (and children of children etc)'''
        #recursion: each direct child first deletes its own offspring, then the children themselves are deleted
        cursor = botsglobal.db.cursor()
        cursor.execute(u'''SELECT idta FROM ta
                            WHERE idta>%(rootidta)s
                            AND parent=%(selfid)s''',
                        {'selfid':self.idta,'rootidta':get_minta4query()})
        rows = cursor.fetchall()
        for row in rows:
            ta=OldTransaction(row['idta'])
            ta.failure()
        cursor.execute(u'''DELETE FROM ta
                            WHERE idta>%(rootidta)s
                            AND parent=%(selfid)s''',
                        {'selfid':self.idta,'rootidta':get_minta4query()})
        botsglobal.db.commit()
        cursor.close()
    def mergefailure(self):
        '''Failure while merging: all parents of transaction get status OK (turn back)'''
        cursor = botsglobal.db.cursor()
        cursor.execute(u'''UPDATE ta
                            SET statust=%(statustnew)s
                            WHERE idta>%(rootidta)s
                            AND child=%(selfid)s
                            AND statust=%(statustold)s''',
                        {'selfid':self.idta,'statustold':DONE,'statustnew':OK,'rootidta':get_minta4query()})
        botsglobal.db.commit()
        cursor.close()
    def syn(self,*ta_vars):
        '''access of attributes of transaction as ta.fromid, ta.filename etc.
           Fetches only the named columns and sets them as attributes on self.'''
        cursor = botsglobal.db.cursor()
        varsstring = ','.join(ta_vars)
        cursor.execute(u'''SELECT ''' + varsstring + '''
                            FROM ta
                            WHERE idta=%(selfid)s''',
                        {'selfid':self.idta})
        result = cursor.fetchone()
        for key in result.keys():
            setattr(self,key,result[key])
        cursor.close()
    def synall(self):
        '''access of attributes of transaction as ta.fromid, ta.filename etc.
           Like syn(), but fetches all columns in filterlist.'''
        cursor = botsglobal.db.cursor()
        varsstring = ','.join(self.filterlist)
        cursor.execute(u'''SELECT ''' + varsstring + '''
                            FROM ta
                            WHERE idta=%(selfid)s''',
                        {'selfid':self.idta})
        result = cursor.fetchone()
        for key in result.keys():
            setattr(self,key,result[key])
        cursor.close()
    def copyta(self,status,**ta_info):
        ''' copy: make a new transaction that is a copy of self (new status; self becomes parent).
            Extra ta_info values are applied to the copy via update(); returns the new OldTransaction.
        '''
        script = _Transaction.processlist[-1]
        cursor = botsglobal.db.cursor()
        cursor.execute(u'''INSERT INTO ta (script, status, parent,frompartner,topartner,fromchannel,tochannel,editype,messagetype,alt,merge,testindicator,reference,frommail,tomail,charset,contenttype,filename,idroute,nrmessages,botskey)
                            SELECT %(script)s,%(newstatus)s,idta,frompartner,topartner,fromchannel,tochannel,editype,messagetype,alt,merge,testindicator,reference,frommail,tomail,charset,contenttype,filename,idroute,nrmessages,botskey
                            FROM ta
                            WHERE idta=%(selfid)s''',
                        {'selfid':self.idta,'script':script,'newstatus':status})
        newidta = cursor.lastrowid
        if not newidta: #if botsglobal.settings.DATABASE_ENGINE == ; lastrowid not filled by driver: fetch key explicitly (lastval() is postgresql syntax)
            cursor.execute('''SELECT lastval() as idta''')
            newidta = cursor.fetchone()['idta']
        botsglobal.db.commit()
        cursor.close()
        newdbta = OldTransaction(newidta)
        newdbta.update(**ta_info)
        return newdbta
class OldTransaction(_Transaction):
    ''' Wraps an already existing db-ta, identified by idta. '''
    def __init__(self,idta,**ta_info):
        '''Use old transaction '''
        self.idta = idta
        self.talijst=[]
        for key in ta_info.keys(): #only used by trace
            setattr(self,key,ta_info[key]) #could be done better, but SQLite does not support .items()
class NewTransaction(_Transaction):
    ''' Inserts a new db-ta; self.idta receives the new primary key. '''
    def __init__(self,**ta_info):
        '''Generates new transaction, returns key of transaction '''
        #only fields in filterlist are written to the db
        updatedict = dict([(key,value) for key,value in ta_info.items() if key in _Transaction.filterlist])
        updatedict['script'] = _Transaction.processlist[-1]
        namesstring = ','.join([key for key in updatedict])
        varsstring = ','.join(['%('+key+')s' for key in updatedict])
        cursor = botsglobal.db.cursor()
        cursor.execute(u'''INSERT INTO ta (''' + namesstring + ''')
                            VALUES   (''' + varsstring + ''')''',
                        updatedict)
        self.idta = cursor.lastrowid
        if not self.idta:
            #lastrowid not filled by driver: fetch key explicitly (lastval() is postgresql syntax)
            cursor.execute('''SELECT lastval() as idta''')
            self.idta = cursor.fetchone()['idta']
        botsglobal.db.commit()
        cursor.close()
class NewProcess(NewTransaction):
    ''' Used in logging of processes. Each process is placed on stack processlist'''
    def __init__(self,functionname=''):
        super(NewProcess,self).__init__(filename=functionname,status=PROCESS,idroute=getrouteid())
        _Transaction.processlist.append(self.idta)
    def update(self,**ta_info):
        ''' update the process db-ta and pop it off the process stack (marks the end of this process). '''
        super(NewProcess,self).update(**ta_info)
        _Transaction.processlist.pop()
def trace_origin(ta,where=None):
    ''' bots traces back all from the current step/ta.
        where is a dict that is used to indicate a condition.
        eg: {'status':EXTERNIN}
        If bots finds a ta for which this is true, the ta is added to a list.
        The list is returned when all tracing is done, and contains all ta's for which 'where' is True
    '''
    def trace_recurse(ta):
        ''' recursive
            walk over ta's backward (to origin).
            if condition is met, add the ta to a list
        '''
        for idta in get_parent(ta):
            donelijst.append(idta)
            taparent=OldTransaction(idta=idta)
            taparent.synall()
            for key,value in where.items():
                if getattr(taparent,key) != value:
                    break
            else: #all where-criteria are true; check if we already have this ta
                teruglijst.append(taparent)
            trace_recurse(taparent)
    def get_parent(ta):
        ''' yields the parents of a ta (skips idta's already in donelijst) '''
        if ta.parent: #the is a parent via the normal parent-pointer
            if ta.parent not in donelijst:
                yield ta.parent
        else: #no parent via parent-link, so look via child-link
            for row in query('''SELECT idta
                                FROM ta
                                WHERE idta>%(rootidta)s
                                AND child=%(idta)s''',
                                {'idta':ta.idta,'rootidta':get_minta4query()}):
                if row['idta'] in donelijst:
                    continue
                yield row['idta']
    donelijst = []   #idta's already visited; guards against visiting a ta twice
    teruglijst = []  #matched ta's; returned to caller
    ta.syn('parent')
    trace_recurse(ta)
    return teruglijst
def addinfocore(change,where,wherestring):
    ''' core function for add/changes information in db-ta's.
        where-dict selects db-ta's, change-dict sets values;
        returns the number of db-ta that have been changed.
    '''
    if 'rootidta' not in where:
        where['rootidta']=get_minta4query()
        wherestring = ' idta > %(rootidta)s AND ' + wherestring #limit selection to the current run
    if 'statust' not in where: #by default: look only for statust is OK
        where['statust']=OK
        wherestring += ' AND statust = %(statust)s '
    if 'statust' not in change: #by default: new ta is OK
        change['statust']= OK
    counter = 0 #count the number of dbta changed
    for row in query(u'''SELECT idta FROM ta WHERE '''+wherestring,where):
        counter += 1
        ta_from = OldTransaction(row['idta'])
        ta_from.copyta(**change) #make new ta from ta_from, using parameters from change
        ta_from.update(statust=DONE) #update 'old' ta
    return counter
def addinfo(change,where):
    ''' add/changes information in db-ta's by copying the ta's; the status is updated.
        Convenience wrapper: builds the SQL where-string from the where-dict
        and delegates the real work to addinfocore.
    '''
    conditions = [key+'=%('+key+')s ' for key in where] #one '=' condition per key in where
    return addinfocore(change=change,where=where,wherestring=' AND '.join(conditions))
def updateinfo(change,where):
    ''' update info in ta if not set; no status change.
        where-dict selects db-ta's, change-dict sets values;
        returns the number of db-ta that have been changed.
    '''
    if 'statust' not in where:
        where['statust']=OK
    wherestring = ' AND '.join([key+'=%('+key+')s ' for key in where]) #wherestring for copy & done
    if 'rootidta' not in where:
        where['rootidta']=get_minta4query()
        wherestring = ' idta > %(rootidta)s AND ' + wherestring
    counter = 0 #count the number of dbta changed
    for row in query(u'''SELECT idta FROM ta WHERE '''+wherestring,where):
        counter += 1
        ta_from = OldTransaction(row['idta'])
        ta_from.synall()
        defchange = {}
        for key,value in change.items():
            if value and not getattr(ta_from,key,None): #if there is a value and the key is not set in ta_from:
                defchange[key]=value
        ta_from.update(**defchange)
    return counter
def changestatustinfo(change,where):
    ''' set a new statust for selected db-ta's (in place, no copy is made).
        where-dict selects db-ta's, change is the new statust (an integer);
        returns the number of db-ta that have been changed.
    '''
    if not isinstance(change,int):
        raise BotsError(_(u'change not valid: expect status to be an integer. Programming error.'))
    if 'statust' not in where:
        where['statust']=OK
    wherestring = ' AND '.join([key+'=%('+key+')s ' for key in where]) #wherestring for copy & done
    if 'rootidta' not in where:
        where['rootidta']=get_minta4query()
        wherestring = ' idta > %(rootidta)s AND ' + wherestring
    counter = 0 #count the number of dbta changed
    for row in query(u'''SELECT idta FROM ta WHERE '''+wherestring,where):
        counter += 1
        ta_from = OldTransaction(row['idta'])
        ta_from.update(statust = change)
    return counter
#**********************************************************/**
#*************************Database***********************/**
#**********************************************************/**
def set_database_lock():
    ''' try to acquire the db-wide mutex; returns True when acquired,
        False when another process already holds it.
    '''
    try:
        change(u'''INSERT INTO mutex (mutexk) VALUES (1)''')
    except:
        #insert fails when the mutex row already exists: lock is held elsewhere
        return False
    return True
def remove_database_lock():
    ''' release the db-wide mutex. '''
    change('''DELETE FROM mutex WHERE mutexk=1''')
def query(querystring,*args):
    ''' general query. yields rows from query.
        All rows are fetched up front so the cursor can be closed
        before the generator starts yielding.
    '''
    cursor = botsglobal.db.cursor()
    cursor.execute(querystring,*args)
    rows = cursor.fetchall()
    cursor.close()
    for row in rows:
        yield row
def change(querystring,*args):
    '''general insert/update. no return.
       On any error the transaction is rolled back and the exception re-raised.'''
    cursor = botsglobal.db.cursor()
    try:
        cursor.execute(querystring,*args)
    except: #IntegrityError from postgresql
        botsglobal.db.rollback()
        raise
    botsglobal.db.commit()
    cursor.close()
def unique(domein):
    ''' generate unique number within range domain.
        uses db to keep track of last generated number
        if domain not used before, initialize with 1.
    '''
    cursor = botsglobal.db.cursor()
    try:
        cursor.execute(u'''UPDATE uniek SET nummer=nummer+1 WHERE domein=%(domein)s''',{'domein':domein})
        cursor.execute(u'''SELECT nummer FROM uniek WHERE domein=%(domein)s''',{'domein':domein})
        nummer = cursor.fetchone()['nummer']
    except: # ???.DatabaseError; domein does not exist
        #NOTE(review): relies on the db default for 'nummer'; verify the default is 1.
        cursor.execute(u'''INSERT INTO uniek (domein) VALUES (%(domein)s)''',{'domein': domein})
        nummer = 1
    if nummer > sys.maxint-2: #wrap around before overflowing (sys.maxint: python 2 only)
        nummer = 1
        cursor.execute(u'''UPDATE uniek SET nummer=1 WHERE domein=%(domein)s''',{'domein':domein})
    botsglobal.db.commit()
    cursor.close()
    return nummer
def checkunique(domein, receivednumber):
    ''' to check of received number is sequential: value is compare with earlier received value.
        if domain not used before, initialize it . '1' is the first value expected.
        Returns True when receivednumber is the expected next number (and stores it), else False.
    '''
    cursor = botsglobal.db.cursor()
    try:
        cursor.execute(u'''SELECT nummer FROM uniek WHERE domein=%(domein)s''',{'domein':domein})
        expectednumber = cursor.fetchone()['nummer'] + 1
    except: # ???.DatabaseError; domein does not exist
        cursor.execute(u'''INSERT INTO uniek (domein,nummer) VALUES (%(domein)s,0)''',{'domein': domein})
        expectednumber = 1
    if expectednumber == receivednumber:
        if expectednumber > sys.maxint-2: #sys.maxint: python 2 only
            #NOTE(review): this local assignment is never used; the UPDATE below still
            #increments, so the intended wrap-around to 1 does not seem to happen. Verify.
            nummer = 1
        cursor.execute(u'''UPDATE uniek SET nummer=nummer+1 WHERE domein=%(domein)s''',{'domein':domein})
        terug = True
    else:
        terug = False
    botsglobal.db.commit()
    cursor.close()
    return terug
def keeptrackoflastretry(domein,newlastta):
    ''' keep track of last automaticretrycommunication/retry
        if domain not used before, initialize it . '1' is the first value expected.
        Stores newlastta in the counter table and returns the previously stored value.
    '''
    cursor = botsglobal.db.cursor()
    try:
        cursor.execute(u'''SELECT nummer FROM uniek WHERE domein=%(domein)s''',{'domein':domein})
        oldlastta = cursor.fetchone()['nummer']
    except: # ???.DatabaseError; domein does not exist
        cursor.execute(u'''INSERT INTO uniek (domein) VALUES (%(domein)s)''',{'domein': domein})
        oldlastta = 1
    cursor.execute(u'''UPDATE uniek SET nummer=%(nummer)s WHERE domein=%(domein)s''',{'domein':domein,'nummer':newlastta})
    botsglobal.db.commit()
    cursor.close()
    return oldlastta
#**********************************************************/**
#*************************Logging, Error handling********************/**
#**********************************************************/**
def sendbotserrorreport(subject,reporttext):
    ''' mail an error report to the django MANAGERS; only when
        'sendreportiferror' is enabled in bots.ini. Best effort:
        a failure to send is only logged.
    '''
    if botsglobal.ini.getboolean('settings','sendreportiferror',False):
        from django.core.mail import mail_managers
        try:
            mail_managers(subject, reporttext)
        except:
            botsglobal.logger.debug(u'Error in sending error report: %s',txtexc())
def log_session(f):
    ''' used as decorator.
        The decorated functions are logged as processes.
        Errors in these functions are caught and logged; the wrapper then
        returns None instead of propagating the exception.
    '''
    @functools.wraps(f)     #preserve name/docstring of the wrapped function
    def wrapper(*args,**argv):
        terug = None    #bug fix: was unbound on the error path, so 'return terug' raised UnboundLocalError
        try:
            ta_session = NewProcess(f.__name__)
        except:
            botsglobal.logger.exception(u'System error - no new session made')
            raise
        try:
            terug = f(*args,**argv)
        except:
            txt = txtexc()
            botsglobal.logger.debug(u'Error in process: %s',txt)
            ta_session.update(statust=ERROR,errortext=txt)
        else:
            ta_session.update(statust=DONE)
        return terug
    return wrapper
def txtexc():
    ''' Get text from last exception (to store in db-ta or log). '''
    #full traceback only when debug is enabled in bots.ini; otherwise limit=0 (exception line only)
    if botsglobal.ini:
        if botsglobal.ini.getboolean('settings','debug',False):
            limit = None
        else:
            limit=0
    else:
        limit=0
    #problems with char set for some input data that are reported in traces....so always decode this;
    terug = traceback.format_exc(limit).decode('utf-8','ignore')
    #~ botsglobal.logger.debug(u'exception %s',terug)
    if hasattr(botsglobal,'dbinfo') and botsglobal.dbinfo.drivername != 'sqlite': #sqlite does not enforce strict lengths
        return terug[-1848:] #field size is 2048; but more text can be prepended.
    else:
        return terug
class ErrorProcess(NewTransaction):
    ''' Used in logging of errors in processes.
        20110828: used in communication.py
        Writes one process db-ta with statust=ERROR and the given errortext.
    '''
    def __init__(self,functionname='',errortext='',channeldict=None):
        #when a channel is involved, register it on the matching side: in->fromchannel, out->tochannel
        fromchannel = tochannel = ''
        if channeldict:
            if channeldict['inorout'] == 'in':
                fromchannel = channeldict['idchannel']
            else:
                tochannel = channeldict['idchannel']
        super(ErrorProcess,self).__init__(filename=functionname,status=PROCESS,idroute=getrouteid(),statust=ERROR,errortext=errortext,fromchannel=fromchannel,tochannel=tochannel)
#**********************************************************/**
#*************************File handling os.path, imports etc***********************/**
#**********************************************************/**
def botsbaseimport(modulename):
    ''' Do a dynamic import and return the (sub)module itself.
        __import__('a.b.c') returns top package 'a', so walk down the
        attribute chain to reach the actual submodule.
        Errors/exceptions are handled in calling functions.
    '''
    #strip a single leading dot (relative-looking name)
    name = modulename[1:] if modulename.startswith('.') else modulename
    module = __import__(name)
    for attribute in name.split('.')[1:]:
        module = getattr(module, attribute)
    return module
def botsimport(soort,modulename):
    ''' import modules from usersys.
        soort: subdirectory of usersys to import from.
        return: imported module, filename imported module;
        if could not be found or error in module: raise
    '''
    try: #__import__ is picky on the charset used. Might be different for different OS'es. So: test if charset is us-ascii
        modulename.encode('ascii')
    except UnicodeEncodeError: #if not us-ascii, convert to punycode
        modulename = modulename.encode('punycode')
    modulepath = '.'.join((botsglobal.usersysimportpath,soort,modulename)) #assemble import string
    modulefile = join(botsglobal.usersysimportpath,soort,modulename) #assemble abs filename for errortexts
    try:
        module = botsbaseimport(modulepath)
    except ImportError: #if module not found
        botsglobal.logger.debug(u'no import of "%s".',modulefile)
        raise
    except: #other errors (eg syntax error in user script): wrap with filename for a clear message
        txt=txtexc()
        raise ScriptImportError(_(u'import error in "$module", error:\n$txt'),module=modulefile,txt=txt)
    else:
        botsglobal.logger.debug(u'import "%s".',modulefile)
        return module,modulefile
def join(*paths):
    '''Does more than os.path.join:
        - join the paths (compare os.path.join)
        - a relative path is interpreted as relative to the bots directory (botspath)
        - normalize the result'''
    return os.path.normpath(os.path.join(botsglobal.ini.get('directories','botspath'),*paths))
def dirshouldbethere(path):
    ''' Ensure directory 'path' exists (creating intermediate dirs as needed).
        Returns True when the directory was created, False when it already
        existed or path is empty.
    '''
    if not path or os.path.exists(path):
        return False
    os.makedirs(path)
    return True
def abspath(soort,filename):
    ''' get absolute path for internal files; soort is a key in the [directories] section of bots.ini '''
    directory = botsglobal.ini.get('directories',soort)
    return join(directory,filename)
def abspathdata(filename):
    ''' abspathdata if filename incl dir: return absolute path; else (only filename): return absolute path (datadir)'''
    if '/' in filename: #if filename already contains path
        return join(filename)
    else:
        directory = botsglobal.ini.get('directories','data')
        datasubdir = filename[:-3] #data files are spread over subdirectories: all but the last 3 chars of the filename
        if not datasubdir:
            datasubdir = '0'
        return join(directory,datasubdir,filename)
def opendata(filename,mode,charset=None,errors=None):
    ''' open internal data file. if no encoding specified: read file raw/binary.'''
    filename = abspathdata(filename)
    if 'w' in mode:
        dirshouldbethere(os.path.dirname(filename)) #create the data subdirectory when writing
    if charset:
        return codecs.open(filename,mode,charset,errors)
    else:
        return open(filename,mode)
def readdata(filename,charset=None,errors=None):
    ''' read internal data file in memory using the right encoding or no encoding'''
    f = opendata(filename,'rb',charset,errors)
    content = f.read()
    f.close()
    return content
#**********************************************************/**
#*************************calling modules, programs***********************/**
#**********************************************************/**
def runscript(module,modulefile,functioninscript,**argv):
    ''' Execute user script. Functioninscript is supposed to be there; if not AttributeError is raised.
        Often is checked in advance if Functioninscript does exist.
        Any exception in the user function is wrapped in ScriptError (with filename and traceback text).
    '''
    botsglobal.logger.debug(u'run user script "%s" in "%s".',functioninscript,modulefile)
    functiontorun = getattr(module, functioninscript)
    try:
        return functiontorun(**argv)
    except:
        txt=txtexc()
        raise ScriptError(_(u'Script file "$filename": "$txt".'),filename=modulefile,txt=txt)
def tryrunscript(module,modulefile,functioninscript,**argv):
    ''' Run functioninscript only when the module exists and defines it.
        Returns True when the function was run, False otherwise.
    '''
    if not module or not hasattr(module,functioninscript):
        return False
    runscript(module,modulefile,functioninscript,**argv)
    return True
def runscriptyield(module,modulefile,functioninscript,**argv):
    ''' As runscript(), but for user functions that are generators: re-yields
        each result. Exceptions are wrapped in ScriptError.
    '''
    botsglobal.logger.debug(u'run user (yield) script "%s" in "%s".',functioninscript,modulefile)
    functiontorun = getattr(module, functioninscript)
    try:
        for result in functiontorun(**argv):
            yield result
    except:
        txt=txtexc()
        raise ScriptError(_(u'Script file "$filename": "$txt".'),filename=modulefile,txt=txt)
def runexternprogram(*args):
    ''' Run an external program; args is the program followed by its arguments.
        The program runs with its own directory as working directory.
        Raises OSError (with the captured traceback text) when the call fails.
    '''
    botsglobal.logger.debug(u'run external program "%s".',args)
    path = os.path.dirname(args[0])
    try:
        subprocess.call(list(args),cwd=path)
    except:
        txt=txtexc()
        #bug fix: translate the template first, then fill in values; the original
        #applied % inside _() so the msgid could never match a translation catalog.
        raise OSError(_(u'error running extern program "%(program)s", error:\n%(error)s')%{'program':args,'error':txt})
#**********************************************************/**
#***************############### mdn #############
#**********************************************************/**
def checkconfirmrules(confirmtype,**kwargs):
    ''' Determine whether a confirmation of type 'confirmtype' should be asked/sent.
        Walks all active confirmrules for this confirmtype and matches them against
        the given idroute/idchannel/frompartner/topartner/editype+messagetype.
        Negative rules are ordered last (ORDER BY negativerule ASC) so they override
        earlier positive matches. Returns True/False.
    '''
    terug = False #boolean to return: ask a confirm of not?
    for confirmdict in query(u'''SELECT ruletype,idroute,idchannel_id as idchannel,frompartner_id as frompartner,topartner_id as topartner,editype,messagetype,negativerule
                                FROM confirmrule
                                WHERE active=%(active)s
                                AND confirmtype=%(confirmtype)s
                                ORDER BY negativerule ASC
                                ''',
                                {'active':True,'confirmtype':confirmtype}):
        if confirmdict['ruletype']=='all':
            terug = not confirmdict['negativerule']
        elif confirmdict['ruletype']=='route':
            if 'idroute' in kwargs and confirmdict['idroute'] == kwargs['idroute']:
                terug = not confirmdict['negativerule']
        elif confirmdict['ruletype']=='channel':
            if 'idchannel' in kwargs and confirmdict['idchannel'] == kwargs['idchannel']:
                terug = not confirmdict['negativerule']
        elif confirmdict['ruletype']=='frompartner':
            if 'frompartner' in kwargs and confirmdict['frompartner'] == kwargs['frompartner']:
                terug = not confirmdict['negativerule']
        elif confirmdict['ruletype']=='topartner':
            if 'topartner' in kwargs and confirmdict['topartner'] == kwargs['topartner']:
                terug = not confirmdict['negativerule']
        elif confirmdict['ruletype']=='messagetype':
            if 'editype' in kwargs and confirmdict['editype'] == kwargs['editype'] and 'messagetype' in kwargs and confirmdict['messagetype'] == kwargs['messagetype']:
                terug = not confirmdict['negativerule']
    #~ print '>>>>>>>>>>>>', terug,confirmtype,kwargs
    return terug
#**********************************************************/**
#***************############### codecs #############
#**********************************************************/**
def getcodeccanonicalname(codecname):
    ''' Return the canonical name of a codec, eg 'UTF8' -> 'utf-8'. '''
    return codecs.lookup(codecname).name
def checkcodeciscompatible(charset1,charset2):
    ''' check if charset of edi-file is 'compatible' with charset of channel: OK; else: raise exception.
        Returns True when compatible; raises CommunicationOutError otherwise.
    '''
    #some codecs are upward compatible (subsets); this maps a charset to the charsets it is upward compatible with.
    #some charset are 1 byte (ascii, ISO-8859-*). others are more bytes (UTF-16, utf-32. UTF-8 is more bytes, but is ascii compatible.
    compatible_supersets = {
        'unoa':['unob','ascii','utf-8','iso8859-1','cp1252','iso8859-15'],
        'unob':['ascii','utf-8','iso8859-1','cp1252','iso8859-15'],
        'ascii':['utf-8','iso8859-1','cp1252','iso8859-15'],
        }
    charset_edifile = getcodeccanonicalname(charset1)
    charset_channel = getcodeccanonicalname(charset2)
    if charset_channel == charset_edifile:
        return True
    if charset_channel in compatible_supersets.get(charset_edifile,[]):
        return True
    raise CommunicationOutError(_(u'Charset "$charset2" for channel not matching with charset "$charset1" for edi-file.'),charset1=charset1,charset2=charset2)
#**********************************************************/**
#***************############### misc. #############
#**********************************************************/**
class Uri(object):
    ''' generate uri from parts.
        Parts are kept in self.uriparts; the assembled uri is available via the
        'uri' property (raises BotsError when no scheme is set or result is empty).
    '''
    def __init__(self,**kw):
        self.uriparts = dict(scheme='',username='',password='',host='',port='',path='',parameters='',filename='',query={},fragment='')
        self.uriparts.update(**kw)
    def update(self,**kw):
        ''' update parts and return the newly assembled uri. '''
        self.uriparts.update(kw)
        return self.uri
    @property #the getter
    def uri(self):
        #NOTE(review): the 'password' part is accepted in __init__ but never used
        #in the assembled uri (only username/host/port are) — verify if intended.
        if not self.uriparts['scheme']:
            raise BotsError(_(u'No scheme in uri.'))
        #assemble complete host name
        fullhost = ''
        if self.uriparts['username']: #always use both?
            fullhost += self.uriparts['username'] + '@'
        if self.uriparts['host']:
            fullhost += self.uriparts['host']
        if self.uriparts['port']:
            fullhost += ':' + str(self.uriparts['port'])
        #assemble complete path
        if self.uriparts['path'].strip().endswith('/'):
            fullpath = self.uriparts['path'] + self.uriparts['filename']
        else:
            fullpath = self.uriparts['path'] + '/' + self.uriparts['filename']
        if fullpath.endswith('/'):
            fullpath = fullpath[:-1]
        _uri = urlparse.urlunparse((self.uriparts['scheme'],fullhost,fullpath,self.uriparts['parameters'],urllib.urlencode(self.uriparts['query']),self.uriparts['fragment']))
        if not _uri:
            raise BotsError(_(u'Uri is empty.'))
        return _uri
def settimeout(milliseconds):
    #NOTE(review): despite the parameter name, the value is passed unchanged to
    #socket.setdefaulttimeout(), which interprets it as *seconds* — verify callers.
    socket.setdefaulttimeout(milliseconds) #set a time-out for TCP-IP connections
def countunripchars(value,delchars):
    ''' Count the characters of 'value' that do not occur in 'delchars'. '''
    return sum(1 for char in value if char not in delchars)
def updateunlessset(updatedict,fromdict):
    ''' Copy entries of fromdict into updatedict, but only for keys that
        updatedict does not have yet (existing values are never overwritten). '''
    for key in fromdict:
        updatedict.setdefault(key,fromdict[key])
#**********************************************************/**
#************** Exception classes ***************************
#**********************************************************/**
class BotsError(Exception):
    ''' Root of the bots exception hierarchy.
        msg is a string.Template with $placeholders; kwargs supply the substitutions.
    '''
    def __init__(self, msg,**kwargs):
        self.msg = msg
        self.kwargs = kwargs
    def __str__(self):
        #safe_substitute: placeholders without a matching kwarg are left as-is instead of raising
        s = string.Template(self.msg).safe_substitute(self.kwargs)
        return s.encode(u'utf-8',u'ignore') #python 2 style __str__: returns a byte string
#specific bots errors; all subclass BotsError, so catching BotsError catches them all.
class CodeConversionError(BotsError):
    pass
class CommunicationError(BotsError):
    pass
class CommunicationInError(BotsError):
    pass
class CommunicationOutError(BotsError):
    pass
class EanError(BotsError):
    pass
class GrammarError(BotsError): #grammar.py
    pass
class InMessageError(BotsError):
    pass
class InMessageFieldError(BotsError):
    pass
class LockedFileError(BotsError):
    pass
class MessageError(BotsError):
    pass
class MappingRootError(BotsError):
    pass
class MappingFormatError(BotsError): #mpath is not valid; mapth will mostly come from mapping-script
    pass
class OutMessageError(BotsError):
    pass
class PanicError(BotsError):
    pass
class PersistError(BotsError):
    pass
class PluginError(BotsError):
    pass
class ScriptImportError(BotsError): #can not find script; not for errors in a script
    pass
class ScriptError(BotsError): #runtime errors in a script
    pass
class TraceError(BotsError):
    pass
class TraceNotPickedUpError(BotsError):
    pass
class TranslationNotFoundError(BotsError):
    pass
| Python |
"""SMTP over SSL client.
used for python < 2.5
in python 2.6 and up the smtp-library has a class SMTP_SSL
Public class: SMTP_SSL
Public errors: SMTPSSLException
"""
# Author: Matt Butcher <mbutche@luc.edu>, Feb. 2007
# License: MIT License (or, at your option, the GPL, v.2 or later as posted at
# http://gnu.org).
##
## Begin License
#
# Copyright (c) 2007 M Butcher
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##
##End License
#
# This is just a minor modification to the smtplib code by Dragon De Monsyn.
import smtplib, socket
__version__ = "1.00"
__all__ = ['SMTPSSLException', 'SMTP_SSL']
SSMTP_PORT = 465
class SMTPSSLException(smtplib.SMTPException):
    """Base class for exceptions resulting from SSL negotiation."""
    #raised by SMTP_SSL.starttls() below (StartTLS is impossible inside an SSL session)
class SMTP_SSL (smtplib.SMTP):
"""This class provides SSL access to an SMTP server.
SMTP over SSL typical listens on port 465. Unlike StartTLS, SMTP over SSL
makes an SSL connection before doing a helo/ehlo. All transactions, then,
are done over an encrypted channel.
This class is a simple subclass of the smtplib.SMTP class that comes with
Python. It overrides the connect() method to use an SSL socket, and it
overrides the starttls() function to throw an error (you can't do
starttls within an SSL session).
"""
certfile = None
keyfile = None
def __init__(self, host='', port=0, local_hostname=None, keyfile=None, certfile=None):
"""Initialize a new SSL SMTP object.
If specified, `host' is the name of the remote host to which this object
will connect. If specified, `port' specifies the port (on `host') to
which this object will connect. `local_hostname' is the name of the
localhost. By default, the value of socket.getfqdn() is used.
An SMTPConnectError is raised if the SMTP host does not respond
correctly.
An SMTPSSLError is raised if SSL negotiation fails.
Warning: This object uses socket.ssl(), which does not do client-side
verification of the server's cert.
"""
self.certfile = certfile
self.keyfile = keyfile
smtplib.SMTP.__init__(self, host, port, local_hostname)
def connect(self, host='localhost', port=0):
"""Connect to an SMTP server using SSL.
`host' is localhost by default. Port will be set to 465 (the default
SSL SMTP port) if no port is specified.
If the host name ends with a colon (`:') followed by a number,
that suffix will be stripped off and the
number interpreted as the port number to use. This will override the
`port' parameter.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
# MB: Most of this (Except for the socket connection code) is from
# the SMTP.connect() method. I changed only the bare minimum for the
# sake of compatibility.
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i+1:]
try: port = int(port)
except ValueError:
raise socket.error, "nonnumeric port"
if not port: port = SSMTP_PORT
if self.debuglevel > 0: print>>stderr, 'connect:', (host, port)
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
if self.debuglevel > 0: print>>stderr, 'connect:', (host, port)
self.sock.connect(sa)
# MB: Make the SSL connection.
sslobj = socket.ssl(self.sock, self.keyfile, self.certfile)
except socket.error, msg:
if self.debuglevel > 0:
print>>stderr, 'connect fail:', (host, port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
# MB: Now set up fake socket and fake file classes.
# Thanks to the design of smtplib, this is all we need to do
# to get SSL working with all other methods.
self.sock = smtplib.SSLFakeSocket(self.sock, sslobj)
self.file = smtplib.SSLFakeFile(sslobj);
(code, msg) = self.getreply()
if self.debuglevel > 0: print>>stderr, "connect:", msg
return (code, msg)
def setkeyfile(self, keyfile):
"""Set the absolute path to a file containing a private key.
This method will only be effective if it is called before connect().
This key will be used to make the SSL connection."""
self.keyfile = keyfile
def setcertfile(self, certfile):
"""Set the absolute path to a file containing a x.509 certificate.
This method will only be effective if it is called before connect().
This certificate will be used to make the SSL connection."""
self.certfile = certfile
def starttls():
"""Raises an exception.
You cannot do StartTLS inside of an ssl session. Calling starttls() will
return an SMTPSSLException"""
raise SMTPSSLException, "Cannot perform StartTLS within SSL session."
| Python |
""" Python Character Mapping Codec generated from CP1252.TXT with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
Adapted by Henk-Jan Ebbers for Bots open source EDI translator
Regular UNOB: UNOB char, CR, LF and Crtl-Z
"""
import codecs
import sys
### Codec APIs
class Codec(codecs.Codec):
    #stateless codec: encode/decode via the module-level charmap tables defined below
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    #stream variant; behaviour fully inherited from Codec
    pass
class StreamReader(Codec,codecs.StreamReader):
    #stream variant; behaviour fully inherited from Codec
    pass
### encodings module API
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        #[0] drops the 'number of characters consumed' element of the result tuple
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        #BUGFIX: previously referenced 'decoding_table', a name that does not exist
        #in this module (only 'decoding_map' is defined) -> NameError on first use.
        #[0] drops the 'number of bytes consumed' element of the result tuple.
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
def getregentry():
    '''codecs registry hook: return the CodecInfo describing the 'unob' charset.'''
    return codecs.CodecInfo(
        name='unob',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
#decoding_map = codecs.make_identity_dict(range(128))
#decoding_map.update({
decoding_map = {
# 0x0000:0x0000, #NUL
# 0x0001:0x0000, #SOH
# 0x0002:0x0000, #STX
# 0x0003:0x0000, #ETX
# 0x0004:0x0000, #EOT
# 0x0005:0x0000, #ENQ
# 0x0006:0x0000, #ACK
# 0x0007:0x0000, #Bell
# 0x0008:0x0000, #BackSpace
# 0x0009:0x0000, #Tab
0x000a:0x000a, #lf
# 0x000b:0x0000, #Vertical Tab
# 0x000c:0x0000, #FormFeed
0x000d:0x000d, #cr
# 0x000e:0x0000, #SO
# 0x000f:0x0000, #SI
# 0x0010:0x0000, #DLE
# 0x0011:0x0000, #DC1
# 0x0012:0x0000, #DC2
# 0x0013:0x0000, #DC3
# 0x0014:0x0000, #DC4
# 0x0015:0x0000, #NAK
# 0x0016:0x0000, #SYN
# 0x0017:0x0000, #ETB
# 0x0018:0x0000, #CAN
# 0x0019:0x0000, #EM
0x001a:0x001a, #SUB, cntrl-Z
# 0x001b:0x0000, #ESC
0x001c:0x001c, #FS
0x001d:0x001d, #GS
# 0x001e:0x0000, #RS
0x001f:0x001f, #US
0x0020:0x0020, #<SPACE>
0x0021:0x0021, #!
0x0022:0x0022, #"
# 0x0023:0x0023, ##
# 0x0024:0x0024, #$
0x0025:0x0025, #%
0x0026:0x0026, #&
0x0027:0x0027, #'
0x0028:0x0028, #(
0x0029:0x0029, #)
0x002A:0x002A, #*
0x002B:0x002B, #+
0x002C:0x002C, #,
0x002D:0x002D, #-
0x002E:0x002E, #.
0x002F:0x002F, #/
0x0030:0x0030, #0
0x0031:0x0031, #1
0x0032:0x0032, #2
0x0033:0x0033, #3
0x0034:0x0034, #4
0x0035:0x0035, #5
0x0036:0x0036, #6
0x0037:0x0037, #7
0x0038:0x0038, #8
0x0039:0x0039, #9
0x003A:0x003A, #:
0x003B:0x003B, #;
0x003C:0x003C, #<
0x003D:0x003D, #=
0x003E:0x003E, #>
0x003F:0x003F, #?
# 0x0040:0x0040, #@
0x0041:0x0041, #A
0x0042:0x0042, #B
0x0043:0x0043, #C
0x0044:0x0044, #D
0x0045:0x0045, #E
0x0046:0x0046, #F
0x0047:0x0047, #G
0x0048:0x0048, #H
0x0049:0x0049, #I
0x004A:0x004A, #J
0x004B:0x004B, #K
0x004C:0x004C, #L
0x004D:0x004D, #M
0x004E:0x004E, #N
0x004F:0x004F, #O
0x0050:0x0050, #P
0x0051:0x0051, #Q
0x0052:0x0052, #R
0x0053:0x0053, #S
0x0054:0x0054, #T
0x0055:0x0055, #U
0x0056:0x0056, #V
0x0057:0x0057, #W
0x0058:0x0058, #X
0x0059:0x0059, #Y
0x005A:0x005A, #Z
# 0x005B:0x005B, #[
# 0x005C:0x005C, #\
# 0x005D:0x005D, #]
# 0x005E:0x005E, #^
# 0x005F:0x005F, #_
# 0x0060:0x0060, #`
0x0061:0x0061, #a
0x0062:0x0062, #b
0x0063:0x0063, #c
0x0064:0x0064, #d
0x0065:0x0065, #e
0x0066:0x0066, #f
0x0067:0x0067, #g
0x0068:0x0068, #h
0x0069:0x0069, #i
0x006a:0x006a, #j
0x006b:0x006b, #k
0x006c:0x006c, #l
0x006d:0x006d, #m
0x006e:0x006e, #n
0x006f:0x006f, #o
0x0070:0x0070, #p
0x0071:0x0071, #q
0x0072:0x0072, #r
0x0073:0x0073, #s
0x0074:0x0074, #t
0x0075:0x0075, #u
0x0076:0x0076, #v
0x0077:0x0077, #w
0x0078:0x0078, #x
0x0079:0x0079, #y
0x007a:0x007a, #z
# 0x007B:0x007B, #{
# 0x007C:0x007C, #|
# 0x007D:0x007D, #}
# 0x007E:0x007E, #~
# 0x007F:0x007F, #DEL
}
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| Python |
""" Python Character Mapping Codec generated from CP1252.TXT with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
Adapted by Henk-Jan Ebbers for Bots open source EDI translator
Regular UNOA: UNOA char, CR, LF and Crtl-Z
"""
import codecs
import sys
### Codec APIs
class Codec(codecs.Codec):
    #stateless codec: encode/decode via the module-level charmap tables defined below
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    #stream variant; behaviour fully inherited from Codec
    pass
class StreamReader(Codec,codecs.StreamReader):
    #stream variant; behaviour fully inherited from Codec
    pass
### encodings module API
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        #[0] drops the 'number of characters consumed' element of the result tuple
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        #BUGFIX: previously referenced 'decoding_table', a name that does not exist
        #in this module (only 'decoding_map' is defined) -> NameError on first use.
        #[0] drops the 'number of bytes consumed' element of the result tuple.
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
def getregentry():
    '''codecs registry hook: return the CodecInfo describing the 'unoa' charset.'''
    return codecs.CodecInfo(
        name='unoa',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
#decoding_map = codecs.make_identity_dict(range(128))
#decoding_map.update({
decoding_map = {
# 0x0000:0x0000, #NUL
# 0x0001:0x0000, #SOH
# 0x0002:0x0000, #STX
# 0x0003:0x0000, #ETX
# 0x0004:0x0000, #EOT
# 0x0005:0x0000, #ENQ
# 0x0006:0x0000, #ACK
# 0x0007:0x0000, #Bell
# 0x0008:0x0000, #BackSpace
# 0x0009:0x0000, #Tab
0x000a:0x000a, #lf
# 0x000b:0x0000, #Vertical Tab
# 0x000c:0x0000, #FormFeed
0x000d:0x000d, #cr
# 0x000e:0x0000, #SO
# 0x000f:0x0000, #SI
# 0x0010:0x0000, #DLE
# 0x0011:0x0000, #DC1
# 0x0012:0x0000, #DC2
# 0x0013:0x0000, #DC3
# 0x0014:0x0000, #DC4
# 0x0015:0x0000, #NAK
# 0x0016:0x0000, #SYN
# 0x0017:0x0000, #ETB
# 0x0018:0x0000, #CAN
# 0x0019:0x0000, #EM
0x001a:0x001a, #SUB, cntrl-Z
# 0x001b:0x0000, #ESC
# 0x001c:0x0000, #FS
# 0x001d:0x0000, #GS
# 0x001e:0x0000, #RS
# 0x001f:0x0000, #US
0x0020:0x0020, #<space>
0x0021:0x0021, #!
0x0022:0x0022, #"
# 0x0023:0x0023, ##
# 0x0024:0x0024, #$
0x0025:0x0025, #%
0x0026:0x0026, #&
0x0027:0x0027, #'
0x0028:0x0028, #(
0x0029:0x0029, #)
0x002a:0x002a, #*
0x002b:0x002b, #+
0x002c:0x002c, #,
0x002d:0x002d, #-
0x002e:0x002e, #.
0x002f:0x002f, #/
0x0030:0x0030, #0
0x0031:0x0031, #1
0x0032:0x0032, #2
0x0033:0x0033, #3
0x0034:0x0034, #4
0x0035:0x0035, #5
0x0036:0x0036, #6
0x0037:0x0037, #7
0x0038:0x0038, #8
0x0039:0x0039, #9
0x003a:0x003a, #:
0x003b:0x003b, #;
0x003c:0x003c, #<
0x003d:0x003d, #=
0x003e:0x003e, #>
0x003f:0x003f, #?
# 0x0040:0x0040, #@
0X0041:0X0041, #A
0X0042:0X0042, #B
0X0043:0X0043, #C
0X0044:0X0044, #D
0X0045:0X0045, #E
0X0046:0X0046, #F
0X0047:0X0047, #G
0X0048:0X0048, #H
0X0049:0X0049, #I
0X004A:0X004A, #J
0X004B:0X004B, #K
0X004C:0X004C, #L
0X004D:0X004D, #M
0X004E:0X004E, #N
0X004F:0X004F, #O
0X0050:0X0050, #P
0X0051:0X0051, #Q
0X0052:0X0052, #R
0X0053:0X0053, #S
0X0054:0X0054, #T
0X0055:0X0055, #U
0X0056:0X0056, #V
0X0057:0X0057, #W
0X0058:0X0058, #X
0X0059:0X0059, #Y
0X005A:0X005A, #Z
# 0x005b:0x005b, #[
# 0x005c:0x005c, #\
# 0x005d:0x005d, #]
# 0x005e:0x005e, #^
# 0x005f:0x005f, #_
# 0x0060:0x0060, #`
# 0x0061:0x0041, #a
# 0x0062:0x0042, #b
# 0x0063:0x0043, #c
# 0x0064:0x0044, #d
# 0x0065:0x0045, #e
# 0x0066:0x0046, #f
# 0x0067:0x0047, #g
# 0x0068:0x0048, #h
# 0x0069:0x0049, #i
# 0x006a:0x004a, #j
# 0x006b:0x004b, #k
# 0x006c:0x004c, #l
# 0x006d:0x004d, #m
# 0x006e:0x004e, #n
# 0x006f:0x004f, #o
# 0x0070:0x0050, #p
# 0x0071:0x0051, #q
# 0x0072:0x0052, #r
# 0x0073:0x0053, #s
# 0x0074:0x0054, #t
# 0x0075:0x0055, #u
# 0x0076:0x0056, #v
# 0x0077:0x0057, #w
# 0x0078:0x0058, #x
# 0x0079:0x0059, #y
# 0x007a:0x005a, #z
# 0x007b:0x007b, #{
# 0x007c:0x007c, #|
# 0x007d:0x007d, #}
# 0x007e:0x007e, #~
# 0x007f:0x007f, #del
}
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| Python |
#code conversion lookup table: source code (key) -> converted code (value).
#several source codes may map to the same target (e.g. '351' and '35E' both -> 'AAK').
codeconversions = {
    '351':'AAK',
    '35E':'AAK',
    '220':'ON',
    '224':'ON',
    '50E':'ON',
    '83':'IV',
    '380':'IV',
    '384':'IV',
    'TESTIN':'TESTOUT',
    }
| Python |
import time
import sys
try:
import cPickle as pickle
except:
import pickle
import decimal
NODECIMAL = decimal.Decimal(1)
try:
import cElementTree as ET
#~ print 'imported cElementTree'
except ImportError:
try:
import elementtree.ElementTree as ET
#~ print 'imported elementtree.ElementTree'
except ImportError:
try:
from xml.etree import cElementTree as ET
#~ print 'imported xml.etree.cElementTree'
except ImportError:
from xml.etree import ElementTree as ET
#~ print 'imported xml.etree.ElementTree'
#~ print ET.VERSION
try:
import elementtree.ElementInclude as ETI
except ImportError:
from xml.etree import ElementInclude as ETI
try:
import json as simplejson
except ImportError:
import simplejson
from django.utils.translation import ugettext as _
#bots-modules
import botslib
import botsglobal
import grammar
import message
import node
from botsconfig import *
def outmessage_init(**ta_info):
    ''' dispatch function: look up the Outmessage subclass named by ta_info['editype']
        in this module and instantiate it.
        ta_info: needed is editype, messagetype, filename, charset, merge
    '''
    editype = ta_info['editype']
    classtocall = globals().get(editype)
    if classtocall is None:
        raise botslib.OutMessageError(_(u'Unknown editype for outgoing message: $editype'),editype=editype)
    return classtocall(ta_info)
class Outmessage(message.Message):
    ''' abstract class; represents an outgoing edi message.
        subclassing is necessary for the editype (csv, edi, x12, etc)
        A tree of nodes is built from the mpaths received from put() or putloop(). tree starts at self.root.
        Put() receives mpaths from mappingscript
        The next algorithm is used to 'map' a mpath into the tree:
            For each part of a mpath: search node in 'current' level of tree
            If part already as a node:
                recursively search node-children
            If part not as a node:
                append new node to tree;
                recursively append next parts to tree
        After the mapping-script is finished, the resulting tree is converted to records (self.records).
        These records are written to file.
        Structure of self.records:
            list of record;
            record is list of field
            field is dict. Keys in field:
            - ID     field ID (id within this record). For in-file
            - VALUE  value, content of field
            - MPATH  mpath of record, only for first field(=recordID)
            - LIN    linenr of field in in-file
            - POS    positionnr within line in in-file
            - SFIELD True if subfield (edifact-only)
            first field for record is recordID.
    '''
    def __init__(self,ta_info):
        #ta_info: editype, messagetype, filename, charset plus syntax values used while writing
        self.ta_info = ta_info
        self.root = node.Node(record={}) #message tree; built via put()-interface in mapping-script. Initialise with empty dict
        super(Outmessage,self).__init__()
    def outmessagegrammarread(self,editype,messagetype):
        ''' read the grammar for a out-message.
            try to read the topartner dependent grammar syntax.
        '''
        self.defmessage = grammar.grammarread(editype,messagetype)
        self.defmessage.display(self.defmessage.structure)  #NOTE(review): looks like leftover debug output -- confirm
        #~ print 'self.ta_info',self.ta_info
        #~ print 'self.defmessage.syntax',self.defmessage.syntax
        botslib.updateunlessset(self.ta_info,self.defmessage.syntax) #write values from grammar to self.ta_info - unless these values are already set eg by mapping script
        if self.ta_info['topartner']: #read syntax-file for partner dependent syntax
            try:
                partnersyntax = grammar.syntaxread('partners',editype,self.ta_info['topartner'])
                self.ta_info.update(partnersyntax.syntax) #partner syntax overrules!
            except ImportError:
                pass #No partner specific syntax found (is not an error).
    def writeall(self):
        ''' writeall is called for writing all 'real' outmessage objects; but not for envelopes.
            writeall is called from transform.translate()
        '''
        self.outmessagegrammarread(self.ta_info['editype'],self.ta_info['messagetype'])
        self.nrmessagewritten = 0
        if self.root.record: #root record contains information; write whole tree in one time
            self.multiplewrite = False
            self.normalisetree(self.root)
            self._initwrite()
            self._write(self.root)
            self.nrmessagewritten = 1
            self._closewrite()
        elif not self.root.children:
            raise botslib.OutMessageError(_(u'No outgoing message')) #then there is nothing to write...
        else:
            #multiple messages under an empty root: write them all to one file
            self.multiplewrite = True
            for childnode in self.root.children:
                self.normalisetree(childnode)
            self._initwrite()
            for childnode in self.root.children:
                self._write(childnode)
                self.nrmessagewritten += 1
            self._closewrite()
    def _initwrite(self):
        #open the output file using the charset and charset-error handling from ta_info
        botsglobal.logger.debug(u'Start writing to file "%s".',self.ta_info['filename'])
        self._outstream = botslib.opendata(self.ta_info['filename'],'wb',charset=self.ta_info['charset'],errors=self.ta_info['checkcharsetout'])
    def _closewrite(self):
        botsglobal.logger.debug(u'End writing to file "%s".',self.ta_info['filename'])
        self._outstream.close()
    def _write(self,node):
        ''' the write method for most classes.
            tree is serialised to sequential records; records are written to file.
            Classes that write using other libraries (xml, json, template, db) use specific write methods.
        '''
        self.tree2records(node)
        self._records2file()
    def tree2records(self,node):
        #flatten the node tree into self.records, guided by the grammar structure
        self.records = [] #tree of nodes is flattened to these records
        self._tree2recordscore(node,self.defmessage.structure[0])
    def _tree2recordscore(self,node,structure):
        ''' Write tree of nodes to flat records.
            The nodes are already sorted
        '''
        self._tree2recordfields(node.record,structure) #write root node->first record
        for childnode in node.children: #for every node in mpathtree, these are already sorted#SPEED: node.children is already sorted!
            for structure_record in structure[LEVEL]: #for structure_record of this level in grammar
                if childnode.record['BOTSID'] == structure_record[ID] and childnode.record['BOTSIDnr'] == structure_record[BOTSIDnr]: #if it is the right node:
                    self._tree2recordscore(childnode,structure_record) #use rest of index in deeper level
    def _tree2recordfields(self,noderecord,structure_record):
        ''' appends fields in noderecord to (raw)record; use structure_record as guide.
            complex because it is used for: editypes that have compression rules (edifact), var editypes without compression, fixed protocols
        '''
        buildrecord = [] #the record that is going to be build; list of dicts. Each dict is a field.
        buffer = [] #holds empty fields until it is known whether a later field has data (NOTE: name shadows the python2 builtin)
        for grammarfield in structure_record[FIELDS]: #loop all fields in grammar-definition
            if grammarfield[ISFIELD]: #if field (no composite)
                if grammarfield[ID] in noderecord and noderecord[grammarfield[ID]]: #field exists in outgoing message and has data
                    buildrecord += buffer #write the buffer to buildrecord
                    buffer=[] #clear the buffer
                    buildrecord += [{VALUE:noderecord[grammarfield[ID]],SFIELD:False,FORMATFROMGRAMMAR:grammarfield[FORMAT]}] #append new field
                else: #there is no data for this field
                    if self.ta_info['stripfield_sep']:
                        buffer += [{VALUE:'',SFIELD:False,FORMATFROMGRAMMAR:grammarfield[FORMAT]}] #append new empty to buffer;
                    else:
                        value = self._formatfield('',grammarfield,structure_record) #generate field
                        buildrecord += [{VALUE:value,SFIELD:False,FORMATFROMGRAMMAR:grammarfield[FORMAT]}] #append new field
            else: #if composite
                donefirst = False #used because first subfield in composite is marked as a field (not a subfield).
                subbuffer=[] #buffer for this composite.
                subiswritten=False #check if composite contains data
                for grammarsubfield in grammarfield[SUBFIELDS]: #loop subfields
                    if grammarsubfield[ID] in noderecord and noderecord[grammarsubfield[ID]]: #field exists in outgoing message and has data
                        buildrecord += buffer #write buffer
                        buffer=[] #clear buffer
                        buildrecord += subbuffer #write subbuffer
                        subbuffer=[] #clear subbuffer
                        buildrecord += [{VALUE:noderecord[grammarsubfield[ID]],SFIELD:donefirst}] #append field
                        subiswritten = True
                    else:
                        if self.ta_info['stripfield_sep']:
                            subbuffer += [{VALUE:'',SFIELD:donefirst}] #append new empty to buffer;
                        else:
                            value = self._formatfield('',grammarsubfield,structure_record) #generate & append new field. For eg fixed and csv: all field have to be present
                            subbuffer += [{VALUE:value,SFIELD:donefirst}] #generate & append new field
                    donefirst = True
                if not subiswritten: #if composite has no data: write placeholder for composite (stripping is done later)
                    buffer += [{VALUE:'',SFIELD:False}]
        #~ print [buildrecord]
        self.records += [buildrecord]
    def _formatfield(self,value, grammarfield,record):
        ''' Input: value (as a string) and field definition.
            Some parameters of self.syntax are used: decimaal
            Format is checked and converted (if needed).
            return the formatted value
        '''
        if grammarfield[BFORMAT] == 'A':
            if isinstance(self,fixed): #check length fields in variable records
                if grammarfield[FORMAT] == 'AR': #if field format is alfanumeric right aligned
                    value = value.rjust(grammarfield[MINLENGTH])
                else:
                    value = value.ljust(grammarfield[MINLENGTH]) #add spaces (left, because A-field is right aligned)
            valuelength=len(value)
            if valuelength > grammarfield[LENGTH]:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" too big (max $max): "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH],max=grammarfield[LENGTH])
            if valuelength < grammarfield[MINLENGTH]:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" too small (min $min): "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH],min=grammarfield[MINLENGTH])
        elif grammarfield[BFORMAT] == 'D':
            #date: accept YYMMDD (6) or CCYYMMDD (8); strptime validates the content
            try:
                lenght = len(value)
                if lenght==6:
                    time.strptime(value,'%y%m%d')
                elif lenght==8:
                    time.strptime(value,'%Y%m%d')
                else:
                    raise ValueError(u'To be catched')
            except ValueError:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" no valid date: "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH])
            valuelength=len(value)
            if valuelength > grammarfield[LENGTH]:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" too big (max $max): "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH],max=grammarfield[LENGTH])
            if valuelength < grammarfield[MINLENGTH]:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" too small (min $min): "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH],min=grammarfield[MINLENGTH])
        elif grammarfield[BFORMAT] == 'T':
            #time: accept HHMM (4) or HHMMSS (6)
            try:
                lenght = len(value)
                if lenght==4:
                    time.strptime(value,'%H%M')
                elif lenght==6:
                    time.strptime(value,'%H%M%S')
                else: #lenght==8: #tsja...just use first part of field
                    raise ValueError(u'To be catched')
            except ValueError:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" no valid time: "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH])
            valuelength=len(value)
            if valuelength > grammarfield[LENGTH]:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" too big (max $max): "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH],max=grammarfield[LENGTH])
            if valuelength < grammarfield[MINLENGTH]:
                raise botslib.OutMessageError(_(u'record "$mpath" field "$field" too small (min $min): "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH],min=grammarfield[MINLENGTH])
        else: #numerics
            if value or isinstance(self,fixed): #if empty string for non-fixed: just return. Later on, ta_info[stripemptyfield] determines what to do with them
                if not value: #see last if; if a numerical fixed field has content '' , change this to '0' (init)
                    value='0'
                else:
                    value = value.strip()
                #split the value into sign, integer digits and decimal digits
                if value[0]=='-':
                    minussign = '-'
                    absvalue = value[1:]
                else:
                    minussign = ''
                    absvalue = value
                digits,decimalsign,decimals = absvalue.partition('.')
                if not digits and not decimals:# and decimalsign:
                    raise botslib.OutMessageError(_(u'record "$mpath" field "$field" numerical format not valid: "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH])
                if not digits:
                    digits = '0'
                lengthcorrection = 0 #for some formats (if self.ta_info['lengthnumericbare']=True; eg edifact) length is calculated without decimal sign and/or minus sign.
                if grammarfield[BFORMAT] == 'R': #floating point: use all decimals received
                    if self.ta_info['lengthnumericbare']:
                        if minussign:
                            lengthcorrection += 1
                        if decimalsign:
                            lengthcorrection += 1
                    try:
                        value = str(decimal.Decimal(minussign + digits + decimalsign + decimals).quantize(decimal.Decimal(10) ** -len(decimals)))
                    except:
                        raise botslib.OutMessageError(_(u'record "$mpath" field "$field" numerical format not valid: "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH])
                    if grammarfield[FORMAT] == 'RL': #if field format is numeric left aligned
                        value = value.ljust(grammarfield[MINLENGTH] + lengthcorrection)
                    elif grammarfield[FORMAT] == 'RR': #if field format is numeric right aligned
                        value = value.rjust(grammarfield[MINLENGTH] + lengthcorrection)
                    else:
                        value = value.zfill(grammarfield[MINLENGTH] + lengthcorrection)
                    value = value.replace('.',self.ta_info['decimaal'],1) #replace '.' by required decimal sep.
                elif grammarfield[BFORMAT] == 'N': #fixed decimals; round
                    if self.ta_info['lengthnumericbare']:
                        if minussign:
                            lengthcorrection += 1
                        if grammarfield[DECIMALS]:
                            lengthcorrection += 1
                    try:
                        value = str(decimal.Decimal(minussign + digits + decimalsign + decimals).quantize(decimal.Decimal(10) ** -grammarfield[DECIMALS]))
                    except:
                        raise botslib.OutMessageError(_(u'record "$mpath" field "$field" numerical format not valid: "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH])
                    if grammarfield[FORMAT] == 'NL': #if field format is numeric left aligned
                        value = value.ljust(grammarfield[MINLENGTH] + lengthcorrection)
                    elif grammarfield[FORMAT] == 'NR': #if field format is numeric right aligned
                        value = value.rjust(grammarfield[MINLENGTH] + lengthcorrection)
                    else:
                        value = value.zfill(grammarfield[MINLENGTH] + lengthcorrection)
                    value = value.replace('.',self.ta_info['decimaal'],1) #replace '.' by required decimal sep.
                elif grammarfield[BFORMAT] == 'I': #implicit decimals
                    if self.ta_info['lengthnumericbare']:
                        if minussign:
                            lengthcorrection += 1
                    try:
                        d = decimal.Decimal(minussign + digits + decimalsign + decimals) * 10**grammarfield[DECIMALS]
                    except:
                        raise botslib.OutMessageError(_(u'record "$mpath" field "$field" numerical format not valid: "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH])
                    value = str(d.quantize(NODECIMAL ))
                    value = value.zfill(grammarfield[MINLENGTH] + lengthcorrection)
                if len(value)-lengthcorrection > grammarfield[LENGTH]:
                    raise botslib.OutMessageError(_(u'record "$mpath" field "$field": content to large: "$content".'),field=grammarfield[ID],content=value,mpath=record[MPATH])
        return value
    def _records2file(self):
        ''' convert self.records to a file.
            using the right editype (edifact, x12, etc) and charset.
        '''
        wrap_length = int(self.ta_info.get('wrap_length', 0))
        if wrap_length:
            #wrap output to fixed-width lines of wrap_length characters
            s = ''.join(self._record2string(r) for r in self.records) # join all records
            for i in range(0,len(s),wrap_length): # then split in fixed lengths
                try:
                    self._outstream.write(s[i:i+wrap_length] + '\r\n')
                except UnicodeEncodeError:
                    raise botslib.OutMessageError(_(u'Chars in outmessage not in charset "$char": $content'),char=self.ta_info['charset'],content=s[i:i+wrap_length])
        else:
            for record in self.records: #loop all records
                try:
                    self._outstream.write(self._record2string(record))
                except UnicodeEncodeError: #, flup: testing with 2.7: flup did not contain the content.
                    raise botslib.OutMessageError(_(u'Chars in outmessage not in charset "$char": $content'),char=self.ta_info['charset'],content=str(record))
                #code before 7 aug 2007 had other handling for flup. May have changed because python2.4->2.5?
    def _record2string(self,record):
        ''' write (all fields of) a record using the right separators, escape etc
        '''
        sfield_sep = self.ta_info['sfield_sep']
        if self.ta_info['record_tag_sep']:
            record_tag_sep = self.ta_info['record_tag_sep']
        else:
            record_tag_sep = self.ta_info['field_sep']
        field_sep = self.ta_info['field_sep']
        quote_char = self.ta_info['quote_char']
        escape = self.ta_info['escape']
        record_sep = self.ta_info['record_sep'] + self.ta_info['add_crlfafterrecord_sep']
        forcequote = self.ta_info['forcequote']
        escapechars = self.getescapechars()
        value = u'' #to collect separator/escape plus field content
        fieldcount = 0
        mode_quote = False
        if self.ta_info['noBOTSID']: #for some csv-files: do not write BOTSID so remove it
            del record[0]
        for field in record: #loop all fields in record
            if field[SFIELD]:
                value += sfield_sep
            else: #is a field:
                if fieldcount == 0: #do nothing because first field in record is not preceded by a separator
                    fieldcount = 1
                elif fieldcount == 1:
                    value += record_tag_sep
                    fieldcount = 2
                else:
                    value += field_sep
            if quote_char: #quote char only used for csv
                start_to__quote=False
                if forcequote == 2:
                    if field[FORMATFROMGRAMMAR] in ['AN','A','AR']:
                        start_to__quote=True
                elif forcequote: #always quote; this catches values 1, '1', '0'
                    start_to__quote=True
                else:
                    if field_sep in field[VALUE] or quote_char in field[VALUE] or record_sep in field[VALUE]:
                        start_to__quote=True
                #TO DO test. if quote_char='' this works OK. Alt: check first if quote_char
                if start_to__quote:
                    value += quote_char
                    mode_quote = True
            for char in field[VALUE]: #use escape (edifact, tradacom). For x12 is warned if content contains separator
                if char in escapechars:
                    if isinstance(self,x12):
                        if self.ta_info['replacechar']:
                            char = self.ta_info['replacechar']
                        else:
                            raise botslib.OutMessageError(_(u'Character "$char" is in use as separator in this x12 file. Field: "$data".'),char=char,data=field[VALUE])
                    else:
                        value +=escape
                elif mode_quote and char==quote_char:
                    value +=quote_char #csv-style quote doubling
                value += char
            if mode_quote:
                value += quote_char
                mode_quote = False
        value += record_sep
        return value
    def getescapechars(self):
        #base: no characters need escaping; editype subclasses override this
        return ''
class fixed(Outmessage):
    #fixed-length records; Outmessage._formatfield pads/length-checks via isinstance(self,fixed)
    pass
class idoc(fixed):
    #fixed format with extra control fields (MANDT, DOCNUM, SEGNUM, PSGNUM, HLEVEL);
    #presumably SAP idoc -- TODO confirm
    def _canonicalfields(self,noderecord,structure_record,headerrecordnumber):
        if self.ta_info['automaticcount']:
            #add segment/parent numbering and hierarchy level derived from the grammar structure
            noderecord.update({'MANDT':self.ta_info['MANDT'],'DOCNUM':self.ta_info['DOCNUM'],'SEGNUM':str(self.recordnumber),'PSGNUM':str(headerrecordnumber),'HLEVEL':str(len(structure_record[MPATH]))})
        else:
            noderecord.update({'MANDT':self.ta_info['MANDT'],'DOCNUM':self.ta_info['DOCNUM']})
        super(idoc,self)._canonicalfields(noderecord,structure_record,headerrecordnumber)
        self.recordnumber += 1 #tricky. EDI_DC is not counted, so I count after writing.
class var(Outmessage):
    #variable-length (separator based) records; base class for csv, edifact, tradacoms, x12
    pass
class csv(var):
    def getescapechars(self):
        #for csv only the configured escape character itself needs escaping
        return self.ta_info['escape']
class edifact(var):
    def getescapechars(self):
        '''characters that must be escaped in edifact field content: the three
           separators and the escape char; from syntax version 4 on also the
           reserved character.'''
        syntax = self.ta_info
        chars = syntax['record_sep'] + syntax['field_sep'] + syntax['sfield_sep'] + syntax['escape']
        if syntax['version'] >= '4':
            chars += syntax['reserve']
        return chars
class tradacoms(var):
    ''' tradacoms flavour of variable-length records; has its own writeall because
        each MHD message can have a different messagetype/grammar.
    '''
    def getescapechars(self):
        ''' All tradacoms service characters (including the record-tag separator) need escaping. '''
        specials = ''.join([self.ta_info[key] for key in
                            ('record_sep','field_sep','sfield_sep','escape','record_tag_sep')])
        return specials
    def writeall(self):
        ''' writeall is called for writing all 'real' outmessage objects; but not for enveloping.
            writeall is called from transform.translate().
            Loops over the MHD messages within the STX envelope; the grammar is
            (re)read per message because TYPE.01+TYPE.02 determine the messagetype.
        '''
        self.nrmessagewritten = 0
        if not self.root.children:
            raise botslib.OutMessageError(_(u'No outgoing message'))    #nothing to write
        for msg in self.root.getloop({'BOTSID':'STX'},{'BOTSID':'MHD'}):
            messagetype = msg.get({'BOTSID':'MHD','TYPE.01':None}) + msg.get({'BOTSID':'MHD','TYPE.02':None})
            self.outmessagegrammarread(self.ta_info['editype'],messagetype)
            if self.nrmessagewritten == 0:
                self._initwrite()
            self.normalisetree(msg)
            self._write(msg)
            self.nrmessagewritten += 1
        self._closewrite()
        self.ta_info['nrmessages'] = self.nrmessagewritten
class x12(var):
    ''' x12 flavour of variable-length records. '''
    def getescapechars(self):
        ''' x12 has no release character: the separators themselves may not occur in data.
            From version 00403 on, the reserved (repeat) separator is special as well.
        '''
        specials = ''.join([self.ta_info[key] for key in ('record_sep','field_sep','sfield_sep')])
        if self.ta_info['version'] >= '00403':
            specials += self.ta_info['reserve']
        return specials
class xml(Outmessage):
    ''' Writes the node tree as XML (with or without an envelope).

        20110919: code for _write is almost the same as for envelopewrite.
        this could be one method.
        Some problems with right xml prolog, standalone, DOCTYPE, processing instructons: Different ET versions give different results:
        celementtree in 2.7 is version 1.0.6, but different implementation in 2.6??
        So: this works OK for python 2.7
        For python <2.7: do not generate standalone, DOCTYPE, processing instructions for encoding !=utf-8,ascii OR if elementtree package is installed (version 1.3.0 or bigger)
    '''
    def _write(self,node):
        ''' write normal XML messages (no envelope)'''
        xmltree = ET.ElementTree(self._node2xml(node))
        root = xmltree.getroot()
        self._xmlcorewrite(xmltree,root)
    def envelopewrite(self,node):
        ''' write envelope for XML messages'''
        self._initwrite()
        self.normalisetree(node)
        xmltree = ET.ElementTree(self._node2xml(node))
        root = xmltree.getroot()
        ETI.include(root)   #resolve xi:include elements, merging the enveloped messages into the tree
        self._xmlcorewrite(xmltree,root)
        self._closewrite()
    def _xmlcorewrite(self,xmltree,root):
        ''' common writing logic for _write/envelopewrite: emit prolog, DOCTYPE,
            processing instructions (where the ET/python version allows it) and the tree itself.
        '''
        #xml prolog: always use.*********************************
        #standalone, DOCTYPE, processing instructions: only possible in python >= 2.7 or if encoding is utf-8/ascii
        #NOTE(review): lexicographic version compares ('2.7.0', '1.3.0') work for the 2.x era but would
        #misorder e.g. '2.10.0' — acceptable here, flagged for awareness.
        if sys.version >= '2.7.0' or self.ta_info['charset'] in ['us-ascii','utf-8'] or ET.VERSION >= '1.3.0':
            if self.ta_info['indented']:
                indentstring = '\n'
            else:
                indentstring = ''
            if self.ta_info['standalone']:
                standalonestring = 'standalone="%s" '%(self.ta_info['standalone'])
            else:
                standalonestring = ''
            PI = ET.ProcessingInstruction('xml', 'version="%s" encoding="%s" %s'%(self.ta_info['version'],self.ta_info['charset'], standalonestring))
            self._outstream.write(ET.tostring(PI) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write i used again by write()
            #doctype /DTD **************************************
            if self.ta_info['DOCTYPE']:
                self._outstream.write('<!DOCTYPE %s>'%(self.ta_info['DOCTYPE']) + indentstring)
            #processing instructions (other than prolog) ************
            if self.ta_info['processing_instructions']:
                for pi in self.ta_info['processing_instructions']:
                    PI = ET.ProcessingInstruction(pi[0], pi[1])
                    self._outstream.write(ET.tostring(PI) + indentstring) #do not use encoding here. gives double xml prolog; possibly because ET.ElementTree.write i used again by write()
        #indent the xml elements
        if self.ta_info['indented']:
            self.botsindent(root)
        #write tree to file; this is differnt for different python/elementtree versions
        if sys.version < '2.7.0' and ET.VERSION < '1.3.0':
            xmltree.write(self._outstream,encoding=self.ta_info['charset'])
        else:
            #xml_declaration=False: the prolog was already written manually above
            xmltree.write(self._outstream,encoding=self.ta_info['charset'],xml_declaration=False)
    def botsindent(self,elem, level=0,indentstring='    '):
        ''' recursively set .text/.tail whitespace so the serialized XML is pretty-printed. '''
        i = "\n" + level*indentstring
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + indentstring
            for e in elem:
                self.botsindent(e, level+1)
                if not e.tail or not e.tail.strip():
                    e.tail = i + indentstring
            if not e.tail or not e.tail.strip():
                #last child gets the shorter tail so the closing tag lines up with the parent
                e.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i
    def _node2xml(self,node):
        ''' recursive method.
            Converts a bots node (record + children) to an ET element subtree.
        '''
        newnode = self._node2xmlfields(node.record)
        for childnode in node.children:
            newnode.append(self._node2xml(childnode))
        return newnode
    def _node2xmlfields(self,noderecord):
        ''' fields in a node are written to xml fields; output is sorted according to grammar.
            Field names containing the attributemarker become XML attributes;
            'BOTSCONTENT' becomes the text of the record element itself.
            Note: used entries are deleted from noderecord as they are consumed.
        '''
        #first generate the xml-'record'
        #~ print 'record',noderecord['BOTSID']
        attributedict = {}
        recordtag = noderecord['BOTSID']
        attributemarker = recordtag + self.ta_info['attributemarker'] #attributemarker is a marker in the fieldname used to find out if field is an attribute of either xml-'record' or xml-element
        #~ print '    rec_att_mark',attributemarker
        for key,value in noderecord.items():    #find attributes belonging to xml-'record' and store in attributedict
            if key.startswith(attributemarker):
                #~ print '    record attribute',key,value
                attributedict[key[len(attributemarker):]] = value
        xmlrecord = ET.Element(recordtag,attributedict) #make the xml ET node
        if 'BOTSCONTENT' in noderecord: #BOTSCONTENT is used to store the value/text of the xml-record itself.
            xmlrecord.text = noderecord['BOTSCONTENT']
            del noderecord['BOTSCONTENT']
        for key in attributedict.keys():   #remove used fields
            del noderecord[attributemarker+key]
        del noderecord['BOTSID']    #remove 'record' tag
        #generate xml-'fields' in xml-'record'; sort these by looping over records definition
        for field_def in self.defmessage.recorddefs[recordtag]:  #loop over fields in 'record'
            if field_def[ID] not in noderecord: #if field not in outmessage: skip
                continue
            #~ print '    field',field_def
            attributedict = {}
            attributemarker = field_def[ID] + self.ta_info['attributemarker']
            #~ print '    field_att_mark',attributemarker
            for key,value in noderecord.items():
                if key.startswith(attributemarker):
                    #~ print '    field attribute',key,value
                    attributedict[key[len(attributemarker):]] = value
            ET.SubElement(xmlrecord, field_def[ID],attributedict).text=noderecord[field_def[ID]]    #add xml element to xml record
            for key in attributedict.keys():   #remove used fields
                del noderecord[attributemarker+key]
            del noderecord[field_def[ID]]    #remove xml entity tag
        return xmlrecord
    def _initwrite(self):
        ''' open the output data file; XML output is written via self._outstream. '''
        botsglobal.logger.debug(u'Start writing to file "%s".',self.ta_info['filename'])
        self._outstream = botslib.opendata(self.ta_info['filename'],"wb")
class xmlnocheck(xml):
    ''' XML output without checking/normalising against a grammar:
        fields are written in (arbitrary) dict order instead of grammar order.
    '''
    def normalisetree(self,node):
        #deliberately skip all grammar checks
        pass
    def _node2xmlfields(self,noderecord):
        ''' fields in a node are written to xml fields; output is NOT sorted (no grammar available).
            Same attribute/BOTSCONTENT conventions as xml._node2xmlfields; consumed
            entries are deleted from noderecord as we go.
        '''
        if 'BOTSID' not in noderecord:
            raise botslib.OutMessageError(_(u'No field "BOTSID" in xml-output in: "$record"'),record=noderecord)
        #first generate the xml-'record'
        attributedict = {}
        recordtag = noderecord['BOTSID']
        attributemarker = recordtag + self.ta_info['attributemarker']
        for key,value in noderecord.items():    #find the attributes for the xml-record, put these in attributedict
            if key.startswith(attributemarker):
                attributedict[key[len(attributemarker):]] = value
        xmlrecord = ET.Element(recordtag,attributedict) #make the xml ET node
        if 'BOTSCONTENT' in noderecord:
            xmlrecord.text = noderecord['BOTSCONTENT']
            del noderecord['BOTSCONTENT']
        for key in attributedict.keys():   #remove used fields
            del noderecord[attributemarker+key]
        del noderecord['BOTSID']    #remove 'record' tag
        #generate xml-'fields' in xml-'record'; not sorted
        #iterate over a copy: entries of noderecord are deleted inside the loop
        noderecordcopy = noderecord.copy()
        for key,value in noderecordcopy.items():
            if key not in noderecord or self.ta_info['attributemarker'] in key: #if field not in outmessage: skip
                continue
            attributedict = {}
            attributemarker = key + self.ta_info['attributemarker']
            for key2,value2 in noderecord.items():
                if key2.startswith(attributemarker):
                    attributedict[key2[len(attributemarker):]] = value2
            ET.SubElement(xmlrecord, key,attributedict).text=value    #add xml element to xml record
            for key2 in attributedict.keys():   #remove used fields
                del noderecord[attributemarker+key2]
            del noderecord[key]    #remove xml entity tag
        return xmlrecord
class json(Outmessage):
    ''' Writes the node tree as json (via simplejson). When several messages go
        into one file (multiplewrite) they are wrapped in a json array.
    '''
    def _initwrite(self):
        ''' open the output file; start the surrounding array if more than one message. '''
        super(json,self)._initwrite()
        if self.multiplewrite:
            self._outstream.write(u'[')
    def _write(self,node):
        ''' convert node tree to appropriate python object.
            python objects are written to json by simplejson.
        '''
        if self.nrmessagewritten:   #not the first message: separate from the previous one
            self._outstream.write(u',')
        jsonobject = {node.record['BOTSID']:self._node2json(node)}
        indent = 2 if self.ta_info['indented'] else None
        simplejson.dump(jsonobject, self._outstream, skipkeys=False, ensure_ascii=False, check_circular=False, indent=indent)
    def _closewrite(self):
        ''' terminate the surrounding array (if any) before the stream is closed. '''
        if self.multiplewrite:
            self._outstream.write(u']')
        super(json,self)._closewrite()
    def _node2json(self,node):
        ''' recursive method: node -> plain dict; childnodes are grouped into
            lists keyed by their BOTSID.
        '''
        newjsonobject = node.record.copy()  #start from the record fields of this node
        for childnode in node.children:
            newjsonobject.setdefault(childnode.record['BOTSID'],[]).append(self._node2json(childnode))
        del newjsonobject['BOTSID']
        return newjsonobject
    def _node2jsonold(self,node):
        ''' recursive method; older variant: a single child is stored as a dict
            instead of a one-element list. Kept for reference.
        '''
        newdict = node.record.copy()
        if node.children:   #if this node has child records
            sortedchildren = {}
            for childnode in node.children:
                sortedchildren.setdefault(childnode.record['BOTSID'],[]).append(self._node2json(childnode))
            for key,value in sortedchildren.items():
                newdict[key] = value[0] if len(value) == 1 else value
        del newdict['BOTSID']
        return newdict
class jsonnocheck(json):
    ''' json output without normalising/checking the tree against a grammar. '''
    def normalisetree(self,node):
        ''' deliberately skip all grammar checks; write the tree as-is. '''
        pass
class template(Outmessage):
    ''' uses Kid library for templating.'''
    class TemplateData(object):
        #empty container; the mapping script sets attributes on it for use in the template
        pass
    def __init__(self,ta_info):
        self.data = template.TemplateData() #self.data is used by mapping script as container for content
        super(template,self).__init__(ta_info)
    def writeall(self):
        ''' Very different writeall:
            there is no tree of nodes; there is no grammar.structure/recorddefs; kid opens file by itself.
            Raises ImportError if kid is missing, OutMessageError on any templating failure.
        '''
        try:
            import kid
        except Exception:   #fix: was bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
            txt=botslib.txtexc()
            raise ImportError(_(u'Dependency failure: editype "template" requires python library "kid". Error:\n%s'%txt))
        #for template-grammar: only syntax is used. Section 'syntax' has to have 'template'
        self.outmessagegrammarread(self.ta_info['editype'],self.ta_info['messagetype'])
        templatefile = botslib.abspath(u'templates',self.ta_info['template'])
        try:
            botsglobal.logger.debug(u'Start writing to file "%s".',self.ta_info['filename'])
            ediprint = kid.Template(file=templatefile, data=self.data)
        except Exception:   #fix: narrowed bare except; wrap templating errors as OutMessageError
            txt=botslib.txtexc()
            raise botslib.OutMessageError(_(u'While templating "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
        try:
            f = botslib.opendata(self.ta_info['filename'],'wb')
            ediprint.write(f,
                            encoding=self.ta_info['charset'],
                            output=self.ta_info['output'],  #output is specific parameter for class; init from grammar.syntax
                            fragment=self.ta_info['merge'])
        except Exception:   #fix: narrowed bare except
            txt=botslib.txtexc()
            raise botslib.OutMessageError(_(u'While templating "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
        botsglobal.logger.debug(_(u'End writing to file "%s".'),self.ta_info['filename'])
class templatehtml(Outmessage):
    ''' uses Genshi library for templating. Genshi is very similar to Kid, and is the fork/follow-up of Kid.
        Kid is not being developed further; in time Kid will not be in repositories etc.
        Templates for Genshi are like Kid templates. Changes:
        - other namespace: xmlns:py="http://genshi.edgewall.org/" instead of xmlns:py="http://purl.org/kid/ns#"
        - enveloping is different: <xi:include href="${message}" /> instead of <div py:replace="document(message)"/>
    '''
    class TemplateData(object):
        #empty container; the mapping script sets attributes on it for use in the template
        pass
    def __init__(self,ta_info):
        #fix: used to instantiate template.TemplateData (the sibling Kid class) — use own nested class
        self.data = templatehtml.TemplateData() #self.data is used by mapping script as container for content
        super(templatehtml,self).__init__(ta_info)
    def writeall(self):
        ''' Very different writeall:
            there is no tree of nodes; there is no grammar.structure/recorddefs; genshi opens file by itself.
            Raises ImportError if genshi is missing, OutMessageError on any templating failure.
        '''
        try:
            from genshi.template import TemplateLoader
        except Exception:   #fix: was bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
            txt=botslib.txtexc()
            raise ImportError(_(u'Dependency failure: editype "template" requires python library "genshi". Error:\n%s'%txt))
        #for template-grammar: only syntax is used. Section 'syntax' has to have 'template'
        self.outmessagegrammarread(self.ta_info['editype'],self.ta_info['messagetype'])
        templatefile = botslib.abspath(u'templateshtml',self.ta_info['template'])
        try:
            botsglobal.logger.debug(u'Start writing to file "%s".',self.ta_info['filename'])
            loader = TemplateLoader(auto_reload=False)
            tmpl = loader.load(templatefile)
        except Exception:   #fix: narrowed bare except; wrap templating errors as OutMessageError
            txt=botslib.txtexc()
            raise botslib.OutMessageError(_(u'While templating "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
        try:
            f = botslib.opendata(self.ta_info['filename'],'wb')
            stream = tmpl.generate(data=self.data)
            stream.render(method='xhtml',encoding=self.ta_info['charset'],out=f)
        except Exception:   #fix: narrowed bare except
            txt=botslib.txtexc()
            raise botslib.OutMessageError(_(u'While templating "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
        botsglobal.logger.debug(_(u'End writing to file "%s".'),self.ta_info['filename'])
class database(jsonnocheck):
    ''' editype 'database' is an alias for jsonnocheck output. '''
class db(Outmessage):
    ''' out.root is pickled, and saved.
        root is not a Node-tree here but an arbitrary picklable python object.
    '''
    def __init__(self,ta_info):
        super(db,self).__init__(ta_info)
        self.root = None    #make root None; root is not a Node-object anyway; None can easily be tested when writing.
    def writeall(self):
        ''' pickle self.root to the output data file.
            Raises OutMessageError when no object was passed via self.root.
        '''
        if self.root is None:
            raise botslib.OutMessageError(_(u'No outgoing message'))    #then there is nothing to write...
        botsglobal.logger.debug(u'Start writing to file "%s".',self.ta_info['filename'])
        self._outstream = botslib.opendata(self.ta_info['filename'],'wb')
        pickle.dump(self.root,self._outstream,2)    #fix: pickle.dump returns None; dropped useless 'db_object =' binding
        self._outstream.close()
        botsglobal.logger.debug(u'End writing to file "%s".',self.ta_info['filename'])
        self.ta_info['envelope'] = 'db' #use right enveloping for db: no copying etc, use same file.
class raw(Outmessage):
    ''' out.root is just saved.
        root holds the raw content itself (not a Node-tree); None means nothing to write.
    '''
    def __init__(self,ta_info):
        super(raw,self).__init__(ta_info)
        self.root = None    #None is easy to test for when writing
    def writeall(self):
        ''' write self.root unchanged to the output data file. '''
        if self.root is None:
            raise botslib.OutMessageError(_(u'No outgoing message'))
        filename = self.ta_info['filename']
        botsglobal.logger.debug(u'Start writing to file "%s".',filename)
        self._outstream = botslib.opendata(filename,'wb')
        self._outstream.write(self.root)
        self._outstream.close()
        botsglobal.logger.debug(u'End writing to file "%s".',filename)
        #use right enveloping for raw: no copying etc, use same file.
        self.ta_info['envelope'] = 'raw'
| Python |
from django.conf.urls.defaults import *
from django.contrib import admin,auth
from django.views.generic.simple import redirect_to
from django.contrib.auth.decorators import login_required,user_passes_test
from bots import views
admin.autodiscover()
#access-level decorators: login_required < staff_required < superuser_required
staff_required = user_passes_test(lambda u: u.is_staff)
superuser_required = user_passes_test(lambda u: u.is_superuser)
urlpatterns = patterns('',
    (r'^login.*', 'django.contrib.auth.views.login', {'template_name': 'admin/login.html'}),
    (r'^logout.*', 'django.contrib.auth.views.logout',{'next_page': '/'}),
    #login required
    (r'^home.*', login_required(views.home)),
    (r'^incoming.*', login_required(views.incoming)),
    (r'^detail.*', login_required(views.detail)),
    (r'^process.*', login_required(views.process)),
    (r'^outgoing.*', login_required(views.outgoing)),
    (r'^document.*', login_required(views.document)),
    (r'^reports.*', login_required(views.reports)),
    (r'^confirm.*', login_required(views.confirm)),
    (r'^filer.*', login_required(views.filer)),
    #only staff
    (r'^admin/$', login_required(views.home)),  #do not show django admin root page
    (r'^admin/bots/$', login_required(views.home)),  #do not show django admin root page
    (r'^admin/bots/uniek/.+$', redirect_to, {'url': '/admin/bots/uniek/'}), #hack. uniek counters can be changed (on main page), but never added. This rule disables the edit/add uniek pages.
    (r'^admin/', include(admin.site.urls)),
    (r'^runengine.+', staff_required(views.runengine)),
    #only superuser
    (r'^delete.*', superuser_required(views.delete)),
    (r'^plugin.*', superuser_required(views.plugin)),
    (r'^plugout.*', superuser_required(views.plugout)),
    (r'^unlock.*', superuser_required(views.unlock)),
    (r'^sendtestmail.*', superuser_required(views.sendtestmailmanagers)),
    #catch-all: everything else goes to the index view
    (r'^.*', 'bots.views.index'),
    )
#custom handler for HTTP 500 (server error) pages
handler500='bots.views.server_error'
| Python |
from django import template
register = template.Library()
@register.filter
def url2path(value):
    ''' django template filter: reduce a request path to a short page name.
        Strips the '/admin/bots/' prefix (or the leading '/'), drops a trailing
        slash, and maps the empty result to 'home'.
    '''
    prefix = '/admin/bots/'
    if value.startswith(prefix):
        trimmed = value[len(prefix):]
    else:
        trimmed = value[1:]
    if not trimmed:
        return 'home'
    if trimmed.endswith('/'):
        trimmed = trimmed[:-1]
    return trimmed
| Python |
import os
import sys
import posixpath
try:
import cPickle as pickle
except:
import pickle
import time
import datetime
import email
import email.Utils
import email.Generator
import email.Message
import email.encoders
import glob
import shutil
import fnmatch
import codecs
if os.name == 'nt':
import msvcrt
elif os.name == 'posix':
import fcntl
try:
import json as simplejson
except ImportError:
import simplejson
import smtplib
import poplib
import imaplib
import ftplib
import xmlrpclib
from django.utils.translation import ugettext as _
#Bots modules
import botslib
import botsglobal
import inmessage
import outmessage
from botsconfig import *
@botslib.log_session
def run(idchannel,idroute=''):
    '''run a communication session (dispatcher for communication functions).
       Looks up the channel record, registers it on the current process row,
       picks the communication class (user-script subclass, the deprecated
       'UserCommunicationClass', or a class from this module) and instantiates
       it; all actual communication happens in that class's __init__.
       Raises CommunicationError when the channel id is unknown.
    '''
    for channeldict in botslib.query('''SELECT *
                                    FROM channel
                                    WHERE idchannel=%(idchannel)s''',
                                    {'idchannel':idchannel}):
        botsglobal.logger.debug(u'start communication channel "%s" type %s %s.',channeldict['idchannel'],channeldict['type'],channeldict['inorout'])
        #update communication/run process with idchannel
        ta_run = botslib.OldTransaction(botslib._Transaction.processlist[-1])
        if channeldict['inorout'] == 'in':
            ta_run.update(fromchannel=channeldict['idchannel'])
        else:
            ta_run.update(tochannel=channeldict['idchannel'])
        try:
            userscript,scriptname = botslib.botsimport('communicationscripts',channeldict['idchannel'])
        except ImportError: #no user script for this channel: use built-in classes only
            userscript = scriptname = None
        #get the communication class to use:
        if userscript and hasattr(userscript,channeldict['type']):  #check communication class in user script (sub classing)
            classtocall = getattr(userscript,channeldict['type'])
        elif userscript and hasattr(userscript,'UserCommunicationClass'):   #check for communication class called 'UserCommunicationClass' in user script. 20110920: Obsolete, depreciated. Keep this for now.
            classtocall = getattr(userscript,'UserCommunicationClass')
        else:
            classtocall = globals()[channeldict['type']]            #get the communication class from this module
        classtocall(channeldict,idroute,userscript,scriptname)  #call the class for this type of channel
        botsglobal.logger.debug(u'finished communication channel "%s" type %s %s.',channeldict['idchannel'],channeldict['type'],channeldict['inorout'])
        break   #there can only be one channel; this break takes care that if found, the 'else'-clause is skipped
    else:
        raise botslib.CommunicationError(_(u'Channel "$idchannel" is unknown.'),idchannel=idchannel)
class _comsession(object):
''' Abstract class for communication-session. Use only subclasses.
Subclasses are called by dispatcher function 'run'
Often 'idroute' is passed as a parameter. This is ONLY because of the @botslib.log_session-wrapper!
use self.idroute!!
'''
    def __init__(self,channeldict,idroute,userscript,scriptname):
        ''' All communication is performed in init.
            Outgoing: precommunicate -> connect/outcommunicate/disconnect (only if
            there is something to send) -> archive.
            Incoming: connect/incommunicate/disconnect (only for a new run) ->
            postcommunicate -> archive.
        '''
        self.channeldict=channeldict
        self.idroute=idroute
        self.userscript=userscript
        self.scriptname=scriptname
        if self.channeldict['inorout']=='out':
            #routes can have the same outchannel.
            #the different outchannels can be 'direct' or deferred (in route)
            nroffiles = self.precommunicate(FILEOUT,RAWOUT)    #NOTE(review): return value is unused here
            if self.countoutfiles() > 0: #for out-comm: send if something to send
                self.connect()
                self.outcommunicate()
                self.disconnect()
            self.archive()
        else:   #incommunication
            if botsglobal.incommunicate: #for in-communication: only communicate for new run
                #handle maxsecondsperchannel: use global value from bots.ini unless specified in channel. (In database this is field 'rsrv2'.)
                #~ print "self.channeldict['rsrv2']",self.channeldict['rsrv2']
                if self.channeldict['rsrv2'] <= 0:
                    self.maxsecondsperchannel = botsglobal.ini.getint('settings','maxsecondsperchannel',sys.maxint)
                else:
                    self.maxsecondsperchannel = self.channeldict['rsrv2']
                self.connect()
                self.incommunicate()
                self.disconnect()
            self.postcommunicate(RAWIN,FILEIN)
            self.archive()
    def archive(self):
        '''archive received or send files; archive only if receive is correct.
           Copies the data files for this channel/route into a dated archive
           directory (default <archivepath>/<YYYYMMDD>); user exits 'archivepath'
           and 'archivename' can override the directory and per-file name.
        '''
        if not self.channeldict['archivepath']:
            return  #no archiving configured for this channel
        if self.channeldict['inorout'] == 'in':
            status = FILEIN
            statust = OK
            channel = 'fromchannel'
        else:
            status = FILEOUT
            statust = DONE
            channel = 'tochannel'
        if self.userscript and hasattr(self.userscript,'archivepath'):
            archivepath = botslib.runscript(self.userscript,self.scriptname,'archivepath',channeldict=self.channeldict)
        else:
            archivepath = botslib.join(self.channeldict['archivepath'],time.strftime('%Y%m%d'))
        checkedifarchivepathisthere = False  #for a outchannel that is less used, lots of empty dirs will be created. This var is used to check within loop if dir exist, but this is only checked one time.
        for row in botslib.query('''SELECT filename,idta
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND ''' + channel + '''=%(idchannel)s
                                    AND idroute=%(idroute)s
                                    ''',
                                    {'idchannel':self.channeldict['idchannel'],'status':status,
                                    'statust':statust,'idroute':self.idroute,'rootidta':botslib.get_minta4query()}):
            if not checkedifarchivepathisthere:
                #create the archive dir lazily: only when there is at least one file to archive
                botslib.dirshouldbethere(archivepath)
                checkedifarchivepathisthere = True
            absfilename = botslib.abspathdata(row['filename'])
            if self.userscript and hasattr(self.userscript,'archivename'):
                archivename = botslib.runscript(self.userscript,self.scriptname,'archivename',channeldict=self.channeldict,idta=row['idta'],filename=absfilename)
                shutil.copy(absfilename,botslib.join(archivepath,archivename))
            else:
                shutil.copy(absfilename,archivepath)
    def countoutfiles(self):
        ''' counts the number of edifiles to be transmitted.
            Returns the COUNT(*) of ta-rows in status RAWOUT/OK for this channel.
            NOTE(review): returns None if the query yields no row — presumably a
            COUNT query always yields exactly one row; verify against botslib.query.
        '''
        for row in botslib.query('''SELECT COUNT(*) as count
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'idroute':self.idroute,'status':RAWOUT,'statust':OK,
                                    'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query()}):
            return row['count']
@botslib.log_session
def postcommunicate(self,fromstatus,tostatus):
''' transfer communication-file from status RAWIN to FILEIN '''
return botslib.addinfo(change={'status':tostatus},where={'status':fromstatus,'fromchannel':self.channeldict['idchannel'],'idroute':self.idroute})
@botslib.log_session
def precommunicate(self,fromstatus,tostatus):
''' transfer communication-file from status FILEOUT to RAWOUT'''
return botslib.addinfo(change={'status':tostatus},where={'status':fromstatus,'tochannel':self.channeldict['idchannel']})
    def file2mime(self,fromstatus,tostatus):
        ''' transfer communication-file from status FILEOUT to RAWOUT and convert to mime.
            1 part/file always in 1 mail.
            For each eligible ta-row: build a MIME message (From/To/CC, Message-ID,
            Date, optional Disposition-Notification-To, Subject, attachment) and
            write it to a new data file. Errors are recorded on the ta-row
            (statust=ERROR) instead of aborting the whole batch.
            Returns the number of correctly processed files.
        '''
        counter = 0     #count the number of correct processed files
        #select files with right statust, status and channel.
        for row in botslib.query('''SELECT idta,filename,frompartner,topartner,charset,contenttype,editype
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(idchannel)s
                                    ''',
                                    {'idchannel':self.channeldict['idchannel'],'status':fromstatus,
                                    'statust':OK,'idroute':self.idroute,'rootidta':botslib.get_minta4query()}):
            try:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=tostatus)
                ta_to.synall()  #needed for user exits: get all parameters of ta_to from database;
                confirmtype = u''
                confirmasked = False
                charset = row['charset']
                if row['editype'] == 'email-confirmation': #outgoing MDN: message is already assembled
                    outfilename = row['filename']
                else:   #assemble message: headers and payload. Bots uses simple MIME-envelope; by default payload is an attachment
                    message = email.Message.Message()
                    #set 'from' header (sender)
                    frommail,ccfrom = self.idpartner2mailaddress(row['frompartner'])    #lookup email address for partnerID
                    message.add_header('From', frommail)
                    #set 'to' header (receiver)
                    if self.userscript and hasattr(self.userscript,'getmailaddressforreceiver'):    #user exit to determine to-address/receiver
                        tomail,ccto = botslib.runscript(self.userscript,self.scriptname,'getmailaddressforreceiver',channeldict=self.channeldict,ta=ta_to)
                    else:
                        tomail,ccto = self.idpartner2mailaddress(row['topartner'])      #lookup email address for partnerID
                    message.add_header('To',tomail)
                    if ccto:
                        message.add_header('CC',ccto)
                    #set Message-ID
                    reference=email.Utils.make_msgid(str(ta_to.idta))    #use transaction idta in message id.
                    message.add_header('Message-ID',reference)
                    ta_to.update(frommail=frommail,tomail=tomail,cc=ccto,reference=reference)  #update now (in order to use correct & updated ta_to in user script)
                    #set date-time stamp
                    message.add_header("Date",email.Utils.formatdate(localtime=True))
                    #set Disposition-Notification-To: ask/ask not a a MDN?
                    if botslib.checkconfirmrules('ask-email-MDN',idroute=self.idroute,idchannel=self.channeldict['idchannel'],
                                                                frompartner=row['frompartner'],topartner=row['topartner']):
                        message.add_header("Disposition-Notification-To",frommail)
                        confirmtype = u'ask-email-MDN'
                        confirmasked = True
                    #set subject
                    subject=str(row['idta'])
                    content = botslib.readdata(row['filename'])     #get attachment from data file
                    if self.userscript and hasattr(self.userscript,'subject'):    #user exit to determine subject
                        subject = botslib.runscript(self.userscript,self.scriptname,'subject',channeldict=self.channeldict,ta=ta_to,subjectstring=subject,content=content)
                    message.add_header('Subject',subject)
                    #set MIME-version
                    message.add_header('MIME-Version','1.0')
                    #set attachment filename
                    #create default attachment filename
                    unique = str(botslib.unique(self.channeldict['idchannel'])) #create unique part for attachment-filename
                    if self.channeldict['filename']:
                        attachmentfilename = self.channeldict['filename'].replace('*',unique) #filename is filename in channel where '*' is replaced by idta
                    else:
                        attachmentfilename = unique
                    if self.userscript and hasattr(self.userscript,'filename'):    #user exit to determine attachmentname
                        attachmentfilename = botslib.runscript(self.userscript,self.scriptname,'filename',channeldict=self.channeldict,ta=ta_to,filename=attachmentfilename)
                    if attachmentfilename:  #Tric: if attachmentfilename is None or empty string: do not send as an attachment.
                        message.add_header("Content-Disposition",'attachment',filename=attachmentfilename)
                    #set Content-Type and charset
                    charset = self.convertcodecformime(row['charset'])
                    message.add_header('Content-Type',row['contenttype'].lower(),charset=charset)          #contenttype is set in grammar.syntax
                    #set attachment/payload; the Content-Transfer-Encoding is set by python encoder
                    message.set_payload(content)   #do not use charset; this lead to unwanted encodings...bots always uses base64
                    #channeldict['askmdn'] here holds the MIME transfer-encoding choice ('never'/'ascii'/other)
                    if self.channeldict['askmdn'] == 'never':    #channeldict['askmdn'] is the Mime encoding
                        email.encoders.encode_7or8bit(message)      #no encoding; but the Content-Transfer-Encoding is set to 7-bit or 8-bt
                    elif self.channeldict['askmdn'] == 'ascii' and charset=='us-ascii':
                        pass        #do nothing: ascii is default encoding
                    else:                   #if Mime encoding is 'always' or (Mime encoding == 'ascii' and charset!='us-ascii'): use base64
                        email.encoders.encode_base64(message)
                    #*******write email to file***************************
                    outfilename = str(ta_to.idta)
                    outfile = botslib.opendata(outfilename, 'wb')
                    g = email.Generator.Generator(outfile, mangle_from_=False, maxheaderlen=78)
                    g.flatten(message,unixfrom=False)
                    outfile.close()
            except:
                #record the error on the destination ta-row; processing continues with the next file
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt)
            else:
                counter += 1
                ta_from.update(statust=DONE)
                ta_to.update(statust=OK,filename=outfilename,confirmtype=confirmtype,confirmasked=confirmasked,charset=charset)
        return counter
def mime2file(self,fromstatus,tostatus):
''' transfer communication-file from RAWIN to FILEIN, convert from Mime to file.
process mime-files:
- extract information (eg sender-address)
- do emailtransport-handling: generate MDN, process MDN
- save 'attachments' as files
- generate MDN if asked and OK from bots-configuration
'''
whitelist_multipart=['multipart/mixed','multipart/digest','multipart/signed','multipart/report','message/rfc822','multipart/alternative']
whitelist_major=['text','application']
blacklist_contenttype=['text/html','text/enriched','text/rtf','text/richtext','application/postscript']
def savemime(msg):
''' save contents of email as separate files.
is a nested function.
3x filtering:
- whitelist of multipart-contenttype
- whitelist of body-contentmajor
- blacklist of body-contentytpe
'''
nrmimesaved = 0
contenttype = msg.get_content_type()
if msg.is_multipart():
if contenttype in whitelist_multipart:
for part in msg.get_payload():
nrmimesaved += savemime(part)
else: #is not a multipart
if msg.get_content_maintype() not in whitelist_major or contenttype in blacklist_contenttype:
return 0
content = msg.get_payload(decode=True)
if not content or content.isspace():
return 0
charset=msg.get_content_charset('')
if not charset:
charset = self.channeldict['charset']
if self.userscript and hasattr(self.userscript,'accept_incoming_attachment'):
accept_attachment = botslib.runscript(self.userscript,self.scriptname,'accept_incoming_attachment',channeldict=self.channeldict,ta=ta_mime,charset=charset,content=content,contenttype=contenttype)
if accept_attachment == False:
return 0
ta_file = ta_mime.copyta(status=tostatus)
outfilename = str(ta_file.idta)
outfile = botslib.opendata(outfilename, 'wb')
outfile.write(content)
outfile.close()
nrmimesaved+=1
ta_file.update(statust=OK,
contenttype=contenttype,
charset=charset,
filename=outfilename)
return nrmimesaved
#*****************end of nested function savemime***************************
@botslib.log_session
def mdnreceive():
tmp = msg.get_param('reporttype')
if tmp is None or email.Utils.collapse_rfc2231_value(tmp)!='disposition-notification': #invalid MDN
raise botslib.CommunicationInError(_(u'Received email-MDN with errors.'))
for part in msg.get_payload():
if part.get_content_type()=='message/disposition-notification':
originalmessageid = part['original-message-id']
if originalmessageid is not None:
break
else: #invalid MDN: 'message/disposition-notification' not in email
raise botslib.CommunicationInError(_(u'Received email-MDN with errors.'))
botslib.change('''UPDATE ta
SET confirmed=%(confirmed)s, confirmidta=%(confirmidta)s
WHERE reference=%(reference)s
AND status=%(status)s
AND confirmasked=%(confirmasked)s
AND confirmtype=%(confirmtype)s
''',
{'status':RAWOUT,'reference':originalmessageid,'confirmed':True,'confirmtype':'ask-email-MDN','confirmidta':ta_mail.idta,'confirmasked':True})
#for now no checking if processing was OK.....
#performance: not good. Another way is to extract the original idta from the original messageid
        @botslib.log_session
        def mdnsend():
            ''' Build and queue an outgoing email-MDN (multipart/report) for the received mail.
                Returns the idta of the new MDN transaction, or 0 when the confirm-rules say no MDN is to be sent.
                Uses closure variables of the enclosing method: msg, frompartner, topartner, tomail, subject, reference.
            '''
            if not botslib.checkconfirmrules('send-email-MDN',idroute=self.idroute,idchannel=self.channeldict['idchannel'],
                                            frompartner=frompartner,topartner=topartner):
                return 0 #do not send
            #make message
            message = email.Message.Message()
            message.add_header('From',tomail)
            dispositionnotificationto = email.Utils.parseaddr(msg['disposition-notification-to'])[1]
            message.add_header('To', dispositionnotificationto)
            message.add_header('Subject', 'Return Receipt (displayed) - '+subject)
            message.add_header("Date", email.Utils.formatdate(localtime=True))
            message.add_header('MIME-Version','1.0')
            message.add_header('Content-Type','multipart/report',reporttype='disposition-notification')
            #~ message.set_type('multipart/report')
            #~ message.set_param('reporttype','disposition-notification')
            #make human readable message (first part of the multipart/report)
            humanmessage = email.Message.Message()
            humanmessage.add_header('Content-Type', 'text/plain')
            humanmessage.set_payload('This is an return receipt for the mail that you send to '+tomail)
            message.attach(humanmessage)
            #make machine readable message (second part of the multipart/report)
            machinemessage = email.Message.Message()
            machinemessage.add_header('Content-Type', 'message/disposition-notification')
            machinemessage.add_header('Original-Message-ID', reference)
            nep = email.Message.Message()
            machinemessage.attach(nep)
            message.attach(machinemessage)
            #write email to file;
            ta_mdn=botslib.NewTransaction(status=MERGED) #new transaction for group-file
            mdn_reference = email.Utils.make_msgid(str(ta_mdn.idta)) #we first have to get the mda-ta to make this reference
            message.add_header('Message-ID', mdn_reference)
            mdnfilename = str(ta_mdn.idta)
            mdnfile = botslib.opendata(mdnfilename, 'wb')
            g = email.Generator.Generator(mdnfile, mangle_from_=False, maxheaderlen=78)
            g.flatten(message,unixfrom=False)
            mdnfile.close()
            #note: from/to partner and mail-addresses are swapped: the MDN goes back to the sender
            ta_mdn.update(statust=OK,
                            idroute=self.idroute,
                            filename=mdnfilename,
                            editype='email-confirmation',
                            frompartner=topartner,
                            topartner=frompartner,
                            frommail=tomail,
                            tomail=dispositionnotificationto,
                            reference=mdn_reference,
                            content='multipart/report',
                            fromchannel=self.channeldict['idchannel'],
                            charset='ascii')
            return ta_mdn.idta
        #*****************end of nested function mdnsend***************************
#select received mails for channel
for row in botslib.query('''SELECT idta,filename
FROM ta
WHERE idta>%(rootidta)s
AND status=%(status)s
AND statust=%(statust)s
AND fromchannel=%(fromchannel)s
''',
{'status':fromstatus,'statust':OK,'rootidta':botslib.get_minta4query(),
'fromchannel':self.channeldict['idchannel'],'idroute':self.idroute}):
try:
confirmtype = ''
confirmed = False
confirmasked = False
confirmidta = 0
ta_mail = botslib.OldTransaction(row['idta'])
ta_mime = ta_mail.copyta(status=MIMEIN)
infile = botslib.opendata(row['filename'], 'rb')
msg = email.message_from_file(infile) #read and parse mail
infile.close()
frommail = email.Utils.parseaddr(msg['from'])[1]
tos = email.Utils.getaddresses(msg.get_all('to', []))
ccs = email.Utils.getaddresses(msg.get_all('cc', []))
#~ tomail = tos[0][1] #tomail is the email address of the first "To"-recipient
cc = ','.join([emailaddress[1] for emailaddress in (tos + ccs)])
reference = msg['message-id']
subject = msg['subject']
contenttype = msg.get_content_type()
#authorize: find the frompartner for the email addresses in the message
frompartner = ''
if not self.channeldict['starttls']: #reusing old database name; 'no check on "from:" email adress'
frompartner = self.mailaddress2idpartner(frommail)
topartner = '' #initialise topartner
tomail = '' #initialise tomail
if not self.channeldict['apop']: #reusing old database name; 'no check on "to:" email adress'
for toname,tomail_tmp in tos: #all tos-addresses are checked; only one needs to be authorised.
try:
topartner = self.mailaddress2idpartner(tomail_tmp)
tomail = tomail_tmp
break
except botslib.CommunicationInError:
pass
else:
if not topartner:
emailtos = [address[1] for address in tos]
raise botslib.CommunicationInError(_(u'Emailaddress(es) $email not authorised/unknown (channel "$idchannel").'),email=emailtos,idchannel=self.channeldict['idchannel'])
#update transaction of mail with information found in mail
ta_mime.update(frommail=frommail, #why now why not later: because ta_mime is copied to separate files later, so need the info now
tomail=tomail,
reference=reference,
contenttype=contenttype,
frompartner=frompartner,
topartner=topartner,
cc = cc)
if contenttype == 'multipart/report': #process received MDN confirmation
mdnreceive()
else:
if msg.has_key('disposition-notification-To'): #sender requests a MDN
confirmidta = mdnsend()
if confirmidta:
confirmtype = 'send-email-MDN'
confirmed = True
confirmasked = True
nrmimesaved = savemime(msg)
if not nrmimesaved:
raise botslib.CommunicationInError (_(u'No valid attachment in received email'))
except:
txt=botslib.txtexc()
ta_mime.failure()
ta_mime.update(statust=ERROR,errortext=txt)
else:
ta_mime.update(statust=DONE)
ta_mail.update(statust=DONE,confirmtype=confirmtype,confirmed=confirmed,confirmasked=confirmasked,confirmidta=confirmidta)
return 0 #is not useful, as mime2file is used in postcommunication, and #files processed is not checked in postcommunication.
def mailaddress2idpartner(self,mailaddress):
for row in botslib.query(u'''SELECT chanpar.idpartner_id as idpartner
FROM chanpar,channel,partner
WHERE chanpar.idchannel_id=channel.idchannel
AND chanpar.idpartner_id=partner.idpartner
AND partner.active=%(active)s
AND chanpar.idchannel_id=%(idchannel)s
AND LOWER(chanpar.mail)=%(mail)s''',
{'active':True,'idchannel':self.channeldict['idchannel'],'mail':mailaddress.lower()}):
return row['idpartner']
else: #if not found
for row in botslib.query(u'''SELECT idpartner
FROM partner
WHERE active=%(active)s
AND LOWER(mail)=%(mail)s''',
{'active':True,'mail':mailaddress.lower()}):
return row['idpartner']
raise botslib.CommunicationInError(_(u'Emailaddress "$email" unknown (or not authorised for channel "$idchannel").'),email=mailaddress,idchannel=self.channeldict['idchannel'])
def idpartner2mailaddress(self,idpartner):
for row in botslib.query(u'''SELECT chanpar.mail as mail,chanpar.cc as cc
FROM chanpar,channel,partner
WHERE chanpar.idchannel_id=channel.idchannel
AND chanpar.idpartner_id=partner.idpartner
AND partner.active=%(active)s
AND chanpar.idchannel_id=%(idchannel)s
AND chanpar.idpartner_id=%(idpartner)s''',
{'active':True,'idchannel':self.channeldict['idchannel'],'idpartner':idpartner}):
if row['mail']:
return row['mail'],row['cc']
else: #if not found
for row in botslib.query(u'''SELECT mail,cc
FROM partner
WHERE active=%(active)s
AND idpartner=%(idpartner)s''',
{'active':True,'idpartner':idpartner}):
if row['mail']:
return row['mail'],row['cc']
else:
raise botslib.CommunicationOutError(_(u'No mail-address for partner "$partner" (channel "$idchannel").'),partner=idpartner,idchannel=self.channeldict['idchannel'])
    def connect(self):
        ''' default: no connection to set up; communication subclasses override this when needed. '''
        pass
    def disconnect(self):
        ''' default: nothing to tear down; communication subclasses override this when needed. '''
        pass
@staticmethod
def convertcodecformime(codec_in):
convertdict = {
'ascii' : 'us-ascii',
'unoa' : 'us-ascii',
'unob' : 'us-ascii',
'unoc' : 'iso-8859-1',
}
codec_in = codec_in.lower().replace('_','-')
return convertdict.get(codec_in,codec_in)
class pop3(_comsession):
    ''' Fetch email from a POP3 server (plain, unencrypted connection). '''
    def connect(self):
        self.session = poplib.POP3(host=self.channeldict['host'],port=int(self.channeldict['port']))
        self.session.set_debuglevel(botsglobal.ini.getint('settings','pop3debug',0)) #if used, gives information about session (on screen), for debugging pop3
        self.session.user(self.channeldict['username'])
        self.session.pass_(self.channeldict['secret'])
    @botslib.log_session
    def incommunicate(self):
        ''' Fetch messages from Pop3-mailbox.
            A bad connection is tricky, because mails are actually deleted on the server when QUIT is successful.
            A solution would be to connect, fetch, delete and quit for each mail, but this might introduce other problems.
            So: keep a list of idta received OK.
            If QUIT is not successful than delete these ta's
        '''
        self.listoftamarkedfordelete = []   #idta's to roll back when the final QUIT fails (see disconnect)
        maillist = self.session.list()[1] #get list of messages #alt: (response, messagelist, octets) = popsession.list() #get list of messages
        startdatetime = datetime.datetime.now()
        for mail in maillist:
            try:
                ta_from = botslib.NewTransaction(filename='pop3://'+self.channeldict['username']+'@'+self.channeldict['host'],
                                                status=EXTERNIN,
                                                fromchannel=self.channeldict['idchannel'],idroute=self.idroute)
                ta_to = ta_from.copyta(status=RAWIN)
                filename = str(ta_to.idta)
                mailID = int(mail.split()[0]) #first 'word' is the message number/ID
                maillines = self.session.retr(mailID)[1] #alt: (header, messagelines, octets) = popsession.retr(messageID)
                fp = botslib.opendata(filename, 'wb')
                fp.write(os.linesep.join(maillines))
                fp.close()
                if self.channeldict['remove']: #on server side mail is marked to be deleted. The pop3-server will actually delete the file if the QUIT commnd is receieved!
                    self.session.dele(mailID)
                #add idta's of received mail in a list. If connection is not OK, QUIT command to POP3 server will not work. delete these as they will NOT
                self.listoftamarkedfordelete += [ta_from.idta,ta_to.idta]
            except: #something went wrong for this mail.
                txt=botslib.txtexc()
                botslib.ErrorProcess(functionname='pop3-incommunicate',errortext=txt,channeldict=self.channeldict)
                ta_from.delete()
                ta_to.delete()
                #test connection. if connection is not OK stop fetching mails.
                try:
                    self.session.noop()
                except:
                    self.session = None #indicate session is not valid anymore
                    break
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=OK,filename=filename)
            finally:
                #respect the per-channel time budget even when mails remain
                if (datetime.datetime.now()-startdatetime).seconds >= self.maxsecondsperchannel:
                    break
    def disconnect(self):
        ''' QUIT the pop3 session (this makes the server actually delete the mails).
            When QUIT fails, roll back the ta's of this run: the mails are still on the server
            and would otherwise be received double on the next run.
        '''
        try:
            if not self.session:
                raise botslib.CommunicationInError(_(u'Pop3 connection not OK'))
            resp = self.session.quit() #pop3 server will now actually delete the mails
            if resp[:1] != '+':
                raise botslib.CommunicationInError(_(u'QUIT command to POP3 server failed'))
        except:
            botslib.ErrorProcess(functionname='pop3-incommunicate',errortext='Could not fetch emails via POP3; probably communication problems',channeldict=self.channeldict)
            for idta in self.listoftamarkedfordelete:
                ta = botslib.OldTransaction(idta)
                ta.delete()
    @botslib.log_session
    def postcommunicate(self,fromstatus,tostatus):
        #received raw mails are dissected into separate mime-parts/files
        self.mime2file(fromstatus,tostatus)
class pop3s(pop3):
    ''' pop3 over an implicit SSL/TLS connection; only the connection set-up differs from pop3. '''
    def connect(self):
        channel = self.channeldict
        self.session = poplib.POP3_SSL(host=channel['host'],port=int(channel['port']))
        self.session.set_debuglevel(botsglobal.ini.getint('settings','pop3debug',0)) #non-zero: print pop3 session details on screen (for debugging)
        self.session.user(channel['username'])
        self.session.pass_(channel['secret'])
class pop3apop(pop3):
    ''' pop3 with APOP (digest) authentication; only the connection set-up differs from pop3. '''
    def connect(self):
        channel = self.channeldict
        self.session = poplib.POP3(host=channel['host'],port=int(channel['port']))
        self.session.set_debuglevel(botsglobal.ini.getint('settings','pop3debug',0)) #non-zero: print pop3 session details on screen (for debugging)
        self.session.apop(channel['username'],channel['secret']) #python handles apop password encryption
class imap4(_comsession):
    ''' Fetch email from IMAP server.
    '''
    def connect(self):
        imaplib.Debug = botsglobal.ini.getint('settings','imap4debug',0) #if used, gives information about session (on screen), for debugging imap4
        self.session = imaplib.IMAP4(host=self.channeldict['host'],port=int(self.channeldict['port']))
        self.session.login(self.channeldict['username'],self.channeldict['secret'])
    @botslib.log_session
    def incommunicate(self):
        ''' Fetch messages from imap4-mailbox.
        '''
        # path may contain a mailbox name, otherwise use INBOX
        if self.channeldict['path']:
            mailbox_name = self.channeldict['path']
        else:
            mailbox_name = 'INBOX'
        response, data = self.session.select(mailbox_name)
        if response != 'OK': # eg. mailbox does not exist
            raise botslib.CommunicationError(mailbox_name + ': ' + data[0])
        # Get the message UIDs that should be read
        response, data = self.session.uid('search', None, '(UNDELETED)')
        if response != 'OK': # have never seen this happen, but just in case!
            raise botslib.CommunicationError(mailbox_name + ': ' + data[0])
        maillist = data[0].split()
        startdatetime = datetime.datetime.now()
        for mail in maillist:
            try:
                ta_from = botslib.NewTransaction(filename='imap4://'+self.channeldict['username']+'@'+self.channeldict['host'],
                                                status=EXTERNIN,
                                                fromchannel=self.channeldict['idchannel'],idroute=self.idroute)
                ta_to = ta_from.copyta(status=RAWIN)
                filename = str(ta_to.idta)
                # Get the message (header and body)
                response, msg_data = self.session.uid('fetch',mail, '(RFC822)')
                fp = botslib.opendata(filename, 'wb')
                fp.write(msg_data[0][1])
                fp.close()
                # Flag message for deletion AND expunge. Direct expunge has advantages for bad (internet)connections.
                if self.channeldict['remove']:
                    self.session.uid('store',mail, '+FLAGS', r'(\Deleted)')
                    self.session.expunge()
            except:
                #this mail failed; log the error and roll back its ta's, then continue with the next mail
                txt=botslib.txtexc()
                botslib.ErrorProcess(functionname='imap4-incommunicate',errortext=txt,channeldict=self.channeldict)
                ta_from.delete()
                ta_to.delete()
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=OK,filename=filename)
            finally:
                #respect the per-channel time budget even when mails remain
                if (datetime.datetime.now()-startdatetime).seconds >= self.maxsecondsperchannel:
                    break
    @botslib.log_session
    def postcommunicate(self,fromstatus,tostatus):
        #received raw mails are dissected into separate mime-parts/files
        self.mime2file(fromstatus,tostatus)
    def disconnect(self):
        self.session.close() #Close currently selected mailbox. This is the recommended command before 'LOGOUT'.
        self.session.logout()
class imap4s(imap4):
    ''' imap4 over an implicit SSL/TLS connection; only the connection set-up differs from imap4. '''
    def connect(self):
        imaplib.Debug = botsglobal.ini.getint('settings','imap4debug',0) #non-zero: print imap4 session details on screen (for debugging)
        channel = self.channeldict
        self.session = imaplib.IMAP4_SSL(host=channel['host'],port=int(channel['port']))
        self.session.login(channel['username'],channel['secret'])
class smtp(_comsession):
    ''' Send email via SMTP (plain connection; subclasses add SSL/STARTTLS). '''
    @botslib.log_session
    def precommunicate(self,fromstatus,tostatus):
        #outgoing files are assembled into complete mime messages before sending
        return self.file2mime(fromstatus,tostatus)
    def connect(self):
        self.session = smtplib.SMTP(host=self.channeldict['host'],port=int(self.channeldict['port'])) #make connection
        self.session.set_debuglevel(botsglobal.ini.getint('settings','smtpdebug',0)) #if used, gives information about session (on screen), for debugging smtp
        self.login()
    def login(self):
        ''' authenticate to the smtp server; only done when the channel has both username and secret. '''
        if self.channeldict['username'] and self.channeldict['secret']:
            try:
                #error in python 2.6.4....user and password can not be unicode
                self.session.login(str(self.channeldict['username']),str(self.channeldict['secret']))
            except smtplib.SMTPAuthenticationError:
                raise botslib.CommunicationOutError(_(u'SMTP server did not accept user/password combination.'))
            except:
                txt=botslib.txtexc()
                raise botslib.CommunicationOutError(_(u'SMTP login failed. Error:\n$txt'),txt=txt)
    @botslib.log_session
    def outcommunicate(self):
        ''' does smtp-session.
            SSL/TLS supported (no keys-file/cert-file supported yet)
            SMTP does not allow rollback. So if the sending of a mail fails, other mails may have been send.
        '''
        #send messages
        for row in botslib.query(u'''SELECT idta,filename,frommail,tomail,cc
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'status':RAWOUT,'statust':OK,'rootidta':botslib.get_minta4query(),
                                    'tochannel':self.channeldict['idchannel']}):
            try:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                #envelope recipients: 'to' plus 'cc' (empty values filtered out)
                addresslist = [x for x in (row['tomail'],row['cc']) if x]
                sendfile = botslib.opendata(row['filename'], 'rb')
                msg = sendfile.read()
                sendfile.close()
                self.session.sendmail(row['frommail'], addresslist, msg)
            except:
                #only this mail is marked as failed; the loop continues with the next mail (no rollback in SMTP)
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt,filename='smtp://'+self.channeldict['username']+'@'+self.channeldict['host'])
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename='smtp://'+self.channeldict['username']+'@'+self.channeldict['host'])
    def disconnect(self):
        try: #Google gives/gave error closing connection. Not a real problem.
            self.session.quit()
        except:
            pass
class smtps(smtp):
    ''' smtp over an implicit SSL/TLS connection; only the connection set-up differs from smtp. '''
    def connect(self):
        host = self.channeldict['host']
        port = int(self.channeldict['port'])
        if hasattr(smtplib,'SMTP_SSL'):
            self.session = smtplib.SMTP_SSL(host=host,port=port) #make connection
        else:
            #smtplib.SMTP_SSL is not in the standard lib for python<=2.5; use the smtps module shipped with bots
            import bots.smtpssllib as smtpssllib
            self.session = smtpssllib.SMTP_SSL(host=host,port=port) #make connection
        self.session.set_debuglevel(botsglobal.ini.getint('settings','smtpdebug',0)) #non-zero: print smtp session details on screen (for debugging)
        self.login()
class smtpstarttls(smtp):
    ''' smtp upgraded to TLS via the STARTTLS command; only the connection set-up differs from smtp. '''
    def connect(self):
        host = self.channeldict['host']
        port = int(self.channeldict['port'])
        self.session = smtplib.SMTP(host=host,port=port) #make connection
        self.session.set_debuglevel(botsglobal.ini.getint('settings','smtpdebug',0)) #non-zero: print smtp session details on screen (for debugging)
        self.session.ehlo()
        self.session.starttls()
        self.session.ehlo() #EHLO again after the TLS handshake (as smtplib requires)
        self.login()
class file(_comsession):
    ''' Communication via the local filesystem: read incoming files from a directory,
        write outgoing files to a directory. Optional directory lock-file and system file locks.
    '''
    def connect(self):
        if self.channeldict['lockname']: #directory locking: create lock-file. If the lockfile is already present an exception is raised.
            lockname = botslib.join(self.channeldict['path'],self.channeldict['lockname'])
            lock = os.open(lockname,os.O_WRONLY | os.O_CREAT | os.O_EXCL)
            os.close(lock)
    @botslib.log_session
    def incommunicate(self):
        ''' gets files from filesystem. To be used via receive-dispatcher.
            each to be imported file is transaction.
            each imported file is transaction.
            IF error in importing: imported files are either OK or ERROR.
            what could not be imported is not removed
        '''
        frompath = botslib.join(self.channeldict['path'],self.channeldict['filename'])
        #fetch messages from filesystem.
        startdatetime = datetime.datetime.now()
        for fromfilename in [c for c in glob.glob(frompath) if os.path.isfile(c)]:
            try:
                ta_from = botslib.NewTransaction(filename=fromfilename,
                                                status=EXTERNIN,
                                                fromchannel=self.channeldict['idchannel'],
                                                charset=self.channeldict['charset'],idroute=self.idroute)
                ta_to = ta_from.copyta(status=RAWIN)
                #open fromfile, syslock if indicated
                fromfile = open(fromfilename,'rb')
                if self.channeldict['syslock']:
                    if os.name == 'nt':
                        msvcrt.locking(fromfile.fileno(), msvcrt.LK_LOCK, 0x0fffffff)
                    elif os.name == 'posix':
                        fcntl.lockf(fromfile.fileno(), fcntl.LOCK_SH|fcntl.LOCK_NB)
                    else:
                        raise botslib.LockedFileError(_(u'Can not do a systemlock on this platform'))
                #open tofile
                tofilename = str(ta_to.idta)
                tofile = botslib.opendata(tofilename, 'wb')
                #copy
                shutil.copyfileobj(fromfile,tofile)
                fromfile.close()
                tofile.close()
                if self.channeldict['remove']:
                    os.remove(fromfilename)
            except:
                #this file failed; log the error, roll back its ta's and continue with the next file
                txt=botslib.txtexc()
                botslib.ErrorProcess(functionname='file-incommunicate',errortext=txt,channeldict=self.channeldict)
                ta_from.delete()
                ta_to.delete()
            else:
                ta_from.update(statust=DONE)
                ta_to.update(filename=tofilename,statust=OK)
            finally:
                #respect the per-channel time budget even when files remain
                if (datetime.datetime.now()-startdatetime).seconds >= self.maxsecondsperchannel:
                    break
    @botslib.log_session
    def outcommunicate(self):
        ''' does output of files to filesystem. To be used via send-dispatcher.
            Output is either:
            1.  1 outputfile, messages are appended; filename is a fixed name
            2.  to directory; new file for each db-ta; if file exits: overwrite. File has to have a unique name.
        '''
        #check if output dir exists, else create it.
        outputdir = botslib.join(self.channeldict['path'])
        botslib.dirshouldbethere(outputdir)
        #output to one file or a queue of files (with unique names)
        if not self.channeldict['filename'] or '*' not in self.channeldict['filename']:
            mode = 'ab' #fixed filename; not unique: append to file
        else:
            mode = 'wb' #unique filenames; (over)write
        #select the db-ta's for this channel
        for row in botslib.query(u'''SELECT idta,filename,charset
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK}):
            try: #for each db-ta:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                botslib.checkcodeciscompatible(row['charset'],self.channeldict['charset'])
                #open tofile, incl syslock if indicated
                unique = str(botslib.unique(self.channeldict['idchannel'])) #create unique part for filename
                if self.channeldict['filename']:
                    filename = self.channeldict['filename'].replace('*',unique) #filename is filename in channel where '*' is replaced by idta
                else:
                    filename = unique
                if self.userscript and hasattr(self.userscript,'filename'):
                    #user exit: channel script may determine the filename itself
                    filename = botslib.runscript(self.userscript,self.scriptname,'filename',channeldict=self.channeldict,filename=filename,ta=ta_from)
                tofilename = botslib.join(outputdir,filename)
                tofile = open(tofilename, mode)
                if self.channeldict['syslock']:
                    if os.name == 'nt':
                        msvcrt.locking(tofile.fileno(), msvcrt.LK_LOCK, 0x0fffffff)
                    elif os.name == 'posix':
                        fcntl.lockf(tofile.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
                    else:
                        raise botslib.LockedFileError(_(u'Can not do a systemlock on this platform'))
                #open fromfile
                fromfile = botslib.opendata(row['filename'], 'rb')
                #copy
                shutil.copyfileobj(fromfile,tofile)
                fromfile.close()
                tofile.close()
            except:
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt)
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename=tofilename)
    def disconnect(self):
        #delete directory-lockfile
        if self.channeldict['lockname']:
            #bug fix: 'lockname' was a local variable of connect(); referencing it here raised NameError.
            #recompute the full path of the lock-file that connect() created.
            lockname = botslib.join(self.channeldict['path'],self.channeldict['lockname'])
            os.remove(lockname)
class mimefile(file):
    ''' filesystem channel for complete mime-encoded email messages:
        outgoing files are wrapped into mime before writing, incoming files are dissected after reading.
    '''
    @botslib.log_session
    def precommunicate(self,fromstatus,tostatus):
        return self.file2mime(fromstatus,tostatus)
    @botslib.log_session
    def postcommunicate(self,fromstatus,tostatus):
        self.mime2file(fromstatus,tostatus)
class ftp(_comsession):
    ''' Send/receive files via plain FTP. Subclasses add TLS variants (ftps, ftpis). '''
    def connect(self):
        botslib.settimeout(botsglobal.ini.getint('settings','ftptimeout',10))
        self.session = ftplib.FTP()
        self.session.set_debuglevel(botsglobal.ini.getint('settings','ftpdebug',0))   #set debug level (0=no, 1=medium, 2=full debug)
        self.session.set_pasv(not self.channeldict['ftpactive']) #active or passive ftp
        self.session.connect(host=self.channeldict['host'],port=int(self.channeldict['port']))
        self.session.login(user=self.channeldict['username'],passwd=self.channeldict['secret'],acct=self.channeldict['ftpaccount'])
        self.set_cwd()
    def set_cwd(self):
        ''' change to the channel's directory on the ftp-server; create it when cwd fails. '''
        self.dirpath = self.session.pwd()
        if self.channeldict['path']:
            self.dirpath = posixpath.normpath(posixpath.join(self.dirpath,self.channeldict['path']))
            try:
                self.session.cwd(self.dirpath)           #set right path on ftp-server
            except:
                self.session.mkd(self.dirpath)           #set right path on ftp-server; no nested directories
                self.session.cwd(self.dirpath)           #set right path on ftp-server
    @botslib.log_session
    def incommunicate(self):
        ''' do ftp: receive files. To be used via receive-dispatcher.
            each to be imported file is transaction.
            each imported file is transaction.
        '''
        startdatetime = datetime.datetime.now()
        files = []
        try:            #some ftp servers give errors when directory is empty; catch these errors here
            files = self.session.nlst()
        except (ftplib.error_perm,ftplib.error_temp),resp:
            if str(resp)[:3] not in ['550','450']:
                raise
        lijst = fnmatch.filter(files,self.channeldict['filename'])
        for fromfilename in lijst:  #fetch messages from ftp-server.
            try:
                ta_from = botslib.NewTransaction(filename='ftp:/'+posixpath.join(self.dirpath,fromfilename),
                                                status=EXTERNIN,
                                                fromchannel=self.channeldict['idchannel'],
                                                charset=self.channeldict['charset'],idroute=self.idroute)
                ta_to =   ta_from.copyta(status=RAWIN)
                tofilename = str(ta_to.idta)
                tofile = botslib.opendata(tofilename, 'wb')
                try:
                    if self.channeldict['ftpbinary']:
                        self.session.retrbinary("RETR " + fromfilename, tofile.write)
                    else:
                        #text mode: each retrieved line is written with a newline appended
                        self.session.retrlines("RETR " + fromfilename, lambda s, w=tofile.write: w(s+"\n"))
                except ftplib.error_perm, resp:
                    if str(resp)[:3] in ['550',]:     #we are trying to download a directory...
                        raise botslib.BotsError(u'To be catched')
                    else:
                        raise
                tofile.close()
                filesize = os.path.getsize(botslib.abspathdata(tofilename))
                if not filesize:
                    #empty files are discarded (same handling as trying to download a directory)
                    raise botslib.BotsError(u'To be catched')
                if self.channeldict['remove']:
                    self.session.delete(fromfilename)
            except botslib.BotsError:   #catch this exception: directory or empty file; skip silently
                tofile.close()
                ta_from.delete()
                ta_to.delete()
            except:
                #real error for this file; log it, roll back its ta's, continue with next file
                tofile.close()
                txt=botslib.txtexc()
                botslib.ErrorProcess(functionname='ftp-incommunicate',errortext=txt,channeldict=self.channeldict)
                ta_from.delete()
                ta_to.delete()
            else:
                ta_from.update(statust=DONE)
                ta_to.update(filename=tofilename,statust=OK)
            finally:
                #respect the per-channel time budget even when files remain
                if (datetime.datetime.now()-startdatetime).seconds >= self.maxsecondsperchannel:
                    break
    @botslib.log_session
    def outcommunicate(self):
        ''' do ftp: send files. To be used via receive-dispatcher.
            each to be send file is transaction.
            each send file is transaction.
            NB: ftp command APPE should be supported by server
        '''
        #check if one file or queue of files with unique names
        if not self.channeldict['filename'] or '*'not in self.channeldict['filename']:
            mode = 'APPE ' #fixed filename; not unique: append to file
        else:
            mode = 'STOR ' #unique filenames; (over)write
        for row in botslib.query('''SELECT idta,filename,charset
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK}):
            try:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                unique = str(botslib.unique(self.channeldict['idchannel'])) #create unique part for filename
                if self.channeldict['filename']:
                    tofilename = self.channeldict['filename'].replace('*',unique) #filename is filename in channel where '*' is replaced by idta
                else:
                    tofilename = unique
                if self.userscript and hasattr(self.userscript,'filename'):
                    #user exit: channel script may determine the filename itself
                    tofilename = botslib.runscript(self.userscript,self.scriptname,'filename',channeldict=self.channeldict,filename=tofilename,ta=ta_from)
                if self.channeldict['ftpbinary']:
                    botslib.checkcodeciscompatible(row['charset'],self.channeldict['charset'])
                    fromfile = botslib.opendata(row['filename'], 'rb')
                    self.session.storbinary(mode + tofilename, fromfile)
                else:
                    #~ self.channeldict['charset'] = 'us-ascii'
                    botslib.checkcodeciscompatible(row['charset'],self.channeldict['charset'])
                    fromfile = botslib.opendata(row['filename'], 'r')
                    self.session.storlines(mode + tofilename, fromfile)
                fromfile.close()
            except:
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt,filename='ftp:/'+posixpath.join(self.dirpath,tofilename))
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename='ftp:/'+posixpath.join(self.dirpath,tofilename))
    def disconnect(self):
        try:
            self.session.quit()     #orderly QUIT first
        except:
            self.session.close()    #fall back to closing the socket
        botslib.settimeout(botsglobal.ini.getint('settings','globaltimeout',10))
class ftps(ftp):
    ''' Explicit FTPS as defined in RFC 2228 and RFC 4217: a plain FTP connection upgraded to TLS.
        The standard port is the normal FTP port (21).
        Needs ftplib.FTP_TLS, i.e. python >= 2.7.
    '''
    def connect(self):
        botslib.settimeout(botsglobal.ini.getint('settings','ftptimeout',10))
        if not hasattr(ftplib,'FTP_TLS'):
            raise botslib.CommunicationError(_(u'ftps is not supported by your python version, use >=2.7'))
        channel = self.channeldict
        self.session = ftplib.FTP_TLS()
        self.session.set_debuglevel(botsglobal.ini.getint('settings','ftpdebug',0)) #0=no debug, 1=medium, 2=full debug
        self.session.set_pasv(not channel['ftpactive']) #passive unless the channel asks for active ftp
        self.session.connect(host=channel['host'],port=int(channel['port']))
        #support key files (PEM, cert)?
        self.session.auth() #upgrade the command channel to TLS
        self.session.login(user=channel['username'],passwd=channel['secret'],acct=channel['ftpaccount'])
        self.session.prot_p() #protect the data channel as well
        self.set_cwd()
#sub classing of ftplib for ftpis
if hasattr(ftplib,'FTP_TLS'):
    class FTP_TLS_IMPLICIT(ftplib.FTP_TLS):
        ''' FTPS implicit is not directly supported by python; python>=2.7 supports only ftps explicit.
            So class ftplib.FTP_TLS is sub-classed here, with the needed modifications.
            (code is nicked from ftplib.ftp v. 2.7; additions/changes are indicated)
        '''
        def connect(self, host='', port=0, timeout=-999):
            ''' as ftplib's connect, but wraps the socket in SSL immediately (implicit FTPS).
                timeout=-999 is the sentinel for 'not passed' (as in ftplib).
            '''
            #added hje 20110713: directly use SSL in FTPIS
            import socket
            import ssl
            #end added
            if host != '':
                self.host = host
            if port > 0:
                self.port = port
            if timeout != -999:
                self.timeout = timeout
            self.sock = socket.create_connection((self.host, self.port), self.timeout)
            self.af = self.sock.family
            #added hje 20110713: directly use SSL in FTPIS
            self.sock = ssl.wrap_socket(self.sock, self.keyfile, self.certfile,ssl_version=self.ssl_version)
            #end added
            self.file = self.sock.makefile('rb')
            self.welcome = self.getresp()
            return self.welcome
        def prot_p(self):
            ''' set up secure data connection; errors on PBSZ/PROT are tolerated (see below). '''
            #Inovis FTPIS gives errors on 'PBSZ 0' and 'PROT P', vsftp does not work without these commands.
            #These errors are just catched, nothing is done with them.
            try:
                self.voidcmd('PBSZ 0')
            except ftplib.error_perm:
                pass
            try:
                resp = self.voidcmd('PROT P')
            except ftplib.error_perm:
                resp = None
            self._prot_p = True
            return resp
class ftpis(ftp):
    ''' FTPS implicit; is not defined in a RFC. Standard port to connect is port 990.
        FTPS implicit is not supported by python itself (python>=2.7 supports only ftps explicit),
        so the sub-class FTP_TLS_IMPLICIT (above) is used.
        Tested with Inovis and VSFTPd.
        Python library FTP_TLS uses ssl_version = ssl.PROTOCOL_TLSv1; Inovis seems to need PROTOCOL_SSLv3.
        This is 'solved' by using 'parameters' in the channel:
        ~ ssl.PROTOCOL_SSLv2 = 0
        ~ ssl.PROTOCOL_SSLv3 = 1
        ~ ssl.PROTOCOL_SSLv23 = 2
        ~ ssl.PROTOCOL_TLSv1 = 3
    '''
    def connect(self):
        botslib.settimeout(botsglobal.ini.getint('settings','ftptimeout',10))
        if not hasattr(ftplib,'FTP_TLS'):
            raise botslib.CommunicationError(_(u'ftpis is not supported by your python version, use >=2.7'))
        channel = self.channeldict
        self.session = FTP_TLS_IMPLICIT()
        if channel['parameters']:
            #channel 'parameters' holds the ssl protocol version to use (see class docstring)
            self.session.ssl_version = int(channel['parameters'])
        self.session.set_debuglevel(botsglobal.ini.getint('settings','ftpdebug',0)) #0=no debug, 1=medium, 2=full debug
        self.session.set_pasv(not channel['ftpactive']) #passive unless the channel asks for active ftp
        self.session.connect(host=channel['host'],port=int(channel['port']))
        #support key files (PEM, cert)?
        #~ self.session.auth()
        self.session.login(user=channel['username'],passwd=channel['secret'],acct=channel['ftpaccount'])
        self.session.prot_p() #protect the data channel
        self.set_cwd()
class sftp(_comsession):
''' SSH File Transfer Protocol (SFTP is not FTP run over SSH, SFTP is not Simple File Transfer Protocol)
standard port to connect to is port 22.
requires paramiko and pycrypto to be installed
based on class ftp and ftps above with code from demo_sftp.py which is included with paramiko
Mike Griffin 16/10/2010
Henk-jan ebbers 20110802: when testing I found that the transport also needs to be closed. So changed transport ->self.transport, and close this in disconnect
henk-jan ebbers 20111019: disabled the host_key part for now (but is very interesting). Is not tested; keys should be integrated in bots also for other protocols.
'''
    def connect(self):
        ''' set up the SSH transport and SFTP session, and change to the channel directory.
            Raises ImportError when paramiko/pycrypto are not installed.
        '''
        # check dependencies
        try:
            import paramiko
        except:
            txt=botslib.txtexc()
            raise ImportError(_(u'Dependency failure: communicationtype "sftp" requires python library "paramiko". Error:\n%s'%txt))
        try:
            from Crypto import Cipher
        except:
            txt=botslib.txtexc()
            raise ImportError(_(u'Dependency failure: communicationtype "sftp" requires python library "pycrypto". Error:\n%s'%txt))
        # setup logging if required
        ftpdebug = botsglobal.ini.getint('settings','ftpdebug',0)
        if ftpdebug > 0:
            log_file = botslib.join(botsglobal.ini.get('directories','logging'),'sftp.log')
            # Convert ftpdebug to paramiko logging level (1=20=info, 2=10=debug)
            paramiko.util.log_to_file(log_file, 30-(ftpdebug*10))
        # Get hostname and port to use
        hostname = self.channeldict['host']
        try:
            port = int(self.channeldict['port'])
        except:
            port = 22 # default port for sftp
        # get host key, if we know one
        # (I have not tested this, just copied from demo)
        hostkeytype = None
        hostkey = None
        #~ try:
            #~ host_keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
        #~ except IOError:
            #~ try:    # try ~/ssh/ too, because windows can't have a folder named ~/.ssh/
                #~ host_keys = paramiko.util.load_host_keys(os.path.expanduser('~/ssh/known_hosts'))
            #~ except IOError:
                #~ host_keys = {}
                #~ botsglobal.logger.debug(u'No host keys found for sftp')
        #~ if host_keys.has_key(hostname):
            #~ hostkeytype = host_keys[hostname].keys()[0]
            #~ hostkey = host_keys[hostname][hostkeytype]
            #~ botsglobal.logger.debug(u'Using host key of type "%s" for sftp',hostkeytype)
        # now, connect and use paramiko Transport to negotiate SSH2 across the connection
        self.transport = paramiko.Transport((hostname,port))
        self.transport.connect(username=self.channeldict['username'],password=self.channeldict['secret'],hostkey=hostkey)
        self.session = paramiko.SFTPClient.from_transport(self.transport)
        channel = self.session.get_channel()
        channel.settimeout(botsglobal.ini.getint('settings','ftptimeout',10))
        self.session.chdir('.') # getcwd does not work without this chdir first!
        self.dirpath = self.session.getcwd()
        #set right path on ftp-server; create the directory when chdir fails
        if self.channeldict['path']:
            self.dirpath = posixpath.normpath(posixpath.join(self.dirpath,self.channeldict['path']))
            try:
                self.session.chdir(self.dirpath)
            except:
                self.session.mkdir(self.dirpath)
                self.session.chdir(self.dirpath)
def disconnect(self):
self.session.close()
self.transport.close()
    @botslib.log_session
    def incommunicate(self):
        ''' do ftp: receive files. To be used via receive-dispatcher.
            each to be imported file is transaction.
            each imported file is transaction.
        '''
        startdatetime = datetime.datetime.now()
        #list the remote working directory and keep only the files that match
        #the filename-pattern of the channel
        files = self.session.listdir('.')
        lijst = fnmatch.filter(files,self.channeldict['filename'])
        for fromfilename in lijst: #fetch messages from sftp-server.
            try:
                #register the received file in the bots database (status EXTERNIN)
                ta_from = botslib.NewTransaction(filename='sftp:/'+posixpath.join(self.dirpath,fromfilename),
                                                    status=EXTERNIN,
                                                    fromchannel=self.channeldict['idchannel'],
                                                    charset=self.channeldict['charset'],idroute=self.idroute)
                ta_to = ta_from.copyta(status=RAWIN)
                #the local copy is named after the idta of the new transaction
                tofilename = str(ta_to.idta)
                # SSH treats all files as binary
                tofile = botslib.opendata(tofilename, 'wb')
                tofile.write(self.session.open(fromfilename, 'r').read())
                tofile.close()
                if self.channeldict['remove']:
                    self.session.remove(fromfilename)
            except:
                #NOTE(review): if NewTransaction itself raises, ta_from/ta_to are
                #unbound here and the delete() calls raise NameError — confirm intended.
                txt=botslib.txtexc()
                botslib.ErrorProcess(functionname='sftp-incommunicate',errortext=txt,channeldict=self.channeldict)
                ta_from.delete()
                ta_to.delete()
            else:
                ta_from.update(statust=DONE)
                ta_to.update(filename=tofilename,statust=OK)
            finally:
                #guard: do not spend more than maxsecondsperchannel on one channel per run
                if (datetime.datetime.now()-startdatetime).seconds >= self.maxsecondsperchannel:
                    break
    @botslib.log_session
    def outcommunicate(self):
        ''' do ftp: send files. To be used via receive-dispatcher.
            each to be send file is transaction.
            each send file is transaction.
        '''
        #check if one file or queue of files with unique names
        if not self.channeldict['filename'] or '*'not in self.channeldict['filename']:
            mode = 'a' #fixed filename; not unique: append to file
        else:
            mode = 'w' #unique filenames; (over)write
        #select all ta's (transactions) that are ready to be sent via this channel
        for row in botslib.query('''SELECT idta,filename,charset
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK}):
            try:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                unique = str(botslib.unique(self.channeldict['idchannel'])) #create unique part for filename
                if self.channeldict['filename']:
                    tofilename = self.channeldict['filename'].replace('*',unique) #filename is filename in channel where '*' is replaced by idta
                else:
                    tofilename = unique
                #user exit: a 'filename' function in the channel script may rewrite the remote filename
                if self.userscript and hasattr(self.userscript,'filename'):
                    tofilename = botslib.runscript(self.userscript,self.scriptname,'filename',channeldict=self.channeldict,filename=tofilename,ta=ta_from)
                # SSH treats all files as binary
                botslib.checkcodeciscompatible(row['charset'],self.channeldict['charset'])
                fromfile = botslib.opendata(row['filename'], 'rb')
                self.session.open(tofilename, mode).write(fromfile.read())
                fromfile.close()
            except:
                #NOTE(review): if OldTransaction/copyta fails, ta_to or tofilename may be
                #unbound here and this handler raises NameError — confirm intended.
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt,filename='sftp:/'+posixpath.join(self.dirpath,tofilename))
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename='sftp:/'+posixpath.join(self.dirpath,tofilename))
class xmlrpc(_comsession):
    ''' communicate via xml-rpc; a remote xml-rpc server does the actual file handling.
        use of channel parameters:
        - host, port, username, secret, path: together build the uri of the xml-rpc server
        - filename: for outgoing, the name of the remote procedure to call with the file content;
          for incoming, used in the registered filename
        - path: for incoming, the name of the remote procedure that is polled for content
    '''
    scheme = 'http'
    def connect(self):
        '''build the server uri and create the xml-rpc proxy (no network traffic yet).'''
        self.uri = botslib.Uri(scheme=self.scheme,username=self.channeldict['username'],password=self.channeldict['secret'],host=self.channeldict['host'],port=self.channeldict['port'],path=self.channeldict['path'])
        self.session = xmlrpclib.ServerProxy(self.uri.uri)
    @botslib.log_session
    def outcommunicate(self):
        ''' do xml-rpc: send files. To be used via receive-dispatcher.
            each to be send file is transaction.
            each send file is transaction.
        '''
        for row in botslib.query('''SELECT idta,filename,charset
                                    FROM ta
                                    WHERE tochannel=%(tochannel)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND idta>%(rootidta)s
                                    ''',
                                    {'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK}):
            try:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                botslib.checkcodeciscompatible(row['charset'],self.channeldict['charset'])
                #bugfix: the query selects 'filename'; row has no key 'fromfilename'
                #(the old row['fromfilename'] raised KeyError on every send)
                fromfile = botslib.opendata(row['filename'], 'rb',row['charset'])
                content = fromfile.read()
                fromfile.close()
                #'filename' of the channel is the name of the remote procedure to call
                tocall = getattr(self.session,self.channeldict['filename'])
                filename = tocall(content)  #remote procedure returns the remote filename
            except:
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt)
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename=self.uri.update(path=self.channeldict['path'],filename=str(filename)))
    @botslib.log_session
    def incommunicate(self):
        ''' do xml-rpc: receive files by polling the remote procedure (channel 'path')
            until it returns None. Each received object becomes one transaction,
            stored locally as json.
        '''
        startdatetime = datetime.datetime.now()
        while True:
            try:
                tocall = getattr(self.session,self.channeldict['path'])
                content = tocall()
                if content is None:
                    break #nothing (more) to receive.
                ta_from = botslib.NewTransaction(filename=self.uri.update(path=self.channeldict['path'],filename=self.channeldict['filename']),
                                                status=EXTERNIN,
                                                fromchannel=self.channeldict['idchannel'],
                                                charset=self.channeldict['charset'],idroute=self.idroute)
                ta_to = ta_from.copyta(status=RAWIN)
                tofilename = str(ta_to.idta)
                tofile = botslib.opendata(tofilename, 'wb')
                simplejson.dump(content, tofile, skipkeys=False, ensure_ascii=False, check_circular=False)
                tofile.close()
            except:
                txt=botslib.txtexc()
                #fixed typo: was 'xmlprc-incommunicate'
                botslib.ErrorProcess(functionname='xmlrpc-incommunicate',errortext=txt,channeldict=self.channeldict)
                ta_from.delete()
                ta_to.delete()
            else:
                ta_from.update(statust=DONE)
                ta_to.update(filename=tofilename,statust=OK)
            finally:
                #guard: do not spend more than maxsecondsperchannel on one channel per run
                if (datetime.datetime.now()-startdatetime).seconds >= self.maxsecondsperchannel:
                    break
class intercommit(_comsession):
    ''' communicate using the external Intercommit program: each message consists of
        an xml 'header'-file (*.edi) plus a separate data-file.
    '''
    def connect(self):
        #TODO: check if intercommit program is installed/reachable
        pass
    @botslib.log_session
    def incommunicate(self):
        #run the external intercommit program in receive mode ('-R'); it places
        #the received header- and data-files in the channel path
        botslib.runexternprogram(botsglobal.ini.get('intercommit','path'), '-R')
        frompath = botslib.join(self.channeldict['path'],self.channeldict['filename'])
        for fromheadername in [c for c in glob.glob(frompath) if os.path.isfile(c)]: #get intercommit xml-header
            try:
                #open db-ta's
                ta_from = botslib.NewTransaction(filename=fromheadername,
                                                status=EXTERNIN,
                                                fromchannel=self.channeldict['idchannel'],
                                                charset=self.channeldict['charset'],
                                                idroute=self.idroute)
                ta_to = ta_from.copyta(status=RAWIN)
                #parse the intercommit 'header'-file (named *.edi)
                self.parsestuurbestand(filename=fromheadername,charset=self.channeldict['charset'])
                #convert parameters (mail-addresses to partner-ID's; filename)
                self.p['frompartner'] = self.mailaddress2idpartner(self.p['frommail'])
                self.p['topartner'] = self.mailaddress2idpartner(self.p['tomail'])
                fromfilename = botslib.join(self.channeldict['path'],self.p['Attachment'])
                self.p['filename'] = str(ta_to.idta)
                #read/write files (xml-header is already done)
                fromfile = open(fromfilename,'rb')
                tofile = botslib.opendata(self.p['filename'], 'wb')
                shutil.copyfileobj(fromfile,tofile)
                fromfile.close()
                tofile.close()
                if self.channeldict['remove']:
                    os.remove(fromfilename)
                    os.remove(fromheadername)
            except:
                #NOTE(review): if the exception happens before fromfilename is assigned
                #(eg while parsing the header), the update below raises NameError and
                #masks the original error — confirm this is acceptable.
                txt=botslib.txtexc()
                ta_from.update(statust=ERROR,errortext=txt,filename=fromfilename)
                ta_to.delete()
            else:
                ta_from.update(statust=DONE,filename=fromfilename)
                ta_to.update(statust=OK,**self.p)
    def parsestuurbestand(self,filename,charset):
        '''parse the intercommit xml control file ('stuurbestand'); results go into self.p.'''
        self.p = {}
        edifile = inmessage.edifromfile(filename=filename,messagetype='intercommitenvelope',editype='xml',charset=charset)
        for inn in edifile.nextmessage():
            break   #only the first message of the file is used
        self.p['frommail'] = inn.get({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'From','BOTSCONTENT':None})
        self.p['tomail'] = inn.get({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'To','BOTSCONTENT':None})
        self.p['reference'] = inn.get({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'X-ClientMsgID','BOTSCONTENT':None})
        self.p['Subject'] = inn.get({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'Subject','BOTSCONTENT':None})
        self.p['Attachment'] = inn.get({'BOTSID':'Edicon'},{'BOTSID':'Body'},{'BOTSID':'Attachment','BOTSCONTENT':None})
    @botslib.log_session
    def outcommunicate(self):
        #check if output dir exists, else create it.
        dirforintercommitsend = botslib.join(self.channeldict['path'])
        botslib.dirshouldbethere(dirforintercommitsend)
        #output to one file or a queue of files (with unique names)
        if not self.channeldict['filename'] or '*'not in self.channeldict['filename']:
            raise botslib.CommunicationOutError(_(u'channel "$channel" needs unique filenames (no queue-file); use eg *.edi as value for "filename"'),channel=self.channeldict['idchannel'])
        else:
            mode = 'wb' #unique filenames; (over)write
        #select the db-ta's for this channel
        for row in botslib.query('''SELECT idta,filename,frompartner,topartner,charset
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(idchannel)s
                                    AND idroute=%(idroute)s
                                    ''',
                                    {'idchannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK,'idroute':self.idroute}):
            try: #for each db-ta:
                ta_attr={} #ta_attr contains attributes used for updating ta
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                #check encoding for outchannel
                botslib.checkcodeciscompatible(row['charset'],self.channeldict['charset'])
                #create unique for filenames of xml-header file and contentfile
                uniquepart = str(botslib.unique(self.channeldict['idchannel'])) #create unique part for filenames
                statusfilename = self.channeldict['filename'].replace('*',uniquepart) #filename is filename in channel where '*' is replaced by idta
                statusfilenamewithpath = botslib.join(dirforintercommitsend,statusfilename)
                (filenamewithoutext,ext)=os.path.splitext(statusfilename)
                datafilename = filenamewithoutext + '.dat'  #data file gets same base name, '.dat' extension
                ta_attr['filename'] = botslib.join(dirforintercommitsend,datafilename)
                ta_attr['frompartner'],nep = self.idpartner2mailaddress(row['frompartner'])
                ta_attr['topartner'],nep = self.idpartner2mailaddress(row['topartner'])
                ta_attr['reference'] = email.Utils.make_msgid(str(row['idta']))[1:-1] #[1:-1]: strip angle brackets
                #create xml-headerfile
                out = outmessage.outmessage_init(messagetype='intercommitenvelope',editype='xml',filename=statusfilenamewithpath) #make outmessage object
                out.put({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'From','BOTSCONTENT':ta_attr['frompartner']})
                out.put({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'To','BOTSCONTENT':ta_attr['topartner']})
                out.put({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'Subject','BOTSCONTENT':ta_attr['reference']})
                out.put({'BOTSID':'Edicon'},{'BOTSID':'Body'},{'BOTSID':'Attachment','Type':'external','BOTSCONTENT':datafilename})
                out.put({'BOTSID':'Edicon'},{'BOTSID':'Header'},{'BOTSID':'X-mtype','BOTSCONTENT':'EDI'})
                out.writeall() #write tomessage (result of translation)
                #read/write datafiles
                tofile = open(ta_attr['filename'], mode)
                fromfile = open(row['filename'], 'rb')
                shutil.copyfileobj(fromfile,tofile)
                fromfile.close()
                tofile.close()
            except:
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt)
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,**ta_attr)
        #run the external intercommit program in send mode ('-s') for the files written above
        botslib.runexternprogram(botsglobal.ini.get('intercommit','path'),'-s')
    def disconnect(self):
        #read the intercommit status/log file (csv) and update the sent ta's accordingly
        statusfilenaam = botslib.join(botsglobal.ini.get('intercommit','logfile'))
        edifile = inmessage.edifromfile(filename=statusfilenaam,messagetype='intercommitstatus',editype='csv',charset='utf-8')
        for inn in edifile.nextmessage():
            for inline in inn.getloop({'BOTSID':'regel'}):
                statuse = int(inline.get({'BOTSID':'regel','Berichtstatus':None}))
                ICID = inline.get({'BOTSID':'regel','X-ClientMsgID':None})
                if statuse==2:
                    #status 2: intercommit assigned its own message-id; store it as the new reference
                    subject = inline.get({'BOTSID':'regel','Onderwerp':None})
                    botslib.change(u'''UPDATE ta
                                       SET statuse=%(statuse)s, reference=%(newref)s
                                       WHERE reference = %(oldref)s
                                       AND status=%(status)s''',
                                       {'status':EXTERNOUT,'oldref':subject,'newref':ICID,'statuse':statuse})
                else:
                    botslib.change(u'''UPDATE ta
                                       SET statuse=%(statuse)s
                                       WHERE reference = %(reference)s
                                       AND status=%(status)s''',
                                       {'status':EXTERNOUT,'reference':ICID,'statuse':statuse})
        os.remove(statusfilenaam)
class database(_comsession):
    ''' ***this class is obsolete and only here for compatibility reasons.
        ***this class is replaced by class db
        communicate with a database; directly read or write from a database.
        the user HAS to provide a script that does the actual import/export using SQLalchemy API.
        use of channel parameters:
        - path: contains the connection string (a sqlalchemy db uri)
        - idchannel: name of user script that does the database query & data formatting. in usersys/dbconnectors. 'main' function is called.
        incommunicate (read from database) expects a json object. In the mapping script this is presented the usual way - use inn.get() etc.
        outcommunicate (write to database) gets a json object.
    '''
    def connect(self):
        #import the user dbconnector-script; it must provide a 'main' function
        self.dbscript,self.dbscriptname = botslib.botsimport('dbconnectors',self.channeldict['idchannel']) #get the dbconnector-script
        if not hasattr(self.dbscript,'main'):
            raise botslib.ScriptImportError(_(u'No function "$function" in imported script "$script".'),function='main',script=self.dbscript)
        try:
            import sqlalchemy
        except:
            txt=botslib.txtexc()
            raise ImportError(_(u'Dependency failure: communication type "database" requires python library "sqlalchemy". Error:\n%s'%txt))
        from sqlalchemy.orm import sessionmaker
        #NOTE(review): create_engine(strategy=...) and sessionmaker(transactional=...)
        #are very old sqlalchemy API (pre-0.5) — confirm the sqlalchemy version in use.
        engine = sqlalchemy.create_engine(self.channeldict['path'],strategy='threadlocal')
        self.metadata = sqlalchemy.MetaData()
        self.metadata.bind = engine
        Session = sessionmaker(bind=engine, autoflush=False, transactional=True)
        self.session = Session()
    @botslib.log_session
    def incommunicate(self):
        ''' read data from database.
            the user script returns a json-serializable object; if truthy, it is
            stored (utf-8 encoded json) as one new transaction.
        '''
        jsonobject = botslib.runscript(self.dbscript,self.dbscriptname,'main',channeldict=self.channeldict,session=self.session,metadata=self.metadata)
        self.session.flush()
        self.session.commit()
        #should be checked more elaborate if jsonobject has 'real' data?
        if jsonobject:
            ta_from = botslib.NewTransaction(filename=self.channeldict['path'],
                                            status=EXTERNIN,
                                            fromchannel=self.channeldict['idchannel'],
                                            charset=self.channeldict['charset'],idroute=self.idroute)
            ta_to = ta_from.copyta(status=RAWIN)
            tofilename = str(ta_to.idta)
            tofile = botslib.opendata(tofilename,'wb',charset=u'utf-8')
            simplejson.dump(jsonobject, tofile, skipkeys=False, ensure_ascii=False, check_circular=False)
            tofile.close()
            ta_from.update(statust=DONE)
            ta_to.update(filename=tofilename,statust=OK)
    @botslib.log_session
    def outcommunicate(self):
        ''' write data to database.
            each outgoing file is read as json and handed to the user script's 'main'.
        '''
        for row in botslib.query('''SELECT idta,filename
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK}):
            try:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                fromfile = botslib.opendata(row['filename'], 'rb',charset=u'utf-8')
                jsonobject = simplejson.load(fromfile)
                fromfile.close()
                botslib.runscript(self.dbscript,self.dbscriptname,'main',channeldict=self.channeldict,session=self.session,metadata=self.metadata,content=jsonobject)
                self.session.flush()
                self.session.commit()
            except:
                #roll back the database work of this file before reporting the error
                self.session.rollback()
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt,filename=self.channeldict['path'])
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename=self.channeldict['path'])
    def disconnect(self):
        #close the sqlalchemy session
        self.session.close()
        #~ pass
class db(_comsession):
    ''' communicate with a database; directly read or write from a database.
        the user HAS to provide a script file in usersys/communicationscripts that does the actual import/export using **some** python database library.
        the user script file should contain:
        - connect
        - (for incoming) incommunicate
        - (for outgoing) outcommunicate
        - disconnect
        Other parameters are passed, use them for your own convenience.
        Bots 'pickles' the results returned from the user scripts (and unpickles for the translation).
    '''
    def connect(self):
        #the user communicationscript is required; verify the functions bots will call
        if self.userscript is None:
            raise ImportError(_(u'Channel "%s" is type "db", but no communicationscript exists.'%self.channeldict['idchannel']))
        #check functions bots assumes to be present in user script:
        if not hasattr(self.userscript,'connect'):
            raise botslib.ScriptImportError(_(u'No function "connect" in imported script "$script".'),script=self.scriptname)
        if self.channeldict['inorout']=='in' and not hasattr(self.userscript,'incommunicate'):
            raise botslib.ScriptImportError(_(u'No function "incommunicate" in imported script "$script".'),script=self.scriptname)
        if self.channeldict['inorout']=='out' and not hasattr(self.userscript,'outcommunicate'):
            raise botslib.ScriptImportError(_(u'No function "outcommunicate" in imported script "$script".'),script=self.scriptname)
        if not hasattr(self.userscript,'disconnect'):
            raise botslib.ScriptImportError(_(u'No function "disconnect" in imported script "$script".'),script=self.scriptname)
        #keep the connection object returned by the user script; it is passed back on every later call
        self.dbconnection = botslib.runscript(self.userscript,self.scriptname,'connect',channeldict=self.channeldict)
    @botslib.log_session
    def incommunicate(self):
        ''' read data from database.
            returns db_objects;
            if this is None, do nothing
            if this is a list, treat each member of the list as a separate 'message'
        '''
        db_objects = botslib.runscript(self.userscript,self.scriptname,'incommunicate',channeldict=self.channeldict,dbconnection=self.dbconnection)
        if not db_objects:
            return
        if not isinstance(db_objects,list):
            #normalize: a single returned object is treated as a one-element list
            db_objects = [db_objects]
        for db_object in db_objects:
            ta_from = botslib.NewTransaction(filename=self.channeldict['path'],
                                            status=EXTERNIN,
                                            fromchannel=self.channeldict['idchannel'],
                                            charset=self.channeldict['charset'],
                                            idroute=self.idroute)
            ta_to = ta_from.copyta(status=RAWIN)
            tofilename = str(ta_to.idta)
            tofile = botslib.opendata(tofilename,'wb')
            #pickle protocol 2: binary format
            pickle.dump(db_object, tofile,2)
            tofile.close()
            ta_from.update(statust=DONE)
            ta_to.update(filename=tofilename,statust=OK)
    @botslib.log_session
    def outcommunicate(self):
        ''' write data to database.
            each outgoing file is unpickled and handed to the user script's 'outcommunicate'.
        '''
        for row in botslib.query('''SELECT idta,filename
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK}):
            try:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                fromfile = botslib.opendata(row['filename'], 'rb')
                #the pickled files are written by bots itself (incommunicate above),
                #so unpickling here is trusted input — do not feed external files to this channel.
                db_object = pickle.load(fromfile)
                fromfile.close()
                botslib.runscript(self.userscript,self.scriptname,'outcommunicate',channeldict=self.channeldict,dbconnection=self.dbconnection,db_object=db_object)
            except:
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt,filename=self.channeldict['path'])
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename=self.channeldict['path'])
    def disconnect(self):
        #run 'disconnect' of the user script; should release the database connection
        botslib.runscript(self.userscript,self.scriptname,'disconnect',channeldict=self.channeldict,dbconnection=self.dbconnection)
class communicationscript(_comsession):
    """
    For running an (user maintained) communication script.
    Examples of use:
    - call external communication program
    - call external program that extract messages from ERP-database
    - call external program that imports messages in ERP system
    - communication method not available in Bots ***or use sub-classing for this***
    - specialised I/O wishes; eg specific naming of output files. (eg including partner name) ***beter: use sub-classing or have more user exits***
    place of communication scripts: bots/usersys/communicationscripts
    name of communication script: same name as channel (the channelID)
    in this communication script some functions will be called:
    - connect (required)
    - main (optional, 'main' should handle files one by one)
    - disconnect (required)
    arguments: dict 'channel' which has all channel attributes
    more parameters/data for communication script: hard code this in communication script; or use bots.ini
    Different ways of working:
    1. for incoming files (bots receives the files):
        1.1 connect puts all files in a directory, there is no 'main' function. bots can remove the files (if you use the 'remove' switch of the channel).
        1.2 connect only builds the connection, 'main' is a generator that passes the messages one by one (using 'yield'). bots can remove the files (if you use the 'remove' switch of the channel).
    2. for outgoing files (bots sends the files):
        2.1 if there is a 'main' function: the 'main' function is called by bots after writing each file. bots can remove the files (if you use the 'remove' switch of the channel).
        2.2 no 'main' function: the processing of all the files can be done in 'disconnect'. bots can remove the files (if you use the 'remove' switch of the channel).
    """
    def connect(self):
        '''import and run 'connect' of the user communicationscript; the script is required.'''
        if self.userscript is None or not botslib.tryrunscript(self.userscript,self.scriptname,'connect',channeldict=self.channeldict):
            raise ImportError(_(u'Channel "%s" is type "communicationscript", but no communicationscript exists.'%self.channeldict['idchannel']))
    def _absorb_file(self,fromfilename):
        ''' register one received external file as a bots transaction and copy its content
            to the bots data-directory. On error the partial transactions are deleted and
            the error is reported. (factored out of incommunicate: the 'main'-generator
            and glob branches used identical, duplicated code.)
        '''
        try:
            ta_from = botslib.NewTransaction(filename = fromfilename,
                                            status = EXTERNIN,
                                            fromchannel = self.channeldict['idchannel'],
                                            charset = self.channeldict['charset'], idroute = self.idroute)
            ta_to = ta_from.copyta(status = RAWIN)
            fromfile = open(fromfilename, 'rb')
            tofilename = str(ta_to.idta)    #local copy is named after the idta of the new transaction
            tofile = botslib.opendata(tofilename, 'wb')
            shutil.copyfileobj(fromfile,tofile)
            fromfile.close()
            tofile.close()
            if self.channeldict['remove']:
                os.remove(fromfilename)
        except:
            txt=botslib.txtexc()
            botslib.ErrorProcess(functionname='communicationscript-incommunicate',errortext=txt,channeldict=self.channeldict)
            ta_from.delete()
            ta_to.delete()
        else:
            ta_from.update(statust=DONE)
            ta_to.update(filename=tofilename,statust=OK)
    @botslib.log_session
    def incommunicate(self):
        ''' receive files: either one by one from the generator 'main' in the user script,
            or all at once from the channel path (placed there by 'connect').
        '''
        startdatetime = datetime.datetime.now()
        if hasattr(self.userscript,'main'): #process files one by one; script has to be a generator
            filenames = botslib.runscriptyield(self.userscript,self.scriptname,'main',channeldict=self.channeldict)
        else: #all files have been set ready by external script using 'connect'.
            frompath = botslib.join(self.channeldict['path'], self.channeldict['filename'])
            filenames = [c for c in glob.glob(frompath) if os.path.isfile(c)]
        for fromfilename in filenames:
            self._absorb_file(fromfilename)
            #guard: do not spend more than maxsecondsperchannel on one channel per run
            if (datetime.datetime.now()-startdatetime).seconds >= self.maxsecondsperchannel:
                break
    @botslib.log_session
    def outcommunicate(self):
        ''' write each outgoing file to the channel path; after each file the user
            script's 'main' (if present) is called with the written filename.
        '''
        #check if output dir exists, else create it.
        outputdir = botslib.join(self.channeldict['path'])
        botslib.dirshouldbethere(outputdir)
        #output to one file or a queue of files (with unique names)
        if not self.channeldict['filename'] or '*'not in self.channeldict['filename']:
            mode = 'ab' #fixed filename; not unique: append to file
        else:
            mode = 'wb' #unique filenames; (over)write
        #select the db-ta's for this channel
        for row in botslib.query(u'''SELECT idta,filename,charset
                                    FROM ta
                                    WHERE idta>%(rootidta)s
                                    AND status=%(status)s
                                    AND statust=%(statust)s
                                    AND tochannel=%(tochannel)s
                                    ''',
                                    {'tochannel':self.channeldict['idchannel'],'rootidta':botslib.get_minta4query(),
                                    'status':RAWOUT,'statust':OK}):
            try: #for each db-ta:
                ta_from = botslib.OldTransaction(row['idta'])
                ta_to = ta_from.copyta(status=EXTERNOUT)
                botslib.checkcodeciscompatible(row['charset'],self.channeldict['charset'])
                #open tofile, incl syslock if indicated
                unique = str(botslib.unique(self.channeldict['idchannel'])) #create unique part for filename
                if self.channeldict['filename']:
                    filename = self.channeldict['filename'].replace('*',unique) #filename is filename in channel where '*' is replaced by idta
                else:
                    filename = unique
                tofilename = botslib.join(outputdir,filename)
                tofile = open(tofilename, mode)
                #open fromfile
                fromfile = botslib.opendata(row['filename'], 'rb')
                #copy
                shutil.copyfileobj(fromfile,tofile)
                fromfile.close()
                tofile.close()
                #one file is written; call external
                if botslib.tryrunscript(self.userscript,self.scriptname,'main',channeldict=self.channeldict,filename=tofilename):
                    if self.channeldict['remove']:
                        os.remove(tofilename)
            except:
                txt=botslib.txtexc()
                ta_to.update(statust=ERROR,errortext=txt)
            else:
                ta_from.update(statust=DONE)
                ta_to.update(statust=DONE,filename=tofilename)
    def disconnect(self):
        '''run 'disconnect' of the user script; optionally clean up the written files.'''
        botslib.tryrunscript(self.userscript,self.scriptname,'disconnect',channeldict=self.channeldict)
        if self.channeldict['remove'] and not hasattr(self.userscript,'main'): #if bots should remove the files, and all files are passed at once, delete these files.
            outputdir = botslib.join(self.channeldict['path'], self.channeldict['filename'])
            for filename in [c for c in glob.glob(outputdir) if os.path.isfile(c)]:
                try:
                    os.remove(filename)
                except:
                    pass    #best effort: leftover files are not fatal
| Python |
import sys
from django.utils.translation import ugettext as _
#bots-modules
import communication
import envelope
import transform
import botslib
import botsglobal
import preprocess
from botsconfig import *
@botslib.log_session
def prepareretransmit():
    ''' prepare the retransmittable files. Return: indication if files should be retransmitted.'''
    retransmit = False #indicate retransmit
    #for rereceive: filereports flagged for retransmit by the user
    for row in botslib.query('''SELECT idta,reportidta
                                FROM filereport
                                WHERE retransmit=%(retransmit)s ''',
                                {'retransmit':True}):
        retransmit = True
        #reset the retransmit flag so the file is rereceived only once
        botslib.change('''UPDATE filereport
                          SET retransmit=%(retransmit)s
                          WHERE idta=%(idta)s
                          AND reportidta=%(reportidta)s ''',
                          {'idta':row['idta'],'reportidta':row['reportidta'],'retransmit':False})
        #find the RAWIN child-ta of the flagged file and reinject it
        for row2 in botslib.query('''SELECT idta
                                     FROM ta
                                     WHERE parent=%(parent)s
                                     AND status=%(status)s''',
                                     {'parent':row['idta'],
                                     'status':RAWIN}):
            ta_rereceive = botslib.OldTransaction(row2['idta'])
            ta_externin = ta_rereceive.copyta(status=EXTERNIN,statust=DONE,parent=0) #inject; status is DONE so this ta is not used further
            ta_raw = ta_externin.copyta(status=RAWIN,statust=OK) #reinjected file is ready as new input
    #for resend; this one is slow. Can be improved by having a separate list of idta to resend
    for row in botslib.query('''SELECT idta,parent
                                FROM ta
                                WHERE retransmit=%(retransmit)s
                                AND status=%(status)s''',
                                {'retransmit':True,
                                'status':EXTERNOUT}):
        retransmit = True
        ta_outgoing = botslib.OldTransaction(row['idta'])
        ta_outgoing.update(retransmit=False) #is reinjected; set retransmit back to False
        ta_resend = botslib.OldTransaction(row['parent']) #parent ta with status RAWOUT; this is where the outgoing file is kept
        ta_externin = ta_resend.copyta(status=EXTERNIN,statust=DONE,parent=0) #inject; status is DONE so this ta is not used further
        ta_raw = ta_externin.copyta(status=RAWOUT,statust=OK) #reinjected file is ready as new input
    return retransmit
@botslib.log_session
def preparerecommunication():
    ''' reinject the outgoing files of each failed out-communication process.
        Return: indication if files should be retransmitted.
    '''
    #for each out-communication process that went wrong:
    retransmit = False #indicate retransmit
    for row in botslib.query('''SELECT idta,tochannel
                                FROM ta
                                WHERE statust!=%(statust)s
                                AND status=%(status)s
                                AND retransmit=%(retransmit)s ''',
                                {'status':PROCESS,'retransmit':True,'statust':DONE}):
        run_outgoing = botslib.OldTransaction(row['idta'])
        run_outgoing.update(retransmit=False) #set retransmit back to False
        #get rootidta of run where communication failed
        for row2 in botslib.query('''SELECT max(idta) as rootidta
                                     FROM ta
                                     WHERE script=%(script)s
                                     AND idta<%(thisidta)s ''',
                                     {'script':0,'thisidta':row['idta']}):
            rootidta = row2['rootidta']
        #get endidta of run where communication failed
        for row3 in botslib.query('''SELECT min(idta) as endidta
                                     FROM ta
                                     WHERE script=%(script)s
                                     AND idta>%(thisidta)s ''',
                                     {'script':0,'thisidta':row['idta']}):
            endidta = row3['endidta']
        if not endidta:
            #failed run is the most recent one; use a very large upper bound
            #NOTE(review): sys.maxint is Python 2 only (removed in Python 3).
            endidta = sys.maxint - 1
        #reinject every RAWOUT file of that run for the failed channel
        for row4 in botslib.query('''SELECT idta
                                     FROM ta
                                     WHERE idta<%(endidta)s
                                     AND idta>%(rootidta)s
                                     AND status=%(status)s
                                     AND statust=%(statust)s
                                     AND tochannel=%(tochannel)s ''',
                                     {'statust':OK,'status':RAWOUT,'rootidta':rootidta,'endidta':endidta,'tochannel':row['tochannel']}):
            retransmit = True
            ta_outgoing = botslib.OldTransaction(row4['idta'])
            ta_outgoing_copy = ta_outgoing.copyta(status=RAWOUT,statust=OK)
            ta_outgoing.update(statust=DONE)
    return retransmit
@botslib.log_session
def prepareautomaticrecommunication():
    ''' reinjects all files for which communication failed (status = RAWOUT).
        Returns True when at least one file was reinjected.
    '''
    #bots keeps track of last time automaticretrycommunication was done; reason is mainly performance
    last_retry_idta = botslib.keeptrackoflastretry('bots__automaticretrycommunication',botslib.getlastrun())
    startidta = max(last_retry_idta, botslib.get_idta_last_error())
    reinjected = False
    #reinject
    for row in botslib.query('''SELECT idta
                                FROM ta
                                WHERE idta>%(startidta)s
                                AND status=%(status)s
                                AND statust=%(statust)s ''',
                                {'statust':OK,'status':RAWOUT,'startidta':startidta}):
        reinjected = True
        failed_ta = botslib.OldTransaction(row['idta'])
        failed_ta.copyta(status=RAWOUT,statust=OK)  #reinjected file is ready as new input
        failed_ta.update(statust=DONE)              #the original ta is finished
    return reinjected
@botslib.log_session
def prepareretry():
    ''' reinjects all files for which communication failed (status = RAWOUT).
        Returns True when at least one file was reinjected.
    '''
    #bots keeps track of last time retry was done; reason is mainly performance
    last_retry_idta = botslib.keeptrackoflastretry('bots__retry',botslib.getlastrun())
    startidta = max(last_retry_idta, botslib.get_idta_last_error())
    reinjected = False
    #reinject
    for row in botslib.query('''SELECT idta,status
                                FROM ta
                                WHERE idta>%(startidta)s
                                AND statust=%(statust)s ''',
                                {'statust':OK,'startidta':startidta}):
        reinjected = True
        failed_ta = botslib.OldTransaction(row['idta'])
        failed_ta.copyta(status=row['status'],statust=OK)   #reinjected file keeps its original status
        failed_ta.update(statust=DONE)                      #the original ta is finished
    return reinjected
@botslib.log_session
def routedispatcher(routestorun,type=None):
    ''' run all route(s).
        'type' (note: shadows the builtin, kept for interface compatibility) selects an
        optional preparation step; when that step finds nothing to do, 0 is returned.
    '''
    #map command-line option to its preparation function
    preparefunctions = {'--retransmit':prepareretransmit,
                        '--retrycommunication':preparerecommunication,
                        '--automaticretrycommunication':prepareautomaticrecommunication,
                        '--retry':prepareretry,
                        }
    preparefunction = preparefunctions.get(type)
    if preparefunction is not None and not preparefunction():
        return 0
    stuff2evaluate = botslib.getlastrun()
    botslib.set_minta4query()
    for route in routestorun:
        foundroute = False
        botslib.setpreprocessnumber(SET_FOR_PROCESSING)
        #a route may consist of several parts; run them ordered by 'seq'
        for routedict in botslib.query('''SELECT idroute   ,
                                                 fromchannel_id as fromchannel,
                                                 tochannel_id as tochannel,
                                                 fromeditype,
                                                 frommessagetype,
                                                 alt,
                                                 frompartner_id as frompartner,
                                                 topartner_id as topartner,
                                                 toeditype,
                                                 tomessagetype,
                                                 seq,
                                                 frompartner_tochannel_id,
                                                 topartner_tochannel_id,
                                                 testindicator,
                                                 translateind,
                                                 defer
                                          FROM routes
                                          WHERE idroute=%(idroute)s
                                          AND active=%(active)s
                                          ORDER BY seq''',
                                          {'idroute':route,'active':True}):
            botsglobal.logger.info(_(u'running route %(idroute)s %(seq)s'),{'idroute':routedict['idroute'],'seq':routedict['seq']})
            botslib.setrouteid(routedict['idroute'])
            foundroute = True
            router(routedict)
            botslib.setrouteid('')
            botsglobal.logger.debug(u'finished route %s %s',routedict['idroute'],routedict['seq'])
        if not foundroute:
            botsglobal.logger.warning(_(u'there is no (active) route "%s".'),route)
    return stuff2evaluate
@botslib.log_session
def router(routedict):
    ''' communication.run one route. variants:
        - a route can be just script;
        - a route can do only incoming
        - a route can do only outgoing
        - a route can do both incoming and outgoing
        - at several points functions from a route script are called - if function is in route script
    '''
    #is there a user route script?
    try:
        userscript,scriptname = botslib.botsimport('routescripts',routedict['idroute'])
    except ImportError: #other errors, eg syntax errors are just passed
        userscript = scriptname = None
    #if user route script has function 'main': communication.run 'main' (and do nothing else)
    if botslib.tryrunscript(userscript,scriptname,'main',routedict=routedict):
        return  #so: if function ' main' : communication.run only the routescript, nothing else.
    #a route without script, channels or translation step can do nothing at all
    if not (userscript or routedict['fromchannel'] or routedict['tochannel'] or routedict['translateind']):
        raise botslib.ScriptError(_(u'Route "$route" is empty: no script, not enough parameters.'),route=routedict['idroute'])
    botslib.tryrunscript(userscript,scriptname,'start',routedict=routedict)
    #communication.run incoming channel
    if routedict['fromchannel']:     #do incoming part of route: in-communication; set ready for translation; translate
        botslib.tryrunscript(userscript,scriptname,'preincommunication',routedict=routedict)
        communication.run(idchannel=routedict['fromchannel'],idroute=routedict['idroute'])  #communication.run incommunication
        #add attributes from route to the received files
        where={'status':FILEIN,'fromchannel':routedict['fromchannel'],'idroute':routedict['idroute']}
        change={'editype':routedict['fromeditype'],'messagetype':routedict['frommessagetype'],'frompartner':routedict['frompartner'],'topartner':routedict['topartner'],'alt':routedict['alt']}
        botslib.updateinfo(change=change,where=where)
        #all received files have status FILEIN
        botslib.tryrunscript(userscript,scriptname,'postincommunication',routedict=routedict)
        if routedict['fromeditype'] == 'mailbag':       #mailbag for the route: unpack mailbag-files first
            preprocess.preprocess(routedict,preprocess.mailbag)
    #communication.run translation
    if routedict['translateind']:
        botslib.tryrunscript(userscript,scriptname,'pretranslation',routedict=routedict)
        botslib.addinfo(change={'status':TRANSLATE},where={'status':FILEIN,'idroute':routedict['idroute']})
        transform.translate(idroute=routedict['idroute'])
        botslib.tryrunscript(userscript,scriptname,'posttranslation',routedict=routedict)
    #merge messages & communication.run outgoing channel
    if routedict['tochannel']:   #do outgoing part of route
        botslib.tryrunscript(userscript,scriptname,'premerge',routedict=routedict)
        envelope.mergemessages(idroute=routedict['idroute'])
        botslib.tryrunscript(userscript,scriptname,'postmerge',routedict=routedict)
        #communication.run outgoing channel
        #build for query: towhere (dict) and wherestring
        towhere=dict(status=MERGED,
                    idroute=routedict['idroute'],
                    editype=routedict['toeditype'],
                    messagetype=routedict['tomessagetype'],
                    testindicator=routedict['testindicator'])
        towhere=dict([(key, value) for (key, value) in towhere.iteritems() if value])   #remove nul-values from dict
        #wherestring uses %(key)s placeholders: query stays parameterized (no SQL injection via values)
        wherestring = ' AND '.join([key+'=%('+key+')s' for key in towhere])
        if routedict['frompartner_tochannel_id']:   #use frompartner_tochannel in where-clause of query (partner/group dependent outchannel
            towhere['frompartner_tochannel_id']=routedict['frompartner_tochannel_id']
            wherestring += ''' AND (frompartner=%(frompartner_tochannel_id)s
                                OR frompartner in (SELECT from_partner_id
                                                    FROM partnergroup
                                                    WHERE to_partner_id =%(frompartner_tochannel_id)s ))'''
        if routedict['topartner_tochannel_id']:   #use topartner_tochannel in where-clause of query (partner/group dependent outchannel
            towhere['topartner_tochannel_id']=routedict['topartner_tochannel_id']
            wherestring += ''' AND (topartner=%(topartner_tochannel_id)s
                                OR topartner in (SELECT from_partner_id
                                                    FROM partnergroup
                                                    WHERE to_partner_id=%(topartner_tochannel_id)s ))'''
        toset={'tochannel':routedict['tochannel'],'status':FILEOUT}
        botslib.addinfocore(change=toset,where=towhere,wherestring=wherestring)
        if not routedict['defer']:   #do outgoing part of route
            botslib.tryrunscript(userscript,scriptname,'preoutcommunication',routedict=routedict)
            communication.run(idchannel=routedict['tochannel'],idroute=routedict['idroute'])   #communication.run outcommunication
            botslib.tryrunscript(userscript,scriptname,'postoutcommunication',routedict=routedict)
    botslib.tryrunscript(userscript,scriptname,'end',routedict=routedict)
| Python |
import sys
import os
import botsinit
import botslib
import grammar
def showusage():
print
print " Usage: %s -c<directory> <editype> <messagetype>"%os.path.basename(sys.argv[0])
print
print " Checks a Bots grammar."
print " Same checks are used as in translations with bots-engine."
print " Searches for grammar in regular place: bots/usersys/grammars/<editype>/<messagetype>.py"
print " Options:"
print " -c<directory> directory for configuration files (default: config)."
print " Example:"
print " %s -cconfig edifact ORDERSD96AUNEAN008"%os.path.basename(sys.argv[0])
print
sys.exit(0)
def startmulti(grammardir,editype):
''' used in seperate tool for bulk checking of gramamrs while developing edifact->botsgramamrs '''
import glob
botslib.generalinit('config')
botslib.initenginelogging()
for g in glob.glob(grammardir):
g1 = os.path.basename(g)
g2 = os.path.splitext(g1)[0]
if g1 in ['__init__.py']:
continue
if g1.startswith('edifact'):
continue
if g1.startswith('records') or g1.endswith('records.py'):
continue
try:
grammar.grammarread(editype,g2)
except:
#~ print 'Found error in grammar:',g
print botslib.txtexc()
print '\n'
else:
print 'OK - no error found in grammar',g,'\n'
def start():
#********command line arguments**************************
editype =''
messagetype = ''
configdir = 'config'
for arg in sys.argv[1:]:
if not arg:
continue
if arg.startswith('-c'):
configdir = arg[2:]
if not configdir:
print ' !!Indicated Bots should use specific .ini file but no file name was given.'
showusage()
elif arg in ["?", "/?"] or arg.startswith('-'):
showusage()
else:
if not editype:
editype = arg
else:
messagetype = arg
if not (editype and messagetype):
print ' !!Both editype and messagetype are required.'
showusage()
#********end handling command line arguments**************************
try:
botsinit.generalinit(configdir)
botsinit.initenginelogging()
grammar.grammarread(editype,messagetype)
except:
print 'Found error in grammar:'
print botslib.txtexc()
else:
print 'OK - no error found in grammar'
#script entry point: run the command-line grammar check
if __name__=='__main__':
    start()
| Python |
import atexit
import logging
import os
import sys
import traceback
#import bots-modules
import bots.botsglobal as botsglobal
import bots.botsinit as botsinit
import bots.botslib as botslib
def showusage():
print ' Update existing bots database for new release 1.6.0'
print ' Options:'
print " -c<directory> directory for configuration files (default: config)."
def start(configdir = 'config'):
#********command line arguments**************************
for arg in sys.argv[1:]:
if not arg:
continue
if arg.startswith('-c'):
configdir = arg[2:]
if not configdir:
print 'Indicated Bots should use specific .ini file but no file name was given.'
sys.exit(1)
elif arg in ["?", "/?"] or arg.startswith('-'):
showusage()
sys.exit(0)
else: #pick up names of routes to run
showusage()
#**************initialise configuration file******************************
try:
botsinit.generalinit(configdir)
botslib.settimeout(botsglobal.ini.getint('settings','globaltimeout',10)) #
except:
traceback.print_exc()
print 'Error in reading/initializing ini-file.'
sys.exit(1)
#**************initialise logging******************************
try:
botsinit.initenginelogging()
except:
traceback.print_exc()
print 'Error in initialising logging system.'
sys.exit(1)
else:
atexit.register(logging.shutdown)
botsglobal.logger.info('Python version: "%s".',sys.version)
botsglobal.logger.info('Bots configuration file: "%s".',botsinifile)
botsglobal.logger.info('Bots database configuration file: "%s".',botslib.join('config',os.path.basename(botsglobal.ini.get('directories','tgconfig','botstg.cfg'))))
#**************connect to database**********************************
try:
botslib.connect()
except:
traceback.print_exc()
print 'Error connecting to database.'
sys.exit(1)
else:
atexit.register(botsglobal.db.close)
try:
cursor = botsglobal.db.cursor()
cursor.execute('''ALTER TABLE routes ADD COLUMN notindefaultrun BOOLEAN''',None)
cursor.execute('''ALTER TABLE channel ADD COLUMN archivepath VARCHAR(256)''',None)
cursor.execute('''ALTER TABLE partner ADD COLUMN mail VARCHAR(256)''',None)
cursor.execute('''ALTER TABLE partner ADD COLUMN cc VARCHAR(256)''',None)
cursor.execute('''ALTER TABLE chanpar ADD COLUMN cc VARCHAR(256)''',None)
cursor.execute('''ALTER TABLE ta ADD COLUMN confirmasked BOOLEAN''',None)
cursor.execute('''ALTER TABLE ta ADD COLUMN confirmed BOOLEAN''',None)
cursor.execute('''ALTER TABLE ta ADD COLUMN confirmtype VARCHAR(35) DEFAULT '' ''',None)
cursor.execute('''ALTER TABLE ta ADD COLUMN confirmidta INTEGER DEFAULT 0''',None)
cursor.execute('''ALTER TABLE ta ADD COLUMN envelope VARCHAR(35) DEFAULT '' ''',None)
cursor.execute('''ALTER TABLE ta ADD COLUMN botskey VARCHAR(35) DEFAULT '' ''',None)
cursor.execute('''ALTER TABLE ta ADD COLUMN cc VARCHAR(512) DEFAULT '' ''',None)
if botsglobal.dbinfo.drivername == 'mysql':
cursor.execute('''ALTER TABLE ta MODIFY errortext VARCHAR(2048)''',None)
elif botsglobal.dbinfo.drivername == 'postgres':
cursor.execute('''ALTER TABLE ta ALTER COLUMN errortext type VARCHAR(2048)''',None)
#else: #sqlite does not allow modifying existing field, but does not check lengths either so this works.
cursor.execute('''CREATE TABLE confirmrule (
id INTEGER PRIMARY KEY,
active BOOLEAN,
confirmtype VARCHAR(35),
ruletype VARCHAR(35),
negativerule BOOLEAN,
frompartner VARCHAR(35),
topartner VARCHAR(35),
idchannel VARCHAR(35),
idroute VARCHAR(35),
editype VARCHAR(35),
messagetype VARCHAR(35) )
''',None)
except:
traceback.print_exc()
print 'Error while updating the database. Database is not updated.'
botsglobal.db.rollback()
sys.exit(1)
botsglobal.db.commit()
cursor.close()
print 'Database is updated.'
sys.exit(0)
| Python |
import decimal
import copy
from django.utils.translation import ugettext as _
import botslib
import botsglobal
from botsconfig import *
#module-global sort key used by Node.sort()/nodecompare; set just before sorting.
#NOTE(review): module-global state makes Node.sort() non-reentrant / not thread-safe.
comparekey=None
def nodecompare(node):
    ''' sort-key function for Node.sort(): returns the value found at mpath 'comparekey'. '''
    global comparekey
    return node.get(*comparekey)
class Node(object):
    ''' Node class for building trees in inmessage and outmessage
    '''
    def __init__(self,record=None,BOTSIDnr=None):
        #record: dict with the fields of one edi record; None for the root node.
        #BOTSIDnr: occurrence number of the record-id; when absent, defaults to '1'.
        self.record = record #record is a dict with fields
        if self.record is not None:
            if BOTSIDnr is None:
                if not 'BOTSIDnr' in self.record:
                    self.record['BOTSIDnr'] = '1'
            else:
                self.record['BOTSIDnr'] = BOTSIDnr
        self.children = []
        self._queries = None    #lazily-filled dict of query values; see getquerie/updatequerie
    def getquerie(self):
        ''' get queries of a node '''
        if self._queries:
            return self._queries
        else:
            return {}
    def updatequerie(self,updatequeries):
        ''' set/update queries of a node with dict queries.
        '''
        if updatequeries:
            if self._queries is None:
                self._queries = updatequeries.copy()
            else:
                self._queries.update(updatequeries)
    queries = property(getquerie,updatequerie)
    def processqueries(self,queries,maxlevel):
        ''' copies values for queries 'down the tree' untill right level.
            So when edi file is split up in messages,
            querie-info from higher levels is copied to message.'''
        self.queries = queries
        if self.record and not maxlevel:
            return
        for child in self.children:
            child.processqueries(self.queries,maxlevel-1)
    def append(self,node):
        '''append child to node'''
        self.children += [node]
    def display(self,level=0):
        '''for debugging
            usage: in mapping script: inn.root.display()
        '''
        if level==0:
            print 'displaying all nodes in node tree:'
        print ' '*level,self.record
        for child in self.children:
            child.display(level+1)
    def displayqueries(self,level=0):
        '''for debugging
            usage: in mapping script: inn.root.displayqueries()
        '''
        if level==0:
            print 'displaying queries for nodes in tree'
        print ' '*level,'node:',
        if self.record:
            print self.record['BOTSID'],
        else:
            print 'None',
        print '',
        print self.queries
        for child in self.children:
            child.displayqueries(level+1)
    def enhancedget(self,mpaths,replace=False):
        ''' to get QUERIES or SUBTRANSLATION while parsing edifile;
            mpath can be
            - dict:     do get(mpath); can not be a mpath with multiple
            - tuple:    do get(mpath); can be multiple dicts in mapth
            - list:     for each listmembr do a get(); append the results
            Used by:
            - QUERIES
            - SUBTRANSLATION
        '''
        if isinstance(mpaths,dict):
            return self.get(mpaths)
        elif isinstance(mpaths,tuple):
            return self.get(*mpaths)
        elif isinstance(mpaths,list):
            collect = u''
            for mpath in mpaths:
                found = self.get(mpath)
                if found:
                    if replace:
                        #replace '.' by '_' in the found value (eg for use in file names)
                        found = found.replace('.','_')
                    collect += found
            return collect
        else:
            raise botslib.MappingFormatError(_(u'must be dict, list or tuple: enhancedget($mpath)'),mpath=mpaths)
    def change(self,where,change):
        ''' change fields of the first record matching mpath 'where' with the values in dict 'change'.
            Returns True if a record was changed, False if no match was found.
        '''
        #find first matching node using 'where'. Do not look at other matching nodes (is a feature)
        #prohibit change of BOTSID?
        mpaths = where  #diff from getcore
        if not mpaths or not isinstance(mpaths,tuple):
            raise botslib.MappingFormatError(_(u'parameter "where" must be tuple: change(where=$where,change=$change)'),where=where,change=change)
        #check: 'BOTSID' is required
        #check: all values should be strings
        for part in mpaths:
            if not isinstance(part,dict):
                raise botslib.MappingFormatError(_(u'parameter "where" must be dicts in a tuple: change(where=$where,change=$change)'),where=where,change=change)
            if not 'BOTSID' in part:
                raise botslib.MappingFormatError(_(u'section without "BOTSID": change(where=$where,change=$change)'),where=where,change=change)
            for key,value in part.iteritems():
                if not isinstance(key,basestring):
                    raise botslib.MappingFormatError(_(u'keys must be strings: change(where=$where,change=$change)'),where=where,change=change)
                if not isinstance(value,basestring):
                    raise botslib.MappingFormatError(_(u'values must be strings: change(where=$where,change=$change)'),where=where,change=change)
        #check change parameter
        if not change or not isinstance(change,dict):
            raise botslib.MappingFormatError(_(u'parameter "change" must be dict: change(where=$where,change=$change)'),where=where,change=change)
        #remove 'BOTSID' from change.
        #check: all values should be strings
        change.pop('BOTSID','nep')
        for key,value in change.iteritems():
            if not isinstance(key,basestring):
                raise botslib.MappingFormatError(_(u'keys in "change" must be strings: change(where=$where,change=$change)'),where=where,change=change)
            if not isinstance(value,basestring) and value is not None:
                raise botslib.MappingFormatError(_(u'values in "change" must be strings or "None": change(where=$where,change=$change)'),where=where,change=change)
        #go get it!
        terug = self._changecore(where,change)
        botsglobal.logmap.debug(u'"%s" for change(where=%s,change=%s)',terug,str(where),str(change))
        return terug
    def _changecore(self,where,change):  #diff from getcore
        #recursive worker for change(); returns True when a match was found and changed.
        mpaths = where  #diff from getcore
        mpath = mpaths[0]
        if self.record['BOTSID'] == mpath['BOTSID']:    #is record-id equal to mpath-botsid? Not strictly needed, but gives much beter performance...
            for part in mpath:          #check all mpath-parts;
                if part in self.record:
                    if mpath[part] == self.record[part]:
                        continue
                    else:   #content of record-field and mpath-part do not match
                        return False
                else:   #not all parts of mpath are in record, so no match:
                    return False
            else:   #all parts are matched, and OK.
                if len(mpaths) == 1:    #mpath is exhausted; so we are there!!! #replace values with values in 'change'; delete if None
                    for key,value in change.iteritems():
                        if value is None:
                            self.record.pop(key,'dummy for pop')
                        else:
                            self.record[key]=value
                    return True
                else:
                    for childnode in self.children:
                        terug =  childnode._changecore(mpaths[1:],change) #search recursive for rest of mpaths  #diff from getcore
                        if terug:
                            return terug
                    else:   #no child has given a valid return
                        return False
        else:       #record-id is not equal to mpath-botsid, so no match
            return False
    def delete(self,*mpaths):
        ''' delete the last record of mpath if found (first: find/identify, than delete. '''
        if not mpaths or not isinstance(mpaths,tuple):
            raise botslib.MappingFormatError(_(u'must be dicts in tuple: delete($mpath)'),mpath=mpaths)
        if len(mpaths) ==1:
            raise botslib.MappingFormatError(_(u'only one dict: not allowed. Use different solution: delete($mpath)'),mpath=mpaths)
        #check: None only allowed in last section of Mpath (check firsts sections)
        #check: 'BOTSID' is required
        #check: all values should be strings
        for part in mpaths:
            if not isinstance(part,dict):
                raise botslib.MappingFormatError(_(u'must be dicts in tuple: delete($mpath)'),mpath=mpaths)
            if not 'BOTSID' in part:
                raise botslib.MappingFormatError(_(u'section without "BOTSID": delete($mpath)'),mpath=mpaths)
            for key,value in part.iteritems():
                if not isinstance(key,basestring):
                    raise botslib.MappingFormatError(_(u'keys must be strings: delete($mpath)'),mpath=mpaths)
                if not isinstance(value,basestring):
                    raise botslib.MappingFormatError(_(u'values must be strings: delete($mpath)'),mpath=mpaths)
        #go get it!
        terug = bool(self._deletecore(*mpaths))
        botsglobal.logmap.debug(u'"%s" for delete%s',terug,str(mpaths))
        return terug  #return False if not removed, return True if removed
    def _deletecore(self,*mpaths):
        #recursive worker for delete(); return codes:
        #0: no match; 2: match found, caller (parent) must delete this child; 1: deleted, stop.
        mpath = mpaths[0]
        if self.record['BOTSID'] == mpath['BOTSID']: #is record-id equal to mpath-botsid? Not strictly needed, but gives much beter performance...
            for part in mpath:          #check all mpath-parts;
                if part in self.record:
                    if mpath[part] == self.record[part]:
                        continue
                    else:   #content of record-field and mpath-part do not match
                        return 0
                else:   #not all parts of mpath are in record, so no match:
                    return 0
            else:   #all parts are matched, and OK.
                if len(mpaths) == 1:    #mpath is exhausted; so we are there!!!
                    return 2
                else:
                    for i, childnode in enumerate(self.children):
                        terug =  childnode._deletecore(*mpaths[1:]) #search recursive for rest of mpaths
                        if terug == 2:  #indicates node should be removed
                            del self.children[i]    #remove node
                            return 1    #this indicates: deleted successfull, do not remove anymore (no removal of parents)
                        if terug:
                            return terug
                    else:   #no child has given a valid return
                        return 0
        else:       #record-id is not equal to mpath-botsid, so no match
            return 0
    def get(self,*mpaths):
        ''' get value of a field in a record from a edi-message
            mpath is xpath-alike query to identify the record/field
            function returns 1 value; return None if nothing found.
            if more than one value can be found: first one is returned
            starts searching in current node, then deeper
        '''
        if not mpaths or not isinstance(mpaths,tuple):
            raise botslib.MappingFormatError(_(u'must be dicts in tuple: get($mpath)'),mpath=mpaths)
        for part in mpaths:
            if not isinstance(part,dict):
                raise botslib.MappingFormatError(_(u'must be dicts in tuple: get($mpath)'),mpath=mpaths)
        #check: None only allowed in last section of Mpath (check firsts sections)
        #check: 'BOTSID' is required
        #check: all values should be strings
        for part in mpaths[:-1]:
            if not 'BOTSID' in part:
                raise botslib.MappingFormatError(_(u'section without "BOTSID": get($mpath)'),mpath=mpaths)
            if not 'BOTSIDnr' in part:
                part['BOTSIDnr'] = '1'
            for key,value in part.iteritems():
                if not isinstance(key,basestring):
                    raise botslib.MappingFormatError(_(u'keys must be strings: get($mpath)'),mpath=mpaths)
                if not isinstance(value,basestring):
                    raise botslib.MappingFormatError(_(u'values must be strings: get($mpath)'),mpath=mpaths)
        #check: None only allowed in last section of Mpath (check last section)
        #check: 'BOTSID' is required
        #check: all values should be strings
        if not 'BOTSID' in mpaths[-1]:
            raise botslib.MappingFormatError(_(u'last section without "BOTSID": get($mpath)'),mpath=mpaths)
        if not 'BOTSIDnr' in mpaths[-1]:
            mpaths[-1]['BOTSIDnr'] = '1'
        count = 0
        for key,value in mpaths[-1].iteritems():
            if not isinstance(key,basestring):
                raise botslib.MappingFormatError(_(u'keys must be strings in last section: get($mpath)'),mpath=mpaths)
            if value is None:
                count += 1
            elif not isinstance(value,basestring):
                raise botslib.MappingFormatError(_(u'values must be strings (or none) in last section: get($mpath)'),mpath=mpaths)
        if count > 1:
            raise botslib.MappingFormatError(_(u'max one "None" in last section: get($mpath)'),mpath=mpaths)
        #go get it!
        terug =  self._getcore(*mpaths)
        botsglobal.logmap.debug(u'"%s" for get%s',terug,str(mpaths))
        return terug
    def _getcore(self,*mpaths):
        #recursive worker for get(); the 'None'-valued key in the last mpath-section marks the field to fetch.
        mpath = mpaths[0]
        terug = 1   #if there is no 'None' in the mpath, but everything is matched, 1 is returned (like True)
        if self.record['BOTSID'] == mpath['BOTSID']: #is record-id equal to mpath-botsid? Not strictly needed, but gives much beter performance...
            for part in mpath:          #check all mpath-parts;
                if part in self.record:
                    if mpath[part] is None:     #this is the field we are looking for; but not all matches have been made so remember value
                        terug = self.record[part][:]    #copy to avoid memory problems
                    else:       #compare values of mpath-part and recordfield
                        if mpath[part] == self.record[part]:
                            continue
                        else:   #content of record-field and mpath-part do not match
                            return None
                else:   #not all parts of mpath are in record, so no match:
                    return None
            else:   #all parts are matched, and OK.
                if len(mpaths) == 1:    #mpath is exhausted; so we are there!!!
                    return terug
                else:
                    for childnode in self.children:
                        terug =  childnode._getcore(*mpaths[1:]) #search recursive for rest of mpaths
                        if terug:
                            return terug
                    else:   #no child has given a valid return
                        return None
        else:       #record-id is not equal to mpath-botsid, so no match
            return None
    def getcount(self):
        '''count the number of nodes/records uner the node/in whole tree'''
        count = 0
        if self.record:
            count += 1   #count itself
        for child in self.children:
            count += child.getcount()
        return count
    def getcountoccurrences(self,*mpaths):
        ''' count number of occurences of mpath. Eg count nr of LIN's'''
        count = 0
        for value in self.getloop(*mpaths):
            count += 1
        return count
    def getcountsum(self,*mpaths):
        ''' return the sum for all values found in mpath. Eg total number of ordered quantities.'''
        count = decimal.Decimal(0)
        #work on a deep copy: the None-marker is stripped from the last section for getloop()
        mpathscopy = copy.deepcopy(mpaths)
        for key,value in mpaths[-1].items():
            if value is None:
                del mpathscopy[-1][key]
        for i in self.getloop(*mpathscopy):
            value = i.get(mpaths[-1])
            if value:
                count += decimal.Decimal(value)
        return unicode(count)
    def getloop(self,*mpaths):
        ''' generator. Returns one by one the nodes as indicated in mpath
        '''
        #check validity mpaths
        if not mpaths or not isinstance(mpaths,tuple):
            raise botslib.MappingFormatError(_(u'must be dicts in tuple: getloop($mpath)'),mpath=mpaths)
        for part in mpaths:
            if not isinstance(part,dict):
                raise botslib.MappingFormatError(_(u'must be dicts in tuple: getloop($mpath)'),mpath=mpaths)
            if not 'BOTSID' in part:
                raise botslib.MappingFormatError(_(u'section without "BOTSID": getloop($mpath)'),mpath=mpaths)
            if not 'BOTSIDnr' in part:
                part['BOTSIDnr'] = '1'
            for key,value in part.iteritems():
                if not isinstance(key,basestring):
                    raise botslib.MappingFormatError(_(u'keys must be strings: getloop($mpath)'),mpath=mpaths)
                if not isinstance(value,basestring):
                    raise botslib.MappingFormatError(_(u'values must be strings: getloop($mpath)'),mpath=mpaths)
        for terug in self._getloopcore(*mpaths):
            botsglobal.logmap.debug(u'getloop %s returns "%s".',mpaths,terug.record)
            yield terug
    def _getloopcore(self,*mpaths):
        ''' recursive part of getloop()
        '''
        mpath = mpaths[0]
        if self.record['BOTSID'] == mpath['BOTSID']: #found right record
            for part in mpath:
                if not part in self.record or mpath[part] != self.record[part]:
                    return
            else:   #all parts are checked, and OK.
                if len(mpaths) == 1:
                    yield self
                else:
                    for childnode in self.children:
                        for terug in childnode._getloopcore(*mpaths[1:]): #search recursive for rest of mpaths
                            yield terug
        return
    def getnozero(self,*mpaths):
        ''' like get(), but returns None if the value found is zero or not numeric. '''
        terug = self.get(*mpaths)
        try:
            value = float(terug)
        except TypeError:
            return None
        except ValueError:
            return None
        if value == 0:
            return None
        return terug
    def put(self,*mpaths,**kwargs):
        ''' put values into the tree (building/extending it as needed).
            mpaths: tuple of dicts; values are converted to unicode and stripped
            (unless keyword strip=False, used for the fixed ISA header of x12).
            Returns True; returns False if any value is None (nothing is written then).
        '''
        if not mpaths or not isinstance(mpaths,tuple):
            raise botslib.MappingFormatError(_(u'must be dicts in tuple: put($mpath)'),mpath=mpaths)
        for part in mpaths:
            if not isinstance(part,dict):
                raise botslib.MappingFormatError(_(u'must be dicts in tuple: put($mpath)'),mpath=mpaths)
        #check: 'BOTSID' is required
        #check: all values should be strings
        for part in mpaths:
            if not 'BOTSID' in part:
                raise botslib.MappingFormatError(_(u'section without "BOTSID": put($mpath)'),mpath=mpaths)
            if not 'BOTSIDnr' in part:
                part['BOTSIDnr'] = '1'
            for key,value in part.iteritems():
                if value is None:
                    botsglobal.logmap.debug(u'"None" in put %s.',str(mpaths))
                    return False
                if not isinstance(key,basestring):
                    raise botslib.MappingFormatError(_(u'keys must be strings: put($mpath)'),mpath=mpaths)
                if kwargs and 'strip' in kwargs and kwargs['strip'] == False:
                    part[key] = unicode(value)          #used for fixed ISA header of x12
                else:
                    part[key] = unicode(value).strip()  #leading and trailing spaces are stripped from the values
        if self.sameoccurence(mpaths[0]):
            self._putcore(*mpaths[1:])
        else:
            raise botslib.MappingRootError(_(u'error in root put "$mpath".'),mpath=mpaths[0])
        botsglobal.logmap.debug(u'"True" for put %s',str(mpaths))
        return True
    def _putcore(self,*mpaths):
        #recursive worker for put(): descend into a matching child or append a new one.
        if not mpaths:  #newmpath is exhausted, stop searching.
            return True
        for node in self.children:
            if node.record['BOTSID']==mpaths[0]['BOTSID'] and node.sameoccurence(mpaths[0]):
                node._putcore(*mpaths[1:])
                return
        else:   #is not present in children, so append
            self.append(Node(mpaths[0]))
            self.children[-1]._putcore(*mpaths[1:])
    def putloop(self,*mpaths):
        ''' like put(), but the last mpath-section always creates a NEW child node.
            Returns the (new) node so repeating segments/loops can be built.
        '''
        if not mpaths or not isinstance(mpaths,tuple):
            raise botslib.MappingFormatError(_(u'must be dicts in tuple: putloop($mpath)'),mpath=mpaths)
        for part in mpaths:
            if not isinstance(part,dict):
                raise botslib.MappingFormatError(_(u'must be dicts in tuple: putloop($mpath)'),mpath=mpaths)
        #check: 'BOTSID' is required
        #check: all values should be strings
        for part in mpaths:
            if not 'BOTSID' in part:
                raise botslib.MappingFormatError(_(u'section without "BOTSID": putloop($mpath)'),mpath=mpaths)
            if not 'BOTSIDnr' in part:
                part['BOTSIDnr'] = '1'
            for key,value in part.iteritems():
                if not isinstance(key,basestring):
                    raise botslib.MappingFormatError(_(u'keys must be strings: putloop($mpath)'),mpath=mpaths)
                if value is None:
                    return False
                #~ if not isinstance(value,basestring):
                    #~ raise botslib.MappingFormatError(_(u'values must be strings in putloop%s'%(str(mpaths)))
                part[key] = unicode(value).strip()
        if self.sameoccurence(mpaths[0]):
            if len(mpaths)==1:
                return self
            return self._putloopcore(*mpaths[1:])
        else:
            raise botslib.MappingRootError(_(u'error in root putloop "$mpath".'),mpath=mpaths[0])
    def _putloopcore(self,*mpaths):
        #recursive worker for putloop(); always appends a new node for the last mpath-section.
        if len(mpaths) ==1: #end of mpath reached; always make new child-node
            self.append(Node(mpaths[0]))
            return self.children[-1]
        for node in self.children:          #if first part of mpaths exists already in children go recursive
            if node.record['BOTSID']==mpaths[0]['BOTSID'] and node.record['BOTSIDnr']==mpaths[0]['BOTSIDnr'] and node.sameoccurence(mpaths[0]):
                return node._putloopcore(*mpaths[1:])
        else:   #is not present in children, so append a child, and go recursive
            self.append(Node(mpaths[0]))
            return self.children[-1]._putloopcore(*mpaths[1:])
    def sameoccurence(self, mpath):
        ''' check if 'mpath' is the same occurrence as this node's record:
            every key present in both must have an equal value.
            If so, the remaining mpath-fields are merged into the record and True is returned.
        '''
        for key,value in self.record.iteritems():
            if (key in mpath) and (mpath[key]!=value):
                return False
        else:   #all equal keys have same values, thus both are 'equal'.
            self.record.update(mpath)   #add items to self.record that are new
            return True
    def sort(self,*mpaths):
        ''' sort the direct children; the sort-key is the value found via mpaths[1:]
            (mpaths[0] presumably addresses this node itself - TODO confirm).
            NOTE(review): uses module-global 'comparekey', so not reentrant.
        '''
        global comparekey
        comparekey = mpaths[1:]
        self.children.sort(key=nodecompare)
| Python |
import django
from django.contrib import admin
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.contrib.admin.util import unquote, flatten_fieldsets, get_deleted_objects, model_ngettext, model_format_dict
from django.core.exceptions import PermissionDenied
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
#***********
import models
import botsglobal
#remove Django's default 'delete selected' action site-wide; BotsAdmin supplies its own
#'bulk_delete' action (plain obj.delete() per row, no confirmation page).
admin.site.disable_action('delete_selected')
class BotsAdmin(admin.ModelAdmin):
    ''' common base for all bots admin classes: page size from bots.ini,
        'save as' enabled, simplified delete, and shared admin actions. '''
    #rows per admin list page: 'adminlimit' from bots.ini, falling back to 'limit', then 30
    list_per_page = botsglobal.ini.getint('settings','adminlimit',botsglobal.ini.getint('settings','limit',30))
    save_as = True
    def delete_view(self, request, object_id, extra_context=None):
        ''' copy from admin.ModelAdmin; adapted: do not check references: no cascading deletes; no confirmation.'''
        opts = self.model._meta
        app_label = opts.app_label  #NOTE(review): unused local - kept for parity with the upstream copy
        try:
            obj = self.queryset(request).get(pk=unquote(object_id))
        except self.model.DoesNotExist:
            #permission check below still runs with obj=None before the 404 is raised
            obj = None
        if not self.has_delete_permission(request, obj):
            raise PermissionDenied(_(u'Permission denied'))
        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
        obj_display = force_unicode(obj)
        self.log_deletion(request, obj, obj_display)
        obj.delete()
        self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
        if not self.has_change_permission(request, None):
            return HttpResponseRedirect("../../../../")
        return HttpResponseRedirect("../../")
    def activate(self, request, queryset):
        ''' admin action: toggle the 'active' flag of each selected object.'''
        for obj in queryset:
            obj.active = not obj.active
            obj.save()
    activate.short_description = _(u'activate/de-activate')
    def bulk_delete(self, request, queryset):
        ''' admin action: delete each selected object directly (no confirmation page).'''
        for obj in queryset:
            obj.delete()
    bulk_delete.short_description = _(u'delete selected')
#*****************************************************************************************************
class CcodeAdmin(BotsAdmin):
    ''' admin for ccode entries (user code translations). '''
    actions = ('bulk_delete',)
    list_display = ('ccodeid','leftcode','rightcode','attr1','attr2','attr3','attr4','attr5','attr6','attr7','attr8')
    list_display_links = ('ccodeid',)
    list_filter = ('ccodeid',)
    ordering = ('ccodeid','leftcode')
    search_fields = ('ccodeid__ccodeid','leftcode','rightcode','attr1','attr2','attr3','attr4','attr5','attr6','attr7','attr8')
    def lookup_allowed(self, lookup, *args, **kwargs):
        #filters on ccodeid are always permitted; anything else is decided by Django itself
        return lookup.startswith('ccodeid') or super(CcodeAdmin, self).lookup_allowed(lookup, *args, **kwargs)
admin.site.register(models.ccode,CcodeAdmin)
class CcodetriggerAdmin(BotsAdmin):
    ''' admin for ccodetrigger entries (types of user code translations). '''
    actions = ('bulk_delete',)
    list_display = ('ccodeid','ccodeid_desc',)
    list_display_links = ('ccodeid',)
    ordering = ('ccodeid',)
    search_fields = ('ccodeid','ccodeid_desc')
admin.site.register(models.ccodetrigger,CcodetriggerAdmin)
class ChannelAdmin(BotsAdmin):
    ''' admin for communication channels; FTP- and advanced options are in collapsed fieldsets. '''
    actions = ('bulk_delete',)
    list_display = ('idchannel', 'inorout', 'type','host', 'port', 'username', 'secret', 'path', 'filename', 'remove', 'charset', 'archivepath','rsrv2','ftpactive', 'ftpbinary','askmdn', 'syslock', 'starttls','apop')
    list_filter = ('inorout','type')
    ordering = ('idchannel',)
    search_fields = ('idchannel', 'inorout', 'type','host', 'username', 'path', 'filename', 'archivepath', 'charset')
    fieldsets = (
        (None,          {'fields': ('idchannel', ('inorout','type'), ('host','port'), ('username', 'secret'), ('path', 'filename'), 'remove', 'archivepath', 'charset','desc')
                        }),
        (_(u'FTP specific data'),{'fields': ('ftpactive', 'ftpbinary', 'ftpaccount' ),
                         'classes': ('collapse',)
                        }),
        (_(u'Advanced'),{'fields': (('lockname', 'syslock'), 'parameters', 'starttls','apop','askmdn','rsrv2'),
                         'classes': ('collapse',)
                        }),
        )
admin.site.register(models.channel,ChannelAdmin)
class ConfirmruleAdmin(BotsAdmin):
    ''' admin for confirm rules (which messages get/ask a confirmation, eg MDN/CONTRL). '''
    actions = ('activate','bulk_delete')
    list_display = ('active','negativerule','confirmtype','ruletype', 'frompartner', 'topartner','idroute','idchannel','editype','messagetype')
    list_display_links = ('confirmtype',)
    list_filter = ('active','confirmtype','ruletype')
    search_fields = ('confirmtype','ruletype', 'frompartner__idpartner', 'topartner__idpartner', 'idroute', 'idchannel__idchannel', 'editype', 'messagetype')
    ordering = ('confirmtype','ruletype')
    fieldsets = (
        (None, {'fields': ('active','negativerule','confirmtype','ruletype','frompartner', 'topartner','idroute','idchannel',('editype','messagetype'))}),
        )
admin.site.register(models.confirmrule,ConfirmruleAdmin)
class MailInline(admin.TabularInline):
    ''' inline editing of per-channel mail addresses (chanpar rows) on the partner admin page. '''
    model = models.chanpar
    fields = ('idchannel','mail', 'cc')
    extra = 1
class MyPartnerAdminForm(django.forms.ModelForm):
    ''' customs form for partners to check if group has groups'''
    class Meta:
        model = models.partner
    def clean(self):
        #reject a partner that is itself a group while also being member of a group
        super(MyPartnerAdminForm, self).clean()
        if self.cleaned_data['isgroup'] and self.cleaned_data['group']:
            raise django.forms.util.ValidationError(_(u'A group can not be part of a group.'))  #NOTE(review): django.forms.util was renamed to django.forms.utils in Django 1.7 - confirm target Django version
        return self.cleaned_data
class PartnerAdmin(BotsAdmin):
    ''' Admin screen for edi-partners (and partner-groups). '''
    actions = ('bulk_delete','activate')
    form = MyPartnerAdminForm   #custom form: validates that a group is not member of a group
    fields = ('active', 'isgroup', 'idpartner', 'name','mail','cc','group')
    filter_horizontal = ('group',)
    inlines = (MailInline,)     #per-channel mail/cc parameters
    list_display = ('active','isgroup','idpartner', 'name','mail','cc')
    list_display_links = ('idpartner',)
    list_filter = ('active','isgroup')
    ordering = ('idpartner',)
    search_fields = ('idpartner','name','mail','cc')
admin.site.register(models.partner,PartnerAdmin)
class RoutesAdmin(BotsAdmin):
    ''' Admin screen for routes (a route: receive via fromchannel, translate, send via tochannel). '''
    actions = ('bulk_delete','activate')
    list_display = ('active', 'idroute', 'seq', 'fromchannel', 'fromeditype', 'frommessagetype', 'alt', 'frompartner', 'topartner', 'translateind', 'tochannel', 'defer', 'toeditype', 'tomessagetype', 'frompartner_tochannel', 'topartner_tochannel', 'testindicator', 'notindefaultrun')
    list_display_links = ('idroute',)
    list_filter = ('idroute','active','fromeditype')
    ordering = ('idroute','seq')    #a route is a sequence of route-parts, ordered by seq
    search_fields = ('idroute', 'fromchannel__idchannel','fromeditype', 'frommessagetype', 'alt', 'tochannel__idchannel','toeditype', 'tomessagetype')
    fieldsets = (
        (None, {'fields': ('active',('idroute', 'seq'),'fromchannel', ('fromeditype', 'frommessagetype'),'translateind','tochannel','desc')}),
        (_(u'Filtering for outchannel'),{'fields':('toeditype', 'tomessagetype','frompartner_tochannel', 'topartner_tochannel', 'testindicator'),
                                         'classes': ('collapse',)
                                        }),
        (_(u'Advanced'),{'fields': ('alt', 'frompartner', 'topartner', 'notindefaultrun','defer'),
                         'classes': ('collapse',)
                        }),
        )
admin.site.register(models.routes,RoutesAdmin)
class MyTranslateAdminForm(django.forms.ModelForm):
    ''' custom form for translations to check if entry exists
        (unique_together is not validated right because of null values in partner fields).
    '''
    class Meta:
        model = models.translate
    def clean(self):
        ''' form-wide validation: reject a duplicate of an existing translation rule. '''
        super(MyTranslateAdminForm, self).clean()
        #a field that failed its own validation is absent from cleaned_data; plain indexing
        #would raise KeyError (HTTP 500). Skip the uniqueness check in that case: the
        #field errors are reported anyway.
        checked_fields = ('fromeditype','frommessagetype','alt','frompartner','topartner')
        if any(fieldname not in self.cleaned_data for fieldname in checked_fields):
            return self.cleaned_data
        b = models.translate.objects.filter(fromeditype=self.cleaned_data['fromeditype'],
                                            frommessagetype=self.cleaned_data['frommessagetype'],
                                            alt=self.cleaned_data['alt'],
                                            frompartner=self.cleaned_data['frompartner'],
                                            topartner=self.cleaned_data['topartner'])
        #allow saving the record itself (same pk); reject only a different existing record
        if b and (self.instance.pk is None or self.instance.pk != b[0].id):
            raise django.forms.util.ValidationError(_(u'Combination of fromeditype,frommessagetype,alt,frompartner,topartner already exists.'))
        return self.cleaned_data
class TranslateAdmin(BotsAdmin):
    ''' Admin screen for translation rules (which mapping script translates which editype/messagetype). '''
    actions = ('bulk_delete','activate')
    form = MyTranslateAdminForm     #custom form: checks uniqueness incl. null partner fields
    list_display = ('active', 'fromeditype', 'frommessagetype', 'alt', 'frompartner', 'topartner', 'tscript', 'toeditype', 'tomessagetype')
    list_display_links = ('fromeditype',)
    list_filter = ('active','fromeditype','toeditype')
    ordering = ('fromeditype','frommessagetype')
    search_fields = ('fromeditype', 'frommessagetype', 'alt', 'frompartner__idpartner', 'topartner__idpartner', 'tscript', 'toeditype', 'tomessagetype')
    fieldsets = (
        (None, {'fields': ('active', ('fromeditype', 'frommessagetype'),'tscript', ('toeditype', 'tomessagetype','desc'))
               }),
        (_(u'Advanced - multiple translations per editype/messagetype'),{'fields': ('alt', 'frompartner', 'topartner'),
                                                                         'classes': ('collapse',)
                                                                        }),
        )
admin.site.register(models.translate,TranslateAdmin)
class UniekAdmin(BotsAdmin):    #AKA counters
    ''' Admin screen for unique-number counters (eg interchange reference counters). '''
    actions = None      #no bulk actions; counters are edited in place
    list_display = ('domein', 'nummer')
    list_editable = ('nummer',)     #counter value editable directly in the list view
    ordering = ('domein',)
    search_fields = ('domein',)
admin.site.register(models.uniek,UniekAdmin)
#User - change the default display of user screen
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
#show more columns than django's default user admin
UserAdmin.list_display = ('username', 'first_name', 'last_name','email', 'is_active', 'is_staff', 'is_superuser', 'date_joined','last_login')
#re-register User so the extended column list takes effect
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| Python |
import sys
import os
import encodings
import codecs
import ConfigParser
import logging, logging.handlers
from django.utils.translation import ugettext as _
#Bots-modules
from botsconfig import *
import botsglobal
import botslib
class BotsConfig(ConfigParser.SafeConfigParser):
    ''' As SafeConfigParser, but the getters take a default value that is returned
        when the section/option is missing (or, for typed getters, invalid).
        get() with no (non-empty) default raises BotsError for a missing entry.
    '''
    def get(self,section, option, default=''):
        try:
            return ConfigParser.SafeConfigParser.get(self,section,option)
        except ConfigParser.Error:  #no such section/option; was a bare except, which also swallowed KeyboardInterrupt/SystemExit
            if default == '':   #NOTE: empty string doubles as the 'no default given' sentinel
                raise botslib.BotsError(_(u'No entry "$entry" in section "$section" in "bots.ini".'),entry=option,section=section)
            return default
    def getint(self,section, option, default):
        try:
            return ConfigParser.SafeConfigParser.getint(self,section,option)
        except (ConfigParser.Error,ValueError):     #missing entry or value is not an integer; was a bare except
            return default
    def getboolean(self,section, option, default):
        try:
            return ConfigParser.SafeConfigParser.getboolean(self,section,option)
        except (ConfigParser.Error,ValueError):     #missing entry or value is not a boolean; was a bare except
            return default
def generalinit(configdir):
    ''' initialise bots from the configuration directory:
        - import <configdir>/settings.py and store it as botsglobal.settings
        - read bots.ini into botsglobal.ini
        - make usersys importable
        - derive directory settings, django settings, charsets and the global timeout.
        configdir: path of the configuration directory (must be importable).
    '''
    #Set Configdir
    #Configdir MUST be importable. So configdir is relative to PYTHONPATH. Try several options for this import.
    try: #configdir outside bots-directory: import configdir.settings.py
        importnameforsettings = os.path.normpath(os.path.join(configdir,'settings')).replace(os.sep,'.')
        settings = botslib.botsbaseimport(importnameforsettings)
    except ImportError: #configdir is in bots directory: import bots.configdir.settings.py
        try:
            importnameforsettings = os.path.normpath(os.path.join('bots',configdir,'settings')).replace(os.sep,'.')
            settings = botslib.botsbaseimport(importnameforsettings)
        except ImportError: #set pythonpath to config directory first
            if not os.path.exists(configdir):    #check if configdir exists.
                raise botslib.BotsError(_(u'In initilisation: path to configuration does not exists: "$path".'),path=configdir)
            addtopythonpath = os.path.abspath(os.path.dirname(configdir))
            moduletoimport = os.path.basename(configdir)
            sys.path.append(addtopythonpath)
            importnameforsettings = os.path.normpath(os.path.join(moduletoimport,'settings')).replace(os.sep,'.')
            settings = botslib.botsbaseimport(importnameforsettings)
    #settings are accessed using botsglobal
    botsglobal.settings = settings
    #Find pathname configdir using imported settings.py.
    configdirectory = os.path.abspath(os.path.dirname(settings.__file__))
    #Read configuration-file bots.ini.
    botsglobal.ini = BotsConfig()
    cfgfile = open(os.path.join(configdirectory,'bots.ini'), 'r')
    botsglobal.ini.readfp(cfgfile)
    cfgfile.close()
    #Set usersys.
    #usersys MUST be importable. So usersys is relative to PYTHONPATH. Try several options for this import.
    usersys = botsglobal.ini.get('directories','usersys','usersys')
    try: #usersys outside bots-directory: import usersys
        importnameforusersys = os.path.normpath(usersys).replace(os.sep,'.')
        importedusersys = botslib.botsbaseimport(importnameforusersys)
    except ImportError: #usersys is in bots directory: import bots.usersys
        try:
            importnameforusersys = os.path.normpath(os.path.join('bots',usersys)).replace(os.sep,'.')
            importedusersys = botslib.botsbaseimport(importnameforusersys)
        except ImportError: #set pythonpath to usersys directory first
            if not os.path.exists(usersys):    #check if usersys exists.
                raise botslib.BotsError(_(u'In initilisation: path to configuration does not exists: "$path".'),path=usersys)
            addtopythonpath = os.path.abspath(os.path.dirname(usersys))
            moduletoimport = os.path.basename(usersys)
            sys.path.append(addtopythonpath)
            importnameforusersys = os.path.normpath(usersys).replace(os.sep,'.')
            importedusersys = botslib.botsbaseimport(importnameforusersys)
    #set directory settings in bots.ini************************************************************
    botsglobal.ini.set('directories','botspath',botsglobal.settings.PROJECT_PATH)
    botsglobal.ini.set('directories','config',configdirectory)
    botsglobal.ini.set('directories','usersysabs',os.path.abspath(os.path.dirname(importedusersys.__file__)))   #find pathname usersys using imported usersys
    botsglobal.usersysimportpath = importnameforusersys
    botssys = botsglobal.ini.get('directories','botssys','botssys')
    botsglobal.ini.set('directories','botssys',botslib.join(botssys))
    botsglobal.ini.set('directories','data',botslib.join(botssys,'data'))
    botslib.dirshouldbethere(botsglobal.ini.get('directories','data'))
    botsglobal.ini.set('directories','logging',botslib.join(botssys,'logging'))
    botslib.dirshouldbethere(botsglobal.ini.get('directories','logging'))
    botsglobal.ini.set('directories','templates',botslib.join(botsglobal.ini.get('directories','usersysabs'),'grammars/template/templates'))
    botsglobal.ini.set('directories','templateshtml',botslib.join(botsglobal.ini.get('directories','usersysabs'),'grammars/templatehtml/templates'))
    #set values in settings.py*********************************************************************
    if botsglobal.ini.get('webserver','environment','development') == 'development':   #values in bots.ini are also used in setting up cherrypy
        settings.DEBUG = True
    else:
        settings.DEBUG = False
    settings.TEMPLATE_DEBUG = settings.DEBUG
    #start initializing bots charsets (registers codec search hook and 'botsreplace' error handler)
    initbotscharsets()
    #set environment for django to start***********************************************************
    os.environ['DJANGO_SETTINGS_MODULE'] = importnameforsettings
    #bugfix: a second, identical initbotscharsets() call followed here; one call is sufficient.
    botslib.settimeout(botsglobal.ini.getint('settings','globaltimeout',10))
def initbotscharsets():
    '''set up right charset handling for bots-specific charsets (UNOA, UNOB, UNOC, etc).'''
    #register the search hook so python can locate codecs defined in usersys/charsets
    codecs.register(codec_search_function)
    #the char used by the 'botsreplace' error handler for chars outside a charset
    botsglobal.botsreplacechar = unicode(botsglobal.ini.get('settings','botsreplacechar',u' '))
    codecs.register_error('botsreplace', botscharsetreplace)
    #every entry in the [charsets] section of bots.ini becomes an encoding alias
    for alias, codecname in botsglobal.ini.items('charsets'):
        encodings.aliases.aliases[alias] = codecname
def codec_search_function(encoding):
    '''codec search hook (registered via codecs.register): resolve a bots
       user-defined charset; return None so python continues its own search.'''
    try:
        module, filename = botslib.botsimport('charsets', encoding)
    except:     #no bots-defined codec for this encoding
        return None
    getregentry = getattr(module, 'getregentry', None)
    if getregentry is None:
        return None
    return getregentry()
def botscharsetreplace(info):
    '''codecs error handler 'botsreplace': substitute a char outside the charset by the
       user-configured replacement char. Useful eg for fixed records: record length does
       not change. Unsure whether this works for eg UTF-8.'''
    replacement = botsglobal.botsreplacechar
    return (replacement, info.start + 1)    #resume encoding/decoding right after the bad char
def initenginelogging():
    '''set up the 'bots' logger: rotating engine.log file, optional console handler,
       and the 'bots.map' sub-logger used for mapping traces.'''
    level_by_name = {'DEBUG':logging.DEBUG,'INFO':logging.INFO,'WARNING':logging.WARNING,'ERROR':logging.ERROR,'CRITICAL':logging.CRITICAL}
    #main logger 'bots'; level DEBUG here, the handlers filter further
    botsglobal.logger = logging.getLogger('bots')
    botsglobal.logger.setLevel(logging.DEBUG)
    #rotating file handler for engine.log
    logfile_path = botslib.join(botsglobal.ini.get('directories','logging'),'engine.log')
    file_handler = logging.handlers.RotatingFileHandler(logfile_path,backupCount=botsglobal.ini.getint('settings','log_file_number',10))
    file_handler.setLevel(level_by_name[botsglobal.ini.get('settings','log_file_level','ERROR')])
    file_format = logging.Formatter("%(asctime)s %(levelname)-8s %(name)s : %(message)s",'%Y%m%d %H:%M:%S')
    file_handler.setFormatter(file_format)
    file_handler.doRollover()   #each run a new log file is used; old one is rotated
    botsglobal.logger.addHandler(file_handler)
    #sub-logger for the mapping trace; tried filters but could not get those to work
    botsglobal.logmap = logging.getLogger('bots.map')
    if not botsglobal.ini.getboolean('settings','mappingdebug',False):
        botsglobal.logmap.setLevel(logging.CRITICAL)
    #optional console handler
    if botsglobal.ini.getboolean('settings','log_console',True):
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_format = logging.Formatter("%(levelname)-8s %(message)s")
        console_handler.setFormatter(console_format)
        botsglobal.logger.addHandler(console_handler)
def connect():
    ''' Open a database connection as botsglobal.db; engine-specific code per database type.
        Raises PanicError when the SQLite database file is missing.
    '''
    #different connect code per type of database
    if botsglobal.settings.DATABASE_ENGINE == 'sqlite3':
        #sqlite has some more fiddling; in separate file. Mainly because of some other method of parameter passing.
        if not os.path.isfile(botsglobal.settings.DATABASE_NAME):
            raise botslib.PanicError(_(u'Could not find database file for SQLite'))
        import botssqlite
        botsglobal.db = botssqlite.connect(database = botsglobal.settings.DATABASE_NAME)
    elif botsglobal.settings.DATABASE_ENGINE == 'mysql':
        import MySQLdb
        from MySQLdb import cursors
        #DictCursor: rows are returned as dicts, columns accessed by name
        botsglobal.db = MySQLdb.connect(host=botsglobal.settings.DATABASE_HOST,
                                        port=int(botsglobal.settings.DATABASE_PORT),
                                        db=botsglobal.settings.DATABASE_NAME,
                                        user=botsglobal.settings.DATABASE_USER,
                                        passwd=botsglobal.settings.DATABASE_PASSWORD,
                                        cursorclass=cursors.DictCursor,
                                        **botsglobal.settings.DATABASE_OPTIONS)
    elif botsglobal.settings.DATABASE_ENGINE == 'postgresql_psycopg2':
        import psycopg2
        import psycopg2.extensions
        import psycopg2.extras
        psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
        #DictConnection: rows are returned as dicts, columns accessed by name
        botsglobal.db = psycopg2.connect( 'host=%s dbname=%s user=%s password=%s'%( botsglobal.settings.DATABASE_HOST,
                                                                                    botsglobal.settings.DATABASE_NAME,
                                                                                    botsglobal.settings.DATABASE_USER,
                                                                                    botsglobal.settings.DATABASE_PASSWORD),connection_factory=psycopg2.extras.DictConnection)
        botsglobal.db.set_client_encoding('UNICODE')
| Python |
import sys
import copy
import datetime
import django
from django.core.paginator import Paginator,EmptyPage, InvalidPage
from django.utils.translation import ugettext as _
import models
import botsglobal
from botsconfig import *
def preparereport2view(post,runidta):
    ''' Set the date-range of the run identified by runidta in a copy of the posted data:
        datefrom = timestamp of this run, dateuntil = timestamp of the next run
        (or 'now' when this is the most recent run). Returns the updated copy.
    '''
    terugpost = post.copy()
    thisrun = models.report.objects.get(idta=runidta)
    terugpost['datefrom'] = thisrun.ts
    try:
        nextrun = thisrun.get_next_by_ts()
    #narrowed from a bare except: get_next_by_ts() raises DoesNotExist when there is no later run
    except models.report.DoesNotExist:
        terugpost['dateuntil'] = datetimeuntil()
    else:
        terugpost['dateuntil'] = nextrun.ts
    return terugpost
def changepostparameters(post,type):
    ''' Convert the posted selection parameters of one view into parameters for another view
        ('type' names the from->to conversion, eg 'confirm2in': from confirm-view to incoming-view).
        post is a django QueryDict: pop() returns a list of values, hence the [0] indexing.
        Returns the converted copy; sorting/paging is reset to the default (newest first, page 1).
    '''
    terugpost = post.copy()
    if type == 'confirm2in':
        #drop parameters that have no meaning in the target view
        for key in ['confirmtype','confirmed','fromchannel','tochannel']:
            terugpost.pop(key)[0]
        terugpost['ineditype'] = terugpost.pop('editype')[0]
        terugpost['inmessagetype'] = terugpost.pop('messagetype')[0]
        #~ terugpost['outeditype'] = ''
        #~ terugpost['outmessagetype'] = ''
    elif type == 'confirm2out':
        for key in ['confirmtype','confirmed','fromchannel','tochannel']:
            terugpost.pop(key)[0]
    elif type == 'out2in':
        terugpost['outeditype'] = terugpost.pop('editype')[0]
        terugpost['outmessagetype'] = terugpost.pop('messagetype')[0]
        #~ terugpost['ineditype'] = ''
        #~ terugpost['inmessagetype'] = ''
    elif type == 'out2confirm':
        for key in ['lastrun']:
            terugpost.pop(key)[0]
    elif type == 'in2out':
        terugpost['editype'] = terugpost.pop('outeditype')[0]
        terugpost['messagetype'] = terugpost.pop('outmessagetype')[0]
        for key in ['ineditype','inmessagetype']:
            terugpost.pop(key)[0]
    elif type == 'in2confirm':
        terugpost['editype'] = terugpost.pop('outeditype')[0]
        terugpost['messagetype'] = terugpost.pop('outmessagetype')[0]
        for key in ['lastrun','statust','ineditype','inmessagetype']:
            terugpost.pop(key)[0]
    elif type == '2process':
        #process view only understands a small subset of parameters; drop the rest
        for key in terugpost.keys():
            if key not in ['datefrom','dateuntil','lastrun','idroute']:
                terugpost.pop(key)[0]
    elif type == 'fromprocess':
        pass #is OK, all values are used
    #reset sorting and paging for the target view
    terugpost['sortedby'] = 'ts'
    terugpost['sortedasc'] = False
    terugpost['page'] = 1
    return terugpost
def django_trace_origin(idta,where):
    ''' bots traces back all from the current step/ta.
        where is a dict that is used to indicate a condition.
        eg: {'status':EXTERNIN}
        If bots finds a ta for which this is true, the ta is added to a list.
        The list is returned when all tracing is done, and contains all ta's for which 'where' is True
    '''
    def trace_recurse(ta):
        ''' recursive
            walk over ta's backward (to origin).
            if condition is met, add the ta to a list
        '''
        for parent in get_parent(ta):
            donelijst.append(parent.idta)   #mark as visited to avoid re-walking shared ancestors
            for key,value in where.items():
                if getattr(parent,key) != value:
                    break
            else:   #all where-criteria are true; check if we already have this ta
                teruglijst.append(parent)
            trace_recurse(parent)
    def get_parent(ta):
        ''' yields the parents of a ta '''
        if ta.parent:   #explicit 1-1 parent reference
            if ta.parent not in donelijst: #search via parent
                yield models.ta.objects.get(idta=ta.parent)
        else:   #no explicit parent: find ta's that point to this ta as their child (n-1 relation)
            for parent in models.ta.objects.filter(child=ta.idta).all():
                if parent.idta in donelijst:
                    continue
                yield parent
    donelijst = []      #idta's already visited
    teruglijst = []     #ta's matching the 'where' condition
    ta = models.ta.objects.get(idta=idta)
    trace_recurse(ta)
    return teruglijst
def trace_document(pquery):
    ''' trace forward & backward from the current step/ta (status SPLITUP).
        gathers confirm information; annotates each ta in pquery.object_list with
        confirmtext, incoming/outgoing idta and channel.
    '''
    def trace_forward(ta):
        ''' recursive. walk over ta's forward (to exit). '''
        if ta.child:    #explicit 1-1 child reference
            child = models.ta.objects.get(idta=ta.child)
        else:
            try:
                child = models.ta.objects.filter(parent=ta.idta).all()[0]
            except IndexError:
                return  #no result, return
        if child.confirmasked:
            taorg.confirmtext += _(u'Confirm send: %(confirmasked)s; confirmed: %(confirmed)s; confirmtype: %(confirmtype)s\n')%{'confirmasked':child.confirmasked,'confirmed':child.confirmed,'confirmtype':child.confirmtype}
        if child.status==EXTERNOUT:     #reached the outgoing end of the chain
            taorg.outgoing = child.idta
            taorg.channel = child.tochannel
        trace_forward(child)
    def trace_back(ta):
        ''' recursive. walk over ta's backward (to origin). '''
        if ta.parent:
            parent = models.ta.objects.get(idta=ta.parent)
        else:
            try:
                parent = models.ta.objects.filter(child=ta.idta).all()[0] #just get one parent
            except IndexError:
                return  #no result, return
        if parent.confirmasked:
            taorg.confirmtext += u'Confirm asked: %(confirmasked)s; confirmed: %(confirmed)s; confirmtype: %(confirmtype)s\n'%{'confirmasked':parent.confirmasked,'confirmed':parent.confirmed,'confirmtype':parent.confirmtype}
        if parent.status==EXTERNIN:     #reached the incoming end of the chain
            taorg.incoming = parent.idta
            taorg.channel = parent.fromchannel
        trace_back(parent)
    #main for trace_document*****************
    for taorg in pquery.object_list:
        taorg.confirmtext = u''
        if taorg.status == SPLITUP:
            trace_back(taorg)
        else:
            trace_forward(taorg)
        if not taorg.confirmtext:
            taorg.confirmtext = u'---'
def gettrace(ta):
    ''' recursive: build the trace (tree of ta's) by attaching a 'talijst' attribute
        (list of child ta objects) to every ta, starting at the given one. '''
    if ta.child:    #has an explicit child
        children = [models.ta.objects.get(idta=ta.child)]
    else:           #search the ta-table for ta's referring to this one as parent
        children = list(models.ta.objects.filter(parent=ta.idta).all())
    ta.talijst = children
    for branch in children:
        gettrace(branch)
def trace2delete(trace):
    ''' Delete db-ta's of a trace (as built by gettrace), but keep a MERGED ta when it still
        contains messages from outside this trace. '''
    def gathermember(ta):
        ''' recursive: collect every ta of the trace into memberlist. '''
        memberlist.append(ta)
        for child in ta.talijst:
            gathermember(child)
    def gatherdelete(ta):
        ''' recursive: collect deletable ta's; stop at a MERGED ta that also contains
            messages from outside this trace. '''
        if ta.status==MERGED:
            for includedta in models.ta.objects.filter(child=ta.idta,status=TRANSLATED).all(): #select all db-ta's included in MERGED ta
                if includedta not in memberlist:
                    #~ print 'not found idta',includedta.idta, 'not to deletelist:',ta.idta
                    return
        deletelist.append(ta)
        for child in ta.talijst:
            gatherdelete(child)
    memberlist=[]
    gathermember(trace) #zet alle idta in memberlist
    #~ printlijst(memberlist, 'memberlist')
    #~ printlijst(deletelist, 'deletelist')
    deletelist=[]
    gatherdelete(trace) #zet alle te deleten idta in deletelijst
    #~ printlijst(deletelist, 'deletelist')
    for ta in deletelist:
        ta.delete()
def trace2detail(ta):
    ''' Flatten a trace (tree of ta's, as built by gettrace) into a display list; each ta gets
        an 'ind' marker (in/split/merge/out) and a 'channel' attribute. Returns the list. '''
    def newbranche(ta,level=0):
        ''' walk one branch of the tree; recurse when a ta splits into multiple children. '''
        def dota(ta, isfirststep = False):
            ''' annotate one ta and append it to detaillist; follow single-child chains. '''
            if isfirststep:
                if not level:   #level 0: the very first ta of the trace
                    ta.ind= _(u'in')
                else:
                    ta.ind = _(u'split>>>')
            elif ta.status==MERGED and ta.nrmessages>1:
                ta.ind = _(u'merge<<<')
            elif ta.status==EXTERNOUT:
                ta.ind = _(u'out')
            else:
                ta.ind =''
            #~ ta.action = models.ta.objects.only('filename').get(idta=ta.script)
            #prefer tochannel over fromchannel for display
            ta.channel=ta.fromchannel
            if ta.tochannel:
                ta.channel=ta.tochannel
            detaillist.append(ta)
            lengtetalijst = len(ta.talijst)
            if lengtetalijst > 1:   #split: recurse into each child as a new branch
                for child in ta.talijst:
                    newbranche(child,level=level+1)
            elif lengtetalijst == 1:
                dota(ta.talijst[0])
        #start new level
        dota(ta,isfirststep = True)
    detaillist=[]
    newbranche(ta)
    return detaillist
def datetimefrom():
    ''' Default lower bound for date-filtering: midnight, 'maxdays' days ago (ini setting, default 30). '''
    maxdays = botsglobal.ini.getint('settings','maxdays',30)
    startmoment = datetime.datetime.today() - datetime.timedelta(days=maxdays)
    return startmoment.strftime('%Y-%m-%d 00:00:00')
def datetimeuntil():
    ''' Default upper bound for date-filtering: today at 23:59:59. '''
    today = datetime.datetime.today()
    return '%s 23:59:59' % today.strftime('%Y-%m-%d')
def handlepagination(requestpost,cleaned_data):
    ''' use requestpost (the pressed navigation/sort button) to set the pagination
        criteria (page, sortedby, sortedasc) in cleaned_data; mutates cleaned_data in place. '''
    if 'first' in requestpost:
        cleaned_data['page'] = 1
    elif 'previous' in requestpost:
        cleaned_data['page'] -= 1
    elif 'next' in requestpost:
        cleaned_data['page'] += 1
    elif 'last' in requestpost:
        cleaned_data['page'] = sys.maxint   #filterquery falls back to the real last page
    elif 'order' in requestpost:    #change the sorting order
        neworder = requestpost['order']
        if neworder == cleaned_data['sortedby']:    #same column: flip asc/desc
            cleaned_data['sortedasc'] = not cleaned_data['sortedasc']
        else:   #other column: timestamps sort descending by default, everything else ascending
            cleaned_data['sortedby'] = neworder.lower()
            cleaned_data['sortedasc'] = cleaned_data['sortedby'] != 'ts'
def render(request,form,query=None):
    ''' shortcut: render the form's own template with the form and an optional queryset. '''
    template_vars = {'form': form, "queryset": query}
    return django.shortcuts.render_to_response(form.template, template_vars, context_instance=django.template.RequestContext(request))
def getidtalastrun():
    ''' return the highest reportidta in filereport (= idta of the last run),
        or None when there are no filereports. '''
    aggregated = models.filereport.objects.all().aggregate(django.db.models.Max('reportidta'))
    return aggregated['reportidta__max']
def filterquery(query , org_cleaned_data, incoming=False):
    ''' use the data of the form (mostly in hidden fields) to do the query.
        Returns one paginator page of the filtered/sorted queryset.
        query: django queryset; org_cleaned_data: form data ('page' is corrected in place
        when out of range); incoming: use the 'last run' selection variant for incoming files.
    '''
    cleaned_data = copy.copy(org_cleaned_data) #copy because it is destroyed in setting up query
    page = cleaned_data.pop('page') #do not use this in query, use in paginator
    if 'dateuntil' in cleaned_data:
        query = query.filter(ts__lt=cleaned_data.pop('dateuntil'))
    if 'datefrom' in cleaned_data:
        query = query.filter(ts__gte=cleaned_data.pop('datefrom'))
    if 'botskey' in cleaned_data and cleaned_data['botskey']:
        #exact match; icontains is too slow for big databases
        query = query.filter(botskey__exact=cleaned_data.pop('botskey'))
    if 'sortedby' in cleaned_data:
        query = query.order_by({True:'',False:'-'}[cleaned_data.pop('sortedasc')] + cleaned_data.pop('sortedby'))
    if 'lastrun' in cleaned_data:
        if cleaned_data.pop('lastrun'):
            idtalastrun = getidtalastrun()
            if idtalastrun: #if no result (=None): there are no filereports.
                if incoming: #detect if incoming; do other selection
                    query = query.filter(reportidta=idtalastrun)
                else:
                    query = query.filter(idta__gt=idtalastrun)
    #drop empty criteria; iterate over a copy because entries are deleted while looping
    for key,value in list(cleaned_data.items()):
        if not value:
            del cleaned_data[key]
    query = query.filter(**cleaned_data)
    paginator = Paginator(query, botsglobal.ini.getint('settings','limit',30))
    try:
        return paginator.page(page)
    #bugfix: was 'except EmptyPage, InvalidPage:' which (python 2) catches only EmptyPage
    #and binds it to the name InvalidPage; a parenthesized tuple is needed to catch both.
    except (EmptyPage, InvalidPage): #page does not exist: use last page
        lastpage = paginator.num_pages
        org_cleaned_data['page']=lastpage #change value in form as well!!
        return paginator.page(lastpage)
| Python |
import shutil
import time
from django.utils.translation import ugettext as _
#bots-modules
import botslib
import botsglobal
import grammar
import outmessage
from botsconfig import *
@botslib.log_session
def mergemessages(startstatus=TRANSLATED,endstatus=MERGED,idroute=''):
    ''' Merges en envelopes several messages to one file;
        In db-ta: attribute 'merge' indicates message should be merged with similar messages; 'merge' is generated in translation from messagetype-grammar
        If merge==False: 1 message per envelope - no merging, else append all similar messages to one file
        Implementation as separate loops: one for merge&envelope, another for enveloping only
        db-ta status TRANSLATED---->MERGED
    '''
    outerqueryparameters = {'status':startstatus,'statust':OK,'idroute':idroute,'rootidta':botslib.get_minta4query(),'merge':False}
    #**********for messages only to envelope (no merging)
    for row in botslib.query(u'''SELECT editype,messagetype,frompartner,topartner,testindicator,charset,contenttype,tochannel,envelope,nrmessages,idta,filename,idroute,merge
                                FROM ta
                                WHERE idta>%(rootidta)s
                                AND status=%(status)s
                                AND statust=%(statust)s
                                AND idroute=%(idroute)s
                                AND merge=%(merge)s
                                ''',
                                outerqueryparameters):
        try:
            #row attributes become the ta_info for the envelope
            ta_info = dict([(key,row[key]) for key in row.keys()])
            #~ ta_info={'merge':False,'idroute':idroute}
            #~ for key in row.keys():
                #~ ta_info[key] = row[key]
            ta_fromfile = botslib.OldTransaction(row['idta']) #edi message to envelope
            ta_tofile=ta_fromfile.copyta(status=endstatus) #edifile for enveloped message; attributes of not-enveloped message are copied...
            #~ ta_fromfile.update(child=ta_tofile.idta) #??there is already a parent-child relation (1-1)...
            ta_info['filename'] = str(ta_tofile.idta) #create filename for enveloped message
            botsglobal.logger.debug(u'Envelope 1 message editype: %s, messagetype: %s.',ta_info['editype'],ta_info['messagetype'])
            envelope(ta_info,[row['filename']])
        except:
            #NOTE(review): if copyta() itself raises, ta_tofile is unbound here - confirm
            txt=botslib.txtexc()
            ta_tofile.update(statust=ERROR,errortext=txt)
        else:
            ta_fromfile.update(statust=DONE)
            ta_tofile.update(statust=OK,**ta_info) #selection is used to update enveloped message;
    #**********for messages to merge & envelope
    #all GROUP BY fields must be used in SELECT!
    #as files get merged: can not copy idta; must extract relevant attributes.
    outerqueryparameters['merge']=True
    for row in botslib.query(u'''SELECT editype,messagetype,frompartner,topartner,tochannel,testindicator,charset,contenttype,envelope,sum(nrmessages) as nrmessages
                                FROM ta
                                WHERE idta>%(rootidta)s
                                AND status=%(status)s
                                AND statust=%(statust)s
                                AND idroute=%(idroute)s
                                AND merge=%(merge)s
                                GROUP BY editype,messagetype,frompartner,topartner,tochannel,testindicator,charset,contenttype,envelope
                                ''',
                                outerqueryparameters):
        try:
            ta_info = dict([(key,row[key]) for key in row.keys()])
            ta_info.update({'merge':False,'idroute':idroute})
            #~ for key in row.keys():
                #~ ta_info[key] = row[key]
            ta_tofile=botslib.NewTransaction(status=endstatus,idroute=idroute) #edifile for enveloped messages
            ta_info['filename'] = str(ta_tofile.idta) #create filename for enveloped message
            innerqueryparameters = ta_info.copy()
            innerqueryparameters.update(outerqueryparameters)
            ta_list=[]
            #gather individual idta and filenames
            #explicitly allow formpartner/topartner to be None/NULL
            for row2 in botslib.query(u'''SELECT idta, filename
                                        FROM ta
                                        WHERE idta>%(rootidta)s
                                        AND status=%(status)s
                                        AND statust=%(statust)s
                                        AND merge=%(merge)s
                                        AND editype=%(editype)s
                                        AND messagetype=%(messagetype)s
                                        AND (frompartner=%(frompartner)s OR frompartner IS NULL)
                                        AND (topartner=%(topartner)s OR topartner IS NULL)
                                        AND tochannel=%(tochannel)s
                                        AND testindicator=%(testindicator)s
                                        AND charset=%(charset)s
                                        AND idroute=%(idroute)s
                                        ''',
                                        innerqueryparameters):
                ta_fromfile = botslib.OldTransaction(row2['idta']) #edi message to envelope
                ta_fromfile.update(statust=DONE,child=ta_tofile.idta) #st child because of n->1 relation
                ta_list.append(row2['filename'])
            botsglobal.logger.debug(u'Merge and envelope: editype: %s, messagetype: %s, %s messages',ta_info['editype'],ta_info['messagetype'],ta_info['nrmessages'])
            envelope(ta_info,ta_list)
        except:
            txt=botslib.txtexc()
            ta_tofile.mergefailure()
            ta_tofile.update(statust=ERROR,errortext=txt)
        else:
            ta_tofile.update(statust=OK,**ta_info)
def envelope(ta_info,ta_list):
    ''' dispatch function for class Envelope and subclasses.
        editype, edimessage and envelope essential for enveloping.
        determine the class for enveloping:
        1. empty string: no enveloping (class noenvelope); file(s) is/are just copied. No user scripting for envelope.
        2. if envelope is a class in this module, use it
        3. if editype is a class in this module, use it
        4. if user defined enveloping in usersys/envelope/<editype>/<envelope>.<envelope>, use it (user defined scripting overrides)
        Always check if user envelope script. user exits extends/replaces default enveloping.
        Raises OutMessageError when no envelope class can be found.
    '''
    #determine which class to use for enveloping
    userscript = scriptname = None
    if not ta_info['envelope']:     #used when enveloping is just appending files.
        classtocall = noenvelope
    else:
        try:    #see if there is user scripted enveloping
            userscript,scriptname = botslib.botsimport('envelopescripts',ta_info['editype'] + '.' + ta_info['envelope'])
        except ImportError:     #other errors, eg syntax errors are just passed
            pass
        #first: check if there is a class with name ta_info['envelope'] in the user scripting
        #this allows complete enveloping in user scripting
        if userscript and hasattr(userscript,ta_info['envelope']):
            classtocall = getattr(userscript,ta_info['envelope'])
        else:
            try: #check if there is a envelope class with name ta_info['envelope'] in this file (envelope.py)
                classtocall = globals()[ta_info['envelope']]
            except KeyError:
                try: #check if there is a envelope class with name ta_info['editype'] in this file (envelope.py).
                    #20110919: this should disappear in the long run....use this now for orders2printenvelope and myxmlenvelop
                    #reason to disappear: confusing when setting up.
                    classtocall = globals()[ta_info['editype']]
                except KeyError:
                    #bugfix: report the envelope that was not found (used to pass ta_info['editype'] for the $envelope placeholder)
                    raise botslib.OutMessageError(_(u'Not found envelope "$envelope".'),envelope=ta_info['envelope'])
    env = classtocall(ta_info,ta_list,userscript,scriptname)
    env.run()
class Envelope(object):
    ''' Base Class for enveloping; use subclasses.'''
    def __init__(self,ta_info,ta_list,userscript,scriptname):
        # ta_info: attributes of the enveloped file; ta_list: filenames of the messages to envelope;
        # userscript/scriptname: optional user exit module (may be None)
        self.ta_info = ta_info
        self.ta_list = ta_list
        self.userscript = userscript
        self.scriptname = scriptname
    def _openoutenvelope(self,editype, messagetype_or_envelope):
        ''' make an outmessage object; read the grammar.'''
        #self.ta_info now contains information from ta: editype, messagetype,testindicator,charset,envelope, contenttype
        self.out = outmessage.outmessage_init(**self.ta_info) #make outmessage object. Init with self.out.ta_info
        #read grammar for envelopesyntax. Remark: self.ta_info is not updated now
        self.out.outmessagegrammarread(editype, messagetype_or_envelope)
        #self.out.ta_info can contain partner dependent parameters. the partner dependent parameters have overwritten parameters fro mmessage/envelope
    def writefilelist(self,tofile):
        ''' append every file of self.ta_list to the (already opened) tofile. '''
        for filename in self.ta_list:
            fromfile = botslib.opendata(filename, 'rb',self.ta_info['charset'])
            shutil.copyfileobj(fromfile,tofile)
            fromfile.close()
    def filelist2absolutepaths(self):
        ''' utility function; some classes need absolute filenames eg for xml-including'''
        return [botslib.abspathdata(filename) for filename in self.ta_list]
class noenvelope(Envelope):
    ''' Only copies the input files to one output file.'''
    def run(self):
        #user exit may alter ta_info before writing
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        tofile = botslib.opendata(self.ta_info['filename'],'wb',self.ta_info['charset'])
        self.writefilelist(tofile)
        tofile.close()
class fixed(noenvelope):
    ''' fixed-length records: no real envelope, files are simply concatenated. '''
    pass
class csv(noenvelope):
    ''' csv: no real envelope, files are simply concatenated. '''
    pass
class csvheader(Envelope):
    ''' csv with a header line: write one record containing the field names, then append all files. '''
    def run(self):
        self._openoutenvelope(self.ta_info['editype'],self.ta_info['messagetype'])
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        #self.ta_info is not overwritten
        tofile = botslib.opendata(self.ta_info['filename'],'wb',self.ta_info['charset'])
        #header record: every field filled with its own field-ID (ID/FIELDS come from botsconfig)
        headers = dict([(field[ID],field[ID]) for field in self.out.defmessage.structure[0][FIELDS]])
        self.out.put(headers)
        self.out.tree2records(self.out.root)
        tofile.write(self.out._record2string(self.out.records[0]))
        self.writefilelist(tofile)
        tofile.close()
class edifact(Envelope):
    ''' Generate UNB and UNZ segment; fill with data, write to interchange-file.
        The enveloped messages themselves are appended between UNB and UNZ.'''
    def run(self):
        if not self.ta_info['topartner'] or not self.ta_info['frompartner']:
            raise botslib.OutMessageError(_(u'In enveloping "frompartner" or "topartner" unknown: "$ta_info".'),ta_info=self.ta_info)
        self._openoutenvelope(self.ta_info['editype'],self.ta_info['envelope'])
        self.ta_info.update(self.out.ta_info)
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        #version dependent enveloping: syntax version < 4 uses 2-digit years and a fixed reserve char
        writeUNA = False
        if self.ta_info['version']<'4':
            date = time.strftime('%y%m%d')
            reserve = ' '
            if self.ta_info['charset'] != 'UNOA':
                writeUNA = True     #non-default charset: announce separators via UNA
        else:
            date = time.strftime('%Y%m%d')
            reserve = self.ta_info['reserve']
            if self.ta_info['charset'] not in ['UNOA','UNOB']:
                writeUNA = True
        #UNB counter is per sender or receiver
        if botsglobal.ini.getboolean('settings','interchangecontrolperpartner',False):
            self.ta_info['reference'] = str(botslib.unique('unbcounter_' + self.ta_info['topartner']))
        else:
            self.ta_info['reference'] = str(botslib.unique('unbcounter_' + self.ta_info['frompartner']))
        #testindicator is more complex:
        if self.ta_info['testindicator'] and self.ta_info['testindicator']!='0': #first check value from ta; do not use default
            testindicator = '1'
        elif self.ta_info['UNB.0035'] != '0': #then check value from grammar
            testindicator = '1'
        else:
            testindicator = ''
        #build the envelope segments (that is, the tree from which the segments will be generated)
        self.out.put({'BOTSID':'UNB',
                      'S001.0001':self.ta_info['charset'],
                      'S001.0002':self.ta_info['version'],
                      'S002.0004':self.ta_info['frompartner'],
                      'S003.0010':self.ta_info['topartner'],
                      'S004.0017':date,
                      'S004.0019':time.strftime('%H%M'),
                      '0020':self.ta_info['reference']})
        #the following fields are conditional; do not write these when empty string (separator compression does take empty strings into account)
        if self.ta_info['UNB.S002.0007']:
            self.out.put({'BOTSID':'UNB','S002.0007': self.ta_info['UNB.S002.0007']})
        if self.ta_info['UNB.S003.0007']:
            self.out.put({'BOTSID':'UNB','S003.0007': self.ta_info['UNB.S003.0007']})
        if self.ta_info['UNB.0026']:
            self.out.put({'BOTSID':'UNB','0026': self.ta_info['UNB.0026']})
        if testindicator:
            self.out.put({'BOTSID':'UNB','0035': testindicator})
        self.out.put({'BOTSID':'UNB'},{'BOTSID':'UNZ','0036':self.ta_info['nrmessages'],'0020':self.ta_info['reference']}) #dummy segment; is not used
        #user exit
        botslib.tryrunscript(self.userscript,self.scriptname,'envelopecontent',ta_info=self.ta_info,out=self.out)
        #convert the tree into segments; here only the UNB is written (first segment)
        self.out.normalisetree(self.out.root)
        self.out.tree2records(self.out.root)
        #start doing the actual writing:
        tofile = botslib.opendata(self.ta_info['filename'],'wb',self.ta_info['charset'])
        if writeUNA or self.ta_info['forceUNA']:
            tofile.write('UNA'+self.ta_info['sfield_sep']+self.ta_info['field_sep']+self.ta_info['decimaal']+self.ta_info['escape']+ reserve +self.ta_info['record_sep']+self.ta_info['add_crlfafterrecord_sep'])
        tofile.write(self.out._record2string(self.out.records[0]))      #write UNB
        self.writefilelist(tofile)                                      #write the enveloped messages
        tofile.write(self.out._record2string(self.out.records[-1]))     #write UNZ
        tofile.close()
        #if confirm-rules say so, ask counterparty for a CONTRL acknowledgement; never for a CONTRL itself
        if self.ta_info['messagetype'][:6]!='CONTRL' and botslib.checkconfirmrules('ask-edifact-CONTRL',idroute=self.ta_info['idroute'],idchannel=self.ta_info['tochannel'],
                                                                                   topartner=self.ta_info['topartner'],frompartner=self.ta_info['frompartner'],
                                                                                   editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype']):
            self.ta_info['confirmtype'] = u'ask-edifact-CONTRL'
            self.ta_info['confirmasked'] = True
class tradacoms(Envelope):
    ''' Generate STX and END segment; fill with appropriate data, write to interchange file.
        The enveloped messages themselves are appended between STX and END.'''
    def run(self):
        if not self.ta_info['topartner'] or not self.ta_info['frompartner']:
            raise botslib.OutMessageError(_(u'In enveloping "frompartner" or "topartner" unknown: "$ta_info".'),ta_info=self.ta_info)
        self._openoutenvelope(self.ta_info['editype'],self.ta_info['envelope'])
        self.ta_info.update(self.out.ta_info)
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        #prepare data for envelope; STX counter is per sender or receiver
        if botsglobal.ini.getboolean('settings','interchangecontrolperpartner',False):
            self.ta_info['reference'] = str(botslib.unique('stxcounter_' + self.ta_info['topartner']))
        else:
            self.ta_info['reference'] = str(botslib.unique('stxcounter_' + self.ta_info['frompartner']))
        #build the envelope segments (that is, the tree from which the segments will be generated)
        self.out.put({'BOTSID':'STX',
                      'STDS1':self.ta_info['STX.STDS1'],
                      'STDS2':self.ta_info['STX.STDS2'],
                      'FROM.01':self.ta_info['frompartner'],
                      'UNTO.01':self.ta_info['topartner'],
                      'TRDT.01':time.strftime('%y%m%d'),
                      'TRDT.02':time.strftime('%H%M%S'),
                      'SNRF':self.ta_info['reference']})
        #conditional fields: only write when filled
        if self.ta_info['STX.FROM.02']:
            self.out.put({'BOTSID':'STX','FROM.02':self.ta_info['STX.FROM.02']})
        if self.ta_info['STX.UNTO.02']:
            self.out.put({'BOTSID':'STX','UNTO.02':self.ta_info['STX.UNTO.02']})
        if self.ta_info['STX.APRF']:
            self.out.put({'BOTSID':'STX','APRF':self.ta_info['STX.APRF']})
        if self.ta_info['STX.PRCD']:
            self.out.put({'BOTSID':'STX','PRCD':self.ta_info['STX.PRCD']})
        self.out.put({'BOTSID':'STX'},{'BOTSID':'END','NMST':self.ta_info['nrmessages']}) #dummy segment; is not used
        #user exit
        botslib.tryrunscript(self.userscript,self.scriptname,'envelopecontent',ta_info=self.ta_info,out=self.out)
        #convert the tree into segments; here only the STX is written (first segment)
        self.out.normalisetree(self.out.root)
        self.out.tree2records(self.out.root)
        #start doing the actual writing:
        tofile = botslib.opendata(self.ta_info['filename'],'wb',self.ta_info['charset'])
        tofile.write(self.out._record2string(self.out.records[0]))      #write STX
        self.writefilelist(tofile)                                      #write the enveloped messages
        tofile.write(self.out._record2string(self.out.records[-1]))     #write END
        tofile.close()
class template(Envelope):
    def run(self):
        ''' Class for (test) orderprint; delivers a valid html-file.
            Uses a kid-template for the enveloping/merging;
            no envelope grammar is used, kid does the actual writing.
        '''
        try:
            import kid
        except ImportError:
            txt = botslib.txtexc()
            #format AFTER translation lookup; formatting inside _() defeated gettext
            raise ImportError(_(u'Dependency failure: editype "template" requires python library "kid". Error:\n%s') % txt)
        #we do not know the envelope here, so read the syntax from editype/messagetype
        defmessage = grammar.grammarread(self.ta_info['editype'],self.ta_info['messagetype'])
        self.ta_info.update(defmessage.syntax)
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        if not self.ta_info['envelope-template']:
            raise botslib.OutMessageError(_(u'While enveloping in "$editype.$messagetype": syntax option "envelope-template" not filled; is required.'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'])
        templatefile = botslib.abspath('templates',self.ta_info['envelope-template'])
        ta_list = self.filelist2absolutepaths()
        try:
            botsglobal.logger.debug(u'Start writing envelope to file "%s".',self.ta_info['filename'])
            ediprint = kid.Template(file=templatefile, data=ta_list) #init template; pass list with filenames
        except Exception:       #was a bare except; keep broad (template errors vary) but do not swallow SystemExit/KeyboardInterrupt
            txt = botslib.txtexc()
            raise botslib.OutMessageError(_(u'While enveloping in "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
        try:
            f = botslib.opendata(self.ta_info['filename'],'wb')
            try:
                ediprint.write(f,
                               encoding=self.ta_info['charset'],
                               output=self.ta_info['output'])
            finally:
                f.close()       #output file was never closed before (resource leak)
        except Exception:
            txt = botslib.txtexc()
            raise botslib.OutMessageError(_(u'While enveloping in "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
class orders2printenvelope(template):
    ''' Identical to template enveloping (subclass with no changes).'''
    pass
class templatehtml(Envelope):
    def run(self):
        ''' Class for (test) orderprint; delivers a valid html-file.
            Uses a genshi-template for the enveloping/merging;
            no envelope grammar is used, genshi does the actual writing.
        '''
        try:
            from genshi.template import TemplateLoader
        except ImportError:
            txt = botslib.txtexc()
            #format AFTER translation lookup; also: message named editype "template" instead of "templatehtml"
            raise ImportError(_(u'Dependency failure: editype "templatehtml" requires python library "genshi". Error:\n%s') % txt)
        defmessage = grammar.grammarread(self.ta_info['editype'],self.ta_info['messagetype']) #needed because we do not know envelope; read syntax for editype/messagetype
        self.ta_info.update(defmessage.syntax)
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        if not self.ta_info['envelope-template']:
            raise botslib.OutMessageError(_(u'While enveloping in "$editype.$messagetype": syntax option "envelope-template" not filled; is required.'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'])
        templatefile = botslib.abspath('templateshtml',self.ta_info['envelope-template'])
        ta_list = self.filelist2absolutepaths()
        try:
            botsglobal.logger.debug(u'Start writing envelope to file "%s".',self.ta_info['filename'])
            loader = TemplateLoader(auto_reload=False)
            tmpl = loader.load(templatefile)
        except Exception:       #was a bare except; keep broad (template errors vary) but do not swallow SystemExit/KeyboardInterrupt
            txt = botslib.txtexc()
            raise botslib.OutMessageError(_(u'While enveloping in "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
        try:
            f = botslib.opendata(self.ta_info['filename'],'wb')
            try:
                stream = tmpl.generate(data=ta_list)
                stream.render(method='xhtml',encoding=self.ta_info['charset'],out=f)
            finally:
                f.close()       #output file was never closed before (resource leak)
        except Exception:
            txt = botslib.txtexc()
            raise botslib.OutMessageError(_(u'While enveloping in "$editype.$messagetype", error:\n$txt'),editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype'],txt=txt)
class x12(Envelope):
    ''' Generate envelope segments (ISA, GS ... GE, IEA); fill with appropriate data, write to interchange-file.'''
    def run(self):
        if not self.ta_info['topartner'] or not self.ta_info['frompartner']:
            raise botslib.OutMessageError(_(u'In enveloping "frompartner" or "topartner" unknown: "$ta_info".'),ta_info=self.ta_info)
        self._openoutenvelope(self.ta_info['editype'],self.ta_info['envelope'])
        self.ta_info.update(self.out.ta_info)
        #need to know the functionalgroup code:
        defmessage = grammar.grammarread(self.ta_info['editype'],self.ta_info['messagetype'])
        self.ta_info['functionalgroup'] = defmessage.syntax['functionalgroup']
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        #prepare data for envelope
        ISA09date = time.strftime('%y%m%d')
        #test indicator can either be from configuration (self.ta_info['ISA15']) or by mapping (self.ta_info['testindicator'])
        #mapping overrules.
        if self.ta_info['testindicator'] and self.ta_info['testindicator']!='0': #'0' is default value (in db)
            testindicator = self.ta_info['testindicator']
        else:
            testindicator = self.ta_info['ISA15']
        #~ print self.ta_info['messagetype'], 'grammar:',self.ta_info['ISA15'],'ta:',self.ta_info['testindicator'],'out:',testindicator
        #ISA counter is per sender or receiver
        if botsglobal.ini.getboolean('settings','interchangecontrolperpartner',False):
            self.ta_info['reference'] = str(botslib.unique('isacounter_' + self.ta_info['topartner']))
        else:
            self.ta_info['reference'] = str(botslib.unique('isacounter_' + self.ta_info['frompartner']))
        #ISA06 and GS02 can be different; eg ISA06 is a service provider.
        #ISA06 and GS02 can be in the syntax....
        ISA06 = self.ta_info.get('ISA06',self.ta_info['frompartner'])
        ISA06 = ISA06.ljust(15) #add spaces; is fixed length
        GS02 = self.ta_info.get('GS02',self.ta_info['frompartner'])
        #also for ISA08 and GS03
        ISA08 = self.ta_info.get('ISA08',self.ta_info['topartner'])
        ISA08 = ISA08.ljust(15) #add spaces; is fixed length
        GS03 = self.ta_info.get('GS03',self.ta_info['topartner'])
        #build the envelope segments (that is, the tree from which the segments will be generated)
        self.out.put({'BOTSID':'ISA',
                      'ISA01':self.ta_info['ISA01'],
                      'ISA02':self.ta_info['ISA02'],
                      'ISA03':self.ta_info['ISA03'],
                      'ISA04':self.ta_info['ISA04'],
                      'ISA05':self.ta_info['ISA05'],
                      'ISA06':ISA06,
                      'ISA07':self.ta_info['ISA07'],
                      'ISA08':ISA08,
                      'ISA09':ISA09date,
                      'ISA10':time.strftime('%H%M'),
                      'ISA11':self.ta_info['ISA11'], #if ISA version > 00403, replaced by repetition separator
                      'ISA12':self.ta_info['version'],
                      'ISA13':self.ta_info['reference'],
                      'ISA14':self.ta_info['ISA14'],
                      'ISA15':testindicator},strip=False) #MIND: strip=False: ISA fields should not be stripped as it is somewhat like fixed-length
        self.out.put({'BOTSID':'ISA'},{'BOTSID':'IEA','IEA01':'1','IEA02':self.ta_info['reference']})
        GS08 = self.ta_info['messagetype'][3:]      #version part of messagetype
        if GS08[:6]<'004010':                       #versions before 004010 use a 6-digit GS04 date
            GS04date = time.strftime('%y%m%d')
        else:
            GS04date = time.strftime('%Y%m%d')
        self.out.put({'BOTSID':'ISA'},{'BOTSID':'GS',
                                       'GS01':self.ta_info['functionalgroup'],
                                       'GS02':GS02,
                                       'GS03':GS03,
                                       'GS04':GS04date,
                                       'GS05':time.strftime('%H%M'),
                                       'GS06':self.ta_info['reference'],
                                       'GS07':self.ta_info['GS07'],
                                       'GS08':GS08})
        self.out.put({'BOTSID':'ISA'},{'BOTSID':'GS'},{'BOTSID':'GE','GE01':self.ta_info['nrmessages'],'GE02':self.ta_info['reference']}) #dummy segment; is not used
        #user exit
        botslib.tryrunscript(self.userscript,self.scriptname,'envelopecontent',ta_info=self.ta_info,out=self.out)
        #convert the tree into segments; here only ISA and GS are written (first segments)
        self.out.normalisetree(self.out.root)
        self.out.tree2records(self.out.root)
        #start doing the actual writing:
        tofile = botslib.opendata(self.ta_info['filename'],'wb',self.ta_info['charset'])
        ISAstring = self.out._record2string(self.out.records[0])
        if self.ta_info['version']<'00403':
            ISAstring = ISAstring[:103] + self.ta_info['field_sep']+ self.ta_info['sfield_sep'] + ISAstring[103:] #hack for strange characters at end of ISA; hardcoded
        else:
            ISAstring = ISAstring[:82] +self.ta_info['reserve'] + ISAstring[83:103] + self.ta_info['field_sep']+ self.ta_info['sfield_sep'] + ISAstring[103:] #hack for strange characters at end of ISA; hardcoded
        tofile.write(ISAstring) #write ISA
        tofile.write(self.out._record2string(self.out.records[1])) #write GS
        self.writefilelist(tofile)
        tofile.write(self.out._record2string(self.out.records[-2])) #write GE
        tofile.write(self.out._record2string(self.out.records[-1])) #write IEA
        tofile.close()
        #if confirm-rules say so, ask counterparty for a 997 acknowledgement; never for functionalgroup FA (the 997 itself)
        if self.ta_info['functionalgroup']!='FA' and botslib.checkconfirmrules('ask-x12-997',idroute=self.ta_info['idroute'],idchannel=self.ta_info['tochannel'],
                                                                               topartner=self.ta_info['topartner'],frompartner=self.ta_info['frompartner'],
                                                                               editype=self.ta_info['editype'],messagetype=self.ta_info['messagetype']):
            self.ta_info['confirmtype'] = u'ask-x12-997'
            self.ta_info['confirmasked'] = True
class jsonnocheck(noenvelope):
    ''' json without grammar checking: enveloped like noenvelope (files copied to one output file).'''
    pass
class json(noenvelope):
    ''' json: enveloped like noenvelope (files copied to one output file).
        NOTE: class name shadows the stdlib 'json' module within this module's namespace.'''
    pass
class xmlnocheck(noenvelope):
    ''' xml without grammar checking: enveloped like noenvelope (files copied to one output file).'''
    pass
class xml(noenvelope):
    ''' xml: enveloped like noenvelope (files copied to one output file).'''
    pass
class myxmlenvelop(xml):
    ''' Old xml enveloping; name is kept for upward compatibility & as example for xml enveloping.'''
    def run(self):
        ''' Class for (test) xml envelope. There is no standardised XML-envelope!
            Writes a new XML-tree containing place-holders for the XML-files to include;
            the real enveloping is done by ElementTree's include.'''
        include_tag = '{http://www.w3.org/2001/XInclude}include'
        self._openoutenvelope(self.ta_info['editype'],self.ta_info['envelope'])
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        #~ self.out.put({'BOTSID':'root','xmlns:xi':"http://www.w3.org/2001/XInclude"}) #works, but attribute is not removed bij ETI.include
        self.out.put({'BOTSID':'root'})     #root of the out-tree
        #add one xi:include place-holder per edi-file of this envelope
        for absfilename in self.filelist2absolutepaths():
            self.out.put({'BOTSID':'root'},
                         {'BOTSID':include_tag,
                          include_tag + '__parse':'xml',
                          include_tag + '__href':absfilename})
        self.out.envelopewrite(self.out.root) #'resolves' the included xml files
class db(Envelope):
    ''' No enveloping and no copying: the (first) input file is passed on as the output file.'''
    def run(self):
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        #just hand the input file over as the result; nothing is written here
        self.ta_info['filename'] = self.ta_list[0]
class raw(Envelope):
    ''' No enveloping and no copying: the (first) input file is passed on as the output file.'''
    def run(self):
        botslib.tryrunscript(self.userscript,self.scriptname,'ta_infocontent',ta_info=self.ta_info)
        #just hand the input file over as the result; nothing is written here
        self.ta_info['filename'] = self.ta_list[0]
| Python |
# Django settings for bots project.
import os
import bots
#*******settings for bots error reports**********************************
MANAGERS = ( #bots will send error reports to the MANAGERS
    ('name_manager', 'manager@domain.org'),
    )
#~ EMAIL_HOST = 'smtp.gmail.com' #Default: 'localhost'
#~ EMAIL_PORT = '587' #Default: 25
#~ EMAIL_USE_TLS = True #Default: False
#~ EMAIL_HOST_USER = 'user@gmail.com' #Default: ''. Username to use for the SMTP server defined in EMAIL_HOST. If empty, Django won't attempt authentication.
#~ EMAIL_HOST_PASSWORD = '' #Default: ''. PASSWORD to use for the SMTP server defined in EMAIL_HOST. If empty, Django won't attempt authentication.
#~ SERVER_EMAIL = 'user@gmail.com' #Sender of bots error reports. Default: 'root@localhost'
#~ EMAIL_SUBJECT_PREFIX = '' #This is prepended on email subject.
#*********path settings*************************advised is not to change these values!!
#PROJECT_PATH: directory where the installed bots package lives; all relative paths below hang off it
PROJECT_PATH = os.path.abspath(os.path.dirname(bots.__file__))
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_PATH + '/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
#~ FILE_UPLOAD_TEMP_DIR = os.path.join(PROJECT_PATH, 'botssys/pluginsuploaded') #set in bots.ini
ROOT_URLCONF = 'bots.urls'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/logout/'
#~ LOGOUT_REDIRECT_URL = #??not such parameter; is set in urls
TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, 'templates'),
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    )
#*********database settings*************************
#django-admin syncdb --pythonpath='/home/hje/botsup' --settings='bots.config.settings'
#SQLITE:
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = os.path.join(PROJECT_PATH, 'botssys/sqlitedb/botsdb') #path to database; if relative path: interpreted relative to bots root directory
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
DATABASE_OPTIONS = {}
#~ #MySQL:
#~ DATABASE_ENGINE = 'mysql'
#~ DATABASE_NAME = 'botsdb'
#~ DATABASE_USER = 'bots'
#~ DATABASE_PASSWORD = 'botsbots'
#~ DATABASE_HOST = '192.168.0.7'
#~ DATABASE_PORT = '3306'
#~ DATABASE_OPTIONS = {'use_unicode':True,'charset':'utf8',"init_command": 'SET storage_engine=INNODB'}
#PostgreSQL:
#~ DATABASE_ENGINE = 'postgresql_psycopg2'
#~ DATABASE_NAME = 'botsdb'
#~ DATABASE_USER = 'bots'
#~ DATABASE_PASSWORD = 'botsbots'
#~ DATABASE_HOST = '192.168.0.7'
#~ DATABASE_PORT = '5432'
#~ DATABASE_OPTIONS = {}
#*********sessions, cookies, log out time*************************
SESSION_EXPIRE_AT_BROWSER_CLOSE = True #True: always log in when browser is closed
SESSION_COOKIE_AGE = 3600 #seconds a user needs to login when no activity
SESSION_SAVE_EVERY_REQUEST = True #if True: SESSION_COOKIE_AGE is interpreted as: since last activity
#*********localization*************************
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
DATE_FORMAT = "Y-m-d"
DATETIME_FORMAT = "Y-m-d G:i"
TIME_FORMAT = "G:i"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
#~ LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'en'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
#*************************************************************************
#*********other django setting. please consult django docs.***************
#set in bots.ini
#~ DEBUG = True
#~ TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
# Make this unique, and don't share it with anybody.
# NOTE(review): this default key is shipped with the source; each installation should replace it.
SECRET_KEY = 'm@-u37qiujmeqfbu$daaaaz)sp^7an4u@h=wfx9dd$$$zl2i*x9#awojdc'
ADMINS = (
    ('bots', 'your_email@domain.com'),
    )
#save uploaded file (=plugin) always to file. no path for temp storage is used, so system default is used.
FILE_UPLOAD_HANDLERS = (
    "django.core.files.uploadhandler.TemporaryFileUploadHandler",
    )
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    #'django.template.loaders.eggs.load_template_source',
    )
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'bots.persistfilters.FilterPersistMiddleware',
    )
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'bots',
    )
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    )
| Python |
'''
code found at code.djangoproject.com/ticket/3777
'''
from django import http
class FilterPersistMiddleware(object):
    ''' Makes admin changelist filters persistent per session: the query string
        of each admin page is saved in the session and re-applied (via redirect)
        when the user returns to that page without a query string.'''

    def _get_default(self, key):
        """ Gets any set default filters for the admin. Returns None if no
        default is set. """
        # Always None here: the settings lookup below is commented out.
        default = None
        #~ default = settings.ADMIN_DEFAULT_FILTERS.get(key, None)
        # Filters are allowed to be functions. If this key is one, call it.
        if hasattr(default, '__call__'):
            default = default()
        return default

    def process_request(self, request):
        # Only act on admin pages; never redirect a POST.
        if '/admin/' not in request.path or request.method == 'POST':
            return None
        if request.META.has_key('HTTP_REFERER'):
            # strip the query string, keep only the path from '/admin' onwards
            referrer = request.META['HTTP_REFERER'].split('?')[0]
            referrer = referrer[referrer.find('/admin'):len(referrer)]
        else:
            referrer = u''
        # NOTE(review): assumes META['QUERY_STRING'] is always present -- confirm for this Django version.
        popup = 'pop=1' in request.META['QUERY_STRING']
        path = request.path
        query_string = request.META['QUERY_STRING']
        session = request.session
        if session.get('redirected', False):#so that we dont loop once redirected
            del session['redirected']
            return None
        # session key is derived from the admin page path; popups get their own key
        key = 'key'+path.replace('/','_')
        if popup:
            key = 'popup'+key
        if path == referrer:
            """ We are in the same page as before. We assume that filters were
            changed and update them. """
            if query_string == '': #Filter is empty, delete it
                if session.has_key(key):
                    del session[key]
                return None
            else:
                request.session[key] = query_string
        else:
            """ We are coming from another page. Set querystring to
            saved or default value. """
            query_string=session.get(key, self._get_default(key))
            if query_string is not None:
                redirect_to = path+'?'+query_string
                # flag the redirect so the next request is not redirected again
                request.session['redirected'] = True
                return http.HttpResponseRedirect(redirect_to)
            else:
                return None
'''
Sample default filters:
from datetime import date
def _today():
return 'starttime__gte=' + date.today().isoformat()
# Default filters. Format: 'key_$url', where $url has slashes replaced
# with underscores
# value can either be a function or a string
ADMIN_DEFAULT_FILTERS= {
# display only events starting today
'key_admin_event_calendar_event_': _today,
# display active members
'key_admin_users_member_': 'is_active__exact=1',
# only show new suggestions
'key_admin_suggestions_suggestion_': 'status__exact=new',
}
''' | Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.